LLVM 22.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/IndexedMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SparseSet.h"
23#include "llvm/ADT/Statistic.h"
41#include "llvm/Pass.h"
42#include "llvm/Support/Debug.h"
45#include <cassert>
46#include <tuple>
47#include <vector>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "regalloc"
52
53STATISTIC(NumStores, "Number of stores added");
54STATISTIC(NumLoads, "Number of loads added");
55STATISTIC(NumCoalesced, "Number of copies coalesced");
56
57// FIXME: Remove this switch when all testcases are fixed!
58static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
60
61static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator",
63
64namespace {
65
/// Assign ascending index for instructions in machine basic block. The index
/// can be used to determine dominance between instructions in same MBB.
class InstrPosIndexes {
public:
  /// Forget the cached numbering; the next getIndex() call re-numbers lazily.
  void unsetInitialized() { IsInitialized = false; }

  /// (Re)number every instruction in \p MBB with indexes spaced InstrDist
  /// apart, leaving gaps so newly inserted instructions can usually be
  /// numbered without renumbering the whole block.
  void init(const MachineBasicBlock &MBB) {
    CurMBB = &MBB;
    Instr2PosIndex.clear();
    uint64_t LastIndex = 0;
    for (const MachineInstr &MI : MBB) {
      LastIndex += InstrDist;
      Instr2PosIndex[&MI] = LastIndex;
    }
  }

  /// Set \p Index to index of \p MI. If \p MI is new inserted, it try to assign
  /// index without affecting existing instruction's index. Return true if all
  /// instructions index has been reassigned.
  bool getIndex(const MachineInstr &MI, uint64_t &Index) {
    if (!IsInitialized) {
      // First query since (re)initialization: number the whole block.
      init(*MI.getParent());
      IsInitialized = true;
      Index = Instr2PosIndex.at(&MI);
      return true;
    }

    assert(MI.getParent() == CurMBB && "MI is not in CurMBB");
    auto It = Instr2PosIndex.find(&MI);
    if (It != Instr2PosIndex.end()) {
      Index = It->second;
      return false;
    }

    // Distance is the number of consecutive unassigned instructions including
    // MI. Start is the first instruction of them. End is the next of last
    // instruction of them.
    // e.g.
    // |Instruction| A | B | C | MI | D | E |
    // | Index | 1024 | | | | | 2048 |
    //
    // In this case, B, C, MI, D are unassigned. Distance is 4, Start is B, End
    // is E.
    unsigned Distance = 1;
    // NOTE(review): the declaration of Start/End appears truncated in this
    // view; expected something like:
    //   MachineBasicBlock::const_iterator Start = MI.getIterator(),
    End = std::next(Start);
    while (Start != CurMBB->begin() &&
           !Instr2PosIndex.count(&*std::prev(Start))) {
      --Start;
      ++Distance;
    }
    while (End != CurMBB->end() && !Instr2PosIndex.count(&*(End))) {
      ++End;
      ++Distance;
    }

    // LastIndex is initialized to last used index prior to MI or zero.
    // In previous example, LastIndex is 1024, EndIndex is 2048;
    uint64_t LastIndex =
        Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(&*std::prev(Start));
    uint64_t Step;
    if (End == CurMBB->end())
      Step = static_cast<uint64_t>(InstrDist);
    else {
      // No instruction uses index zero.
      uint64_t EndIndex = Instr2PosIndex.at(&*End);
      assert(EndIndex > LastIndex && "Index must be ascending order");
      unsigned NumAvailableIndexes = EndIndex - LastIndex - 1;
      // We want index gap between two adjacent MI is as same as possible. Given
      // total A available indexes, D is number of consecutive unassigned
      // instructions, S is the step.
      // |<- S-1 -> MI <- S-1 -> MI <- A-S*D ->|
      // There're S-1 available indexes between unassigned instruction and its
      // predecessor. There're A-S*D available indexes between the last
      // unassigned instruction and its successor.
      // Ideally, we want
      // S-1 = A-S*D
      // then
      // S = (A+1)/(D+1)
      // An valid S must be integer greater than zero, so
      // S <= (A+1)/(D+1)
      // =>
      // A-S*D >= 0
      // That means we can safely use (A+1)/(D+1) as step.
      // In previous example, Step is 204, Index of B, C, MI, D is 1228, 1432,
      // 1636, 1840.
      Step = (NumAvailableIndexes + 1) / (Distance + 1);
    }

    // Reassign index for all instructions if number of new inserted
    // instructions exceed slot or all instructions are new.
    if (LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) {
      init(*CurMBB);
      Index = Instr2PosIndex.at(&MI);
      return true;
    }

    // Spread the unassigned instructions evenly over the available gap.
    for (auto I = Start; I != End; ++I) {
      LastIndex += Step;
      Instr2PosIndex[&*I] = LastIndex;
    }
    Index = Instr2PosIndex.at(&MI);
    return false;
  }

private:
  bool IsInitialized = false; // True once Instr2PosIndex reflects CurMBB.
  enum { InstrDist = 1024 };  // Default gap between adjacent indexes.
  const MachineBasicBlock *CurMBB = nullptr; // Block the numbering refers to.
  DenseMap<const MachineInstr *, uint64_t> Instr2PosIndex;
};
177
/// Implementation of the fast ("-O0") register allocator. Allocation proceeds
/// one basic block at a time; state below describes the block currently being
/// allocated.
class RegAllocFastImpl {
public:
  RegAllocFastImpl(const RegAllocFilterFunc F = nullptr,
                   bool ClearVirtRegs_ = true)
      : ShouldAllocateRegisterImpl(F), StackSlotForVirtReg(-1),
        ClearVirtRegs(ClearVirtRegs_) {}

private:
  MachineFrameInfo *MFI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  RegisterClassInfo RegClassInfo;
  /// Optional filter selecting which virtual registers this allocator
  /// handles; null means all of them (see shouldAllocateRegister).
  const RegAllocFilterFunc ShouldAllocateRegisterImpl;

  /// Basic block currently being allocated.
  MachineBasicBlock *MBB = nullptr;

  /// Maps virtual regs to the frame index where these values are spilled.
  IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;

  /// Everything we know about a live virtual register.
  struct LiveReg {
    MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
    Register VirtReg;                ///< Virtual register number.
    MCPhysReg PhysReg = 0;           ///< Currently held here.
    bool LiveOut = false;            ///< Register is possibly live out.
    bool Reloaded = false;           ///< Register was reloaded.
    bool Error = false;              ///< Could not allocate.

    explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
    explicit LiveReg() = default;

    unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
  };

  using LiveRegMap = SparseSet<LiveReg, unsigned, identity, uint16_t>;
  /// This map contains entries for each virtual register that is currently
  /// available in a physical register.
  LiveRegMap LiveVirtRegs;

  /// Stores assigned virtual registers present in the bundle MI.
  DenseMap<Register, LiveReg> BundleVirtRegsMap;

  /// DBG_VALUE operands referencing each virtual register, updated when the
  /// vreg is spilled (see spill()).
  DenseMap<Register, SmallVector<MachineOperand *, 2>> LiveDbgValueMap;
  /// List of DBG_VALUE that we encountered without the vreg being assigned
  /// because they were placed after the last use of the vreg.
  DenseMap<Register, SmallVector<MachineInstr *, 1>> DanglingDbgValues;

  /// Has a bit set for every virtual register for which it was determined
  /// that it is alive across blocks.
  BitVector MayLiveAcrossBlocks;

  /// State of a register unit.
  enum RegUnitState {
    /// A free register is not currently in use and can be allocated
    /// immediately without checking aliases.
    regFree,

    /// A pre-assigned register has been assigned before register allocation
    /// (e.g., setting up a call parameter).
    regPreAssigned,

    /// Used temporarily in reloadAtBegin() to mark register units that are
    /// live-in to the basic block.
    regLiveIn,

    /// A register state may also be a virtual register number, indication
    /// that the physical register is currently allocated to a virtual
    /// register. In that case, LiveVirtRegs contains the inverse mapping.
  };

  /// Maps each physical register to a RegUnitState enum or virtual register.
  std::vector<unsigned> RegUnitStates;

  // NOTE(review): a member declaration appears truncated here in this view
  // (presumably the list of coalesced copy instructions to erase).

  /// Track register units that are used in the current instruction, and so
  /// cannot be allocated.
  ///
  /// In the first phase (tied defs/early clobber), we consider also physical
  /// uses, afterwards, we don't. If the lowest bit isn't set, it's a solely
  /// physical use (markPhysRegUsedInInstr), otherwise, it's a normal use. To
  /// avoid resetting the entire vector after every instruction, we track the
  /// instruction "generation" in the remaining 31 bits -- this means, that if
  /// UsedInInstr[Idx] < InstrGen, the register unit is unused. InstrGen is
  /// never zero and always incremented by two.
  ///
  /// Don't allocate inline storage: the number of register units is typically
  /// quite large (e.g., AArch64 > 100, X86 > 200, AMDGPU > 1000).
  uint32_t InstrGen;
  SmallVector<unsigned, 0> UsedInInstr;

  SmallVector<unsigned, 8> DefOperandIndexes;
  // Register masks attached to the current instruction.
  // NOTE(review): the RegMasks declaration appears truncated in this view
  // (isClobberedByRegMasks below iterates over it).

  // Assign index for each instruction to quickly determine dominance.
  InstrPosIndexes PosIndexes;

  void setPhysRegState(MCRegister PhysReg, unsigned NewState);
  bool isPhysRegFree(MCRegister PhysReg) const;

  /// Mark a physreg as used in this instruction.
  void markRegUsedInInstr(MCPhysReg PhysReg) {
    // Low bit set = "normal" (non-phys-only) use for this generation.
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      UsedInInstr[Unit] = InstrGen | 1;
  }

  // Check if physreg is clobbered by instruction's regmask(s).
  bool isClobberedByRegMasks(MCRegister PhysReg) const {
    return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
      return MachineOperand::clobbersPhysReg(Mask, PhysReg);
    });
  }

  /// Check if a physreg or any of its aliases are used in this instruction.
  bool isRegUsedInInstr(MCRegister PhysReg, bool LookAtPhysRegUses) const {
    if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
      return true;
    // With LookAtPhysRegUses the low "normal use" bit is ignored, so solely
    // physical uses (value == InstrGen) also count as used.
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      if (UsedInInstr[Unit] >= (InstrGen | !LookAtPhysRegUses))
        return true;
    return false;
  }

  /// Mark physical register as being used in a register use operand.
  /// This is only used by the special livethrough handling code.
  void markPhysRegUsedInInstr(MCRegister PhysReg) {
    for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
      assert(UsedInInstr[Unit] <= InstrGen && "non-phys use before phys use?");
      UsedInInstr[Unit] = InstrGen;
    }
  }

  /// Remove mark of physical register being used in the instruction.
  void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
    for (MCRegUnit Unit : TRI->regunits(PhysReg))
      UsedInInstr[Unit] = 0;
  }

  /// Heuristic spill costs used by calcSpillCost()/allocVirtReg().
  enum : unsigned {
    spillClean = 50,
    spillDirty = 100,
    spillPrefBonus = 20,
    spillImpossible = ~0u
  };

public:
  bool ClearVirtRegs;

  bool runOnMachineFunction(MachineFunction &MF);

private:
  void allocateBasicBlock(MachineBasicBlock &MBB);

  void addRegClassDefCounts(MutableArrayRef<unsigned> RegClassDefCounts,
                            Register Reg) const;

  void findAndSortDefOperandIndexes(const MachineInstr &MI);

  void allocateInstruction(MachineInstr &MI);
  void handleDebugValue(MachineInstr &MI);
  void handleBundle(MachineInstr &MI);

  bool usePhysReg(MachineInstr &MI, MCRegister PhysReg);
  bool definePhysReg(MachineInstr &MI, MCRegister PhysReg);
  bool displacePhysReg(MachineInstr &MI, MCRegister PhysReg);
  void freePhysReg(MCRegister PhysReg);

  unsigned calcSpillCost(MCPhysReg PhysReg) const;

  LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
    return LiveVirtRegs.find(VirtReg.virtRegIndex());
  }

  LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
    return LiveVirtRegs.find(VirtReg.virtRegIndex());
  }

  void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCRegister PhysReg);
  void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
                    bool LookAtPhysRegUses = false);
  void allocVirtRegUndef(MachineOperand &MO);
  void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
                                 MCRegister Reg);
  bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
                                Register VirtReg);
  bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
                     bool LookAtPhysRegUses = false);
  bool useVirtReg(MachineInstr &MI, MachineOperand &MO, Register VirtReg);

  MCPhysReg getErrorAssignment(const LiveReg &LR, MachineInstr &MI,
                               const TargetRegisterClass &RC);

  // NOTE(review): the return type line appears truncated here in this view
  // (presumably MachineBasicBlock::iterator).
  getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
                            SmallSet<Register, 2> &PrologLiveIns) const;

  void reloadAtBegin(MachineBasicBlock &MBB);
  bool setPhysReg(MachineInstr &MI, MachineOperand &MO,
                  const LiveReg &Assignment);

  Register traceCopies(Register VirtReg) const;
  Register traceCopyChain(Register Reg) const;

  bool shouldAllocateRegister(const Register Reg) const;
  int getStackSpaceFor(Register VirtReg);
  void spill(MachineBasicBlock::iterator Before, Register VirtReg,
             MCPhysReg AssignedReg, bool Kill, bool LiveOut);
  void reload(MachineBasicBlock::iterator Before, Register VirtReg,
              MCPhysReg PhysReg);

  bool mayLiveOut(Register VirtReg);
  bool mayLiveIn(Register VirtReg);

  bool mayBeSpillFromInlineAsmBr(const MachineInstr &MI) const;

  void dumpState() const;
};
398
/// Legacy pass-manager wrapper that delegates all work to RegAllocFastImpl.
class RegAllocFast : public MachineFunctionPass {
  RegAllocFastImpl Impl;

public:
  static char ID; // Pass identification.

  RegAllocFast(const RegAllocFilterFunc F = nullptr, bool ClearVirtRegs_ = true)
      : MachineFunctionPass(ID), Impl(F, ClearVirtRegs_) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    return Impl.runOnMachineFunction(MF);
  }

  StringRef getPassName() const override { return "Fast Register Allocator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    // NOTE(review): a line appears truncated here in this view (presumably
    // the MachineFunctionPass::getAnalysisUsage(AU) chain call).
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoPHIs();
  }

  MachineFunctionProperties getSetProperties() const override {
    // Only when virtual registers are rewritten away can we promise NoVRegs.
    if (Impl.ClearVirtRegs) {
      return MachineFunctionProperties().setNoVRegs();
    }

    return MachineFunctionProperties();
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().setIsSSA();
  }
};
435
436} // end anonymous namespace
437
// Pass identification: the address of ID serves as the unique pass identity.
char RegAllocFast::ID = 0;

// Register the pass with the legacy PassRegistry under "regallocfast".
INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)
442
443bool RegAllocFastImpl::shouldAllocateRegister(const Register Reg) const {
444 assert(Reg.isVirtual());
445 if (!ShouldAllocateRegisterImpl)
446 return true;
447
448 return ShouldAllocateRegisterImpl(*TRI, *MRI, Reg);
449}
450
451void RegAllocFastImpl::setPhysRegState(MCRegister PhysReg, unsigned NewState) {
452 for (MCRegUnit Unit : TRI->regunits(PhysReg))
453 RegUnitStates[Unit] = NewState;
454}
455
456bool RegAllocFastImpl::isPhysRegFree(MCRegister PhysReg) const {
457 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
458 if (RegUnitStates[Unit] != regFree)
459 return false;
460 }
461 return true;
462}
463
464/// This allocates space for the specified virtual register to be held on the
465/// stack.
466int RegAllocFastImpl::getStackSpaceFor(Register VirtReg) {
467 // Find the location Reg would belong...
468 int SS = StackSlotForVirtReg[VirtReg];
469 // Already has space allocated?
470 if (SS != -1)
471 return SS;
472
473 // Allocate a new stack object for this spill location...
474 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
475 unsigned Size = TRI->getSpillSize(RC);
476 Align Alignment = TRI->getSpillAlign(RC);
477
478 const MachineFunction &MF = MRI->getMF();
479 auto &ST = MF.getSubtarget();
480 Align CurrentAlign = ST.getFrameLowering()->getStackAlign();
481 if (Alignment > CurrentAlign && !TRI->canRealignStack(MF))
482 Alignment = CurrentAlign;
483
484 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
485
486 // Assign the slot.
487 StackSlotForVirtReg[VirtReg] = FrameIdx;
488 return FrameIdx;
489}
490
491static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A,
492 const MachineInstr &B) {
493 uint64_t IndexA, IndexB;
494 PosIndexes.getIndex(A, IndexA);
495 if (LLVM_UNLIKELY(PosIndexes.getIndex(B, IndexB)))
496 PosIndexes.getIndex(A, IndexA);
497 return IndexA < IndexB;
498}
499
/// Returns true if \p MI is a spill of a live-in physical register in a block
/// targeted by an INLINEASM_BR. Such spills must precede reloads of live-in
/// virtual registers, so that we do not reload from an uninitialized stack
/// slot.
bool RegAllocFastImpl::mayBeSpillFromInlineAsmBr(const MachineInstr &MI) const {
  int FI;
  auto *MBB = MI.getParent();
  // NOTE(review): the first line of this `if` condition appears truncated in
  // this view; the visible remainder requires FI to be a spill-slot index.
      MFI->isSpillSlotObjectIndex(FI))
    // Report a spill when any register operand is a block live-in.
    for (const auto &Op : MI.operands())
      if (Op.isReg() && MBB->isLiveIn(Op.getReg()))
        return true;
  return false;
}
514
/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
  if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex())) {
    // Cannot be live-out if there are no successors.
    return !MBB->succ_empty();
  }

  // Earliest def of VirtReg inside this block, if the block self-loops.
  const MachineInstr *SelfLoopDef = nullptr;

  // If this block loops back to itself, it is necessary to check whether the
  // use comes after the def.
  if (MBB->isSuccessor(MBB)) {
    // Find the first def in the self loop MBB.
    for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
      if (DefInst.getParent() != MBB) {
        // Defined in another block -> conservatively cross-block.
        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
        return true;
      } else {
        // Keep the earliest in-block def (position-index dominance).
        if (!SelfLoopDef || dominates(PosIndexes, DefInst, *SelfLoopDef))
          SelfLoopDef = &DefInst;
      }
    }
    if (!SelfLoopDef) {
      // No def at all in this block: value must flow around the loop.
      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
      return true;
    }
  }

  // See if the first \p Limit uses of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
    if (UseInst.getParent() != MBB || ++C >= Limit) {
      // Too many uses to check cheaply, or a use in another block.
      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
      // Cannot be live-out if there are no successors.
      return !MBB->succ_empty();
    }

    if (SelfLoopDef) {
      // Try to handle some simple cases to avoid spilling and reloading every
      // value inside a self looping block.
      if (SelfLoopDef == &UseInst ||
          !dominates(PosIndexes, *SelfLoopDef, UseInst)) {
        // A use at or before the earliest def reads the previous iteration's
        // value, so the vreg is live around the back edge.
        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
        return true;
      }
    }
  }

  return false;
}
567
568/// Returns false if \p VirtReg is known to not be live into the current block.
569bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
570 if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex()))
571 return !MBB->pred_empty();
572
573 // See if the first \p Limit def of the register are all in the current block.
574 static const unsigned Limit = 8;
575 unsigned C = 0;
576 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
577 if (DefInst.getParent() != MBB || ++C >= Limit) {
578 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
579 return !MBB->pred_empty();
580 }
581 }
582
583 return false;
584}
585
/// Insert spill instruction for \p AssignedReg before \p Before. Update
/// DBG_VALUEs with \p VirtReg operands with the stack slot.
void RegAllocFastImpl::spill(MachineBasicBlock::iterator Before,
                             Register VirtReg, MCPhysReg AssignedReg, bool Kill,
                             bool LiveOut) {
  LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in "
                    << printReg(AssignedReg, TRI));
  int FI = getStackSpaceFor(VirtReg);
  LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, VirtReg);
  ++NumStores;

  // NOTE(review): a declaration appears truncated here in this view
  // (presumably the FirstTerm iterator used below for the live-out clone,
  // e.g. MBB->getFirstTerminator()).

  // When we spill a virtual register, we will have spill instructions behind
  // every definition of it, meaning we can switch all the DBG_VALUEs over
  // to just reference the stack slot.
  SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
  SmallMapVector<MachineInstr *, SmallVector<const MachineOperand *>, 2>
      SpilledOperandsMap;
  for (MachineOperand *MO : LRIDbgOperands)
    SpilledOperandsMap[MO->getParent()].push_back(MO);
  for (const auto &MISpilledOperands : SpilledOperandsMap) {
    MachineInstr &DBG = *MISpilledOperands.first;
    // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
    if (DBG.isDebugValueList())
      continue;
    MachineInstr *NewDV = buildDbgValueForSpill(
        *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
    assert(NewDV->getParent() == MBB && "dangling parent pointer");
    (void)NewDV;
    LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);

    if (LiveOut) {
      // We need to insert a DBG_VALUE at the end of the block if the spill slot
      // is live out, but there is another use of the value after the
      // spill. This will allow LiveDebugValues to see the correct live out
      // value to propagate to the successors.
      MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
      MBB->insert(FirstTerm, ClonedDV);
      LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
    }

    // Rewrite unassigned dbg_values to use the stack slot.
    // TODO We can potentially do this for list debug values as well if we know
    // how the dbg_values are getting unassigned.
    if (DBG.isNonListDebugValue()) {
      MachineOperand &MO = DBG.getDebugOperand(0);
      if (MO.isReg() && MO.getReg() == 0) {
        updateDbgValueForSpill(DBG, FI, 0);
      }
    }
  }
  // Now this register is spilled there is should not be any DBG_VALUE
  // pointing to this register because they are all pointing to spilled value
  // now.
  LRIDbgOperands.clear();
}
646
647/// Insert reload instruction for \p PhysReg before \p Before.
648void RegAllocFastImpl::reload(MachineBasicBlock::iterator Before,
649 Register VirtReg, MCPhysReg PhysReg) {
650 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
651 << printReg(PhysReg, TRI) << '\n');
652 int FI = getStackSpaceFor(VirtReg);
653 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
654 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, VirtReg);
655 ++NumLoads;
656}
657
/// Get basic block begin insertion point.
/// This is not just MBB.begin() because surprisingly we have EH_LABEL
/// instructions marking the begin of a basic block. This means we must insert
/// new instructions after such labels...
MachineBasicBlock::iterator RegAllocFastImpl::getMBBBeginInsertionPoint(
    MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
  // NOTE(review): the iterator declaration appears truncated in this view
  // (presumably MachineBasicBlock::iterator I = MBB.begin();).
  while (I != MBB.end()) {
    if (I->isLabel()) {
      ++I;
      continue;
    }

    // Skip prologues and inlineasm_br spills to place reloads afterwards.
    if (!TII->isBasicBlockPrologue(*I) && !mayBeSpillFromInlineAsmBr(*I))
      break;

    // However if a prolog instruction reads a register that needs to be
    // reloaded, the reload should be inserted before the prolog.
    for (MachineOperand &MO : I->operands()) {
      if (MO.isReg())
        PrologLiveIns.insert(MO.getReg());
    }

    ++I;
  }

  return I;
}
687
/// Reload all currently assigned virtual registers.
void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &MBB) {
  if (LiveVirtRegs.empty())
    return;

  for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
    MCRegister Reg = P.PhysReg;
    // Set state to live-in. This possibly overrides mappings to virtual
    // registers but we don't care anymore at this point.
    setPhysRegState(Reg, regLiveIn);
  }

  SmallSet<Register, 2> PrologLiveIns;

  // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
  // of spilling here is deterministic, if arbitrary.
  MachineBasicBlock::iterator InsertBefore =
      getMBBBeginInsertionPoint(MBB, PrologLiveIns);
  for (const LiveReg &LR : LiveVirtRegs) {
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg == 0 || LR.Error)
      continue;

    // Physical live-ins (marked regLiveIn above) need no reload.
    MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
    if (RegUnitStates[FirstUnit] == regLiveIn)
      continue;

    // NOTE(review): the first line of an assert appears truncated here in
    // this view; only its message string remains visible:
           "no reload in start block. Missing vreg def?");

    if (PrologLiveIns.count(PhysReg)) {
      // FIXME: Theoretically this should use an insert point skipping labels
      // but I'm not sure how labels should interact with prolog instruction
      // that need reloads.
      reload(MBB.begin(), LR.VirtReg, PhysReg);
    } else
      reload(InsertBefore, LR.VirtReg, PhysReg);
  }
  LiveVirtRegs.clear();
}
728
729/// Handle the direct use of a physical register. Check that the register is
730/// not used by a virtreg. Kill the physreg, marking it free. This may add
731/// implicit kills to MO->getParent() and invalidate MO.
732bool RegAllocFastImpl::usePhysReg(MachineInstr &MI, MCRegister Reg) {
733 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
734 bool displacedAny = displacePhysReg(MI, Reg);
735 setPhysRegState(Reg, regPreAssigned);
736 markRegUsedInInstr(Reg);
737 return displacedAny;
738}
739
740bool RegAllocFastImpl::definePhysReg(MachineInstr &MI, MCRegister Reg) {
741 bool displacedAny = displacePhysReg(MI, Reg);
742 setPhysRegState(Reg, regPreAssigned);
743 return displacedAny;
744}
745
746/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
747/// similar to defineVirtReg except the physreg is reserved instead of
748/// allocated.
749bool RegAllocFastImpl::displacePhysReg(MachineInstr &MI, MCRegister PhysReg) {
750 bool displacedAny = false;
751
752 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
753 switch (unsigned VirtReg = RegUnitStates[Unit]) {
754 default: {
755 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
756 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
757 MachineBasicBlock::iterator ReloadBefore =
758 std::next((MachineBasicBlock::iterator)MI.getIterator());
759 while (mayBeSpillFromInlineAsmBr(*ReloadBefore))
760 ++ReloadBefore;
761 reload(ReloadBefore, VirtReg, LRI->PhysReg);
762
763 setPhysRegState(LRI->PhysReg, regFree);
764 LRI->PhysReg = 0;
765 LRI->Reloaded = true;
766 displacedAny = true;
767 break;
768 }
769 case regPreAssigned:
770 RegUnitStates[Unit] = regFree;
771 displacedAny = true;
772 break;
773 case regFree:
774 break;
775 }
776 }
777 return displacedAny;
778}
779
780void RegAllocFastImpl::freePhysReg(MCRegister PhysReg) {
781 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
782
783 MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
784 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
785 case regFree:
786 LLVM_DEBUG(dbgs() << '\n');
787 return;
788 case regPreAssigned:
789 LLVM_DEBUG(dbgs() << '\n');
790 setPhysRegState(PhysReg, regFree);
791 return;
792 default: {
793 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
794 assert(LRI != LiveVirtRegs.end());
795 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
796 setPhysRegState(LRI->PhysReg, regFree);
797 LRI->PhysReg = 0;
798 }
799 return;
800 }
801}
802
803/// Return the cost of spilling clearing out PhysReg and aliases so it is free
804/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
805/// disabled - it can be allocated directly.
806/// \returns spillImpossible when PhysReg or an alias can't be spilled.
807unsigned RegAllocFastImpl::calcSpillCost(MCPhysReg PhysReg) const {
808 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
809 switch (unsigned VirtReg = RegUnitStates[Unit]) {
810 case regFree:
811 break;
812 case regPreAssigned:
813 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
814 << printReg(PhysReg, TRI) << '\n');
815 return spillImpossible;
816 default: {
817 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
818 findLiveVirtReg(VirtReg)->LiveOut;
819 return SureSpill ? spillClean : spillDirty;
820 }
821 }
822 }
823 return 0;
824}
825
826void RegAllocFastImpl::assignDanglingDebugValues(MachineInstr &Definition,
827 Register VirtReg,
828 MCRegister Reg) {
829 auto UDBGValIter = DanglingDbgValues.find(VirtReg);
830 if (UDBGValIter == DanglingDbgValues.end())
831 return;
832
833 SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
834 for (MachineInstr *DbgValue : Dangling) {
835 assert(DbgValue->isDebugValue());
836 if (!DbgValue->hasDebugOperandForReg(VirtReg))
837 continue;
838
839 // Test whether the physreg survives from the definition to the DBG_VALUE.
840 MCPhysReg SetToReg = Reg;
841 unsigned Limit = 20;
842 for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
843 E = DbgValue->getIterator();
844 I != E; ++I) {
845 if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
846 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
847 << '\n');
848 SetToReg = 0;
849 break;
850 }
851 }
852 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
853 MO.setReg(SetToReg);
854 if (SetToReg != 0)
855 MO.setIsRenamable();
856 }
857 }
858 Dangling.clear();
859}
860
861/// This method updates local state so that we know that PhysReg is the
862/// proper container for VirtReg now. The physical register must not be used
863/// for anything else when this is called.
864void RegAllocFastImpl::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
865 MCRegister PhysReg) {
866 Register VirtReg = LR.VirtReg;
867 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
868 << printReg(PhysReg, TRI) << '\n');
869 assert(LR.PhysReg == 0 && "Already assigned a physreg");
870 assert(PhysReg != 0 && "Trying to assign no register");
871 LR.PhysReg = PhysReg;
872 setPhysRegState(PhysReg, VirtReg.id());
873
874 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
875}
876
// Full-register COPYs are the only instructions considered for coalescing
// (see traceCopies/traceCopyChain and the NumCoalesced statistic).
static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); }
878
/// Follow a chain of full COPYs starting at \p Reg for at most
/// ChainLengthLimit hops. Returns the physical register found at the end of
/// the chain, or 0 if the chain ends at a non-copy or is too long.
Register RegAllocFastImpl::traceCopyChain(Register Reg) const {
  static const unsigned ChainLengthLimit = 3;
  unsigned C = 0;
  do {
    if (Reg.isPhysical())
      return Reg;
    // NOTE(review): a line appears truncated here in this view (presumably
    // assert(Reg.isVirtual());).

    // Only a unique, coalescable (full copy) def continues the chain.
    MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
    if (!VRegDef || !isCoalescable(*VRegDef))
      return 0;
    Reg = VRegDef->getOperand(1).getReg();
  } while (++C <= ChainLengthLimit);
  return 0;
}
894
895/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
896/// chain of copies to check whether we reach a physical register we can
897/// coalesce with.
898Register RegAllocFastImpl::traceCopies(Register VirtReg) const {
899 static const unsigned DefLimit = 3;
900 unsigned C = 0;
901 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
902 if (isCoalescable(MI)) {
903 Register Reg = MI.getOperand(1).getReg();
904 Reg = traceCopyChain(Reg);
905 if (Reg.isValid())
906 return Reg;
907 }
908
909 if (++C >= DefLimit)
910 break;
911 }
912 return Register();
913}
914
915/// Allocates a physical register for VirtReg.
916void RegAllocFastImpl::allocVirtReg(MachineInstr &MI, LiveReg &LR,
917 Register Hint0, bool LookAtPhysRegUses) {
918 const Register VirtReg = LR.VirtReg;
919 assert(LR.PhysReg == 0);
920
921 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
922 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
923 << " in class " << TRI->getRegClassName(&RC)
924 << " with hint " << printReg(Hint0, TRI) << '\n');
925
926 // Take hint when possible.
927 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
928 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
929 // Take hint if the register is currently free.
930 if (isPhysRegFree(Hint0)) {
931 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
932 << '\n');
933 assignVirtToPhysReg(MI, LR, Hint0);
934 return;
935 } else {
936 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
937 << " occupied\n");
938 }
939 } else {
940 Hint0 = Register();
941 }
942
943 // Try other hint.
944 Register Hint1 = traceCopies(VirtReg);
945 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
946 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
947 // Take hint if the register is currently free.
948 if (isPhysRegFree(Hint1)) {
949 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
950 << '\n');
951 assignVirtToPhysReg(MI, LR, Hint1);
952 return;
953 } else {
954 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
955 << " occupied\n");
956 }
957 } else {
958 Hint1 = Register();
959 }
960
961 MCPhysReg BestReg = 0;
962 unsigned BestCost = spillImpossible;
963 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
964 for (MCPhysReg PhysReg : AllocationOrder) {
965 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
966 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
967 LLVM_DEBUG(dbgs() << "already used in instr.\n");
968 continue;
969 }
970
971 unsigned Cost = calcSpillCost(PhysReg);
972 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
973 // Immediate take a register with cost 0.
974 if (Cost == 0) {
975 assignVirtToPhysReg(MI, LR, PhysReg);
976 return;
977 }
978
979 if (PhysReg == Hint0 || PhysReg == Hint1)
980 Cost -= spillPrefBonus;
981
982 if (Cost < BestCost) {
983 BestReg = PhysReg;
984 BestCost = Cost;
985 }
986 }
987
988 if (!BestReg) {
989 // Nothing we can do: Report an error and keep going with an invalid
990 // allocation.
991 LR.PhysReg = getErrorAssignment(LR, MI, RC);
992 LR.Error = true;
993 return;
994 }
995
996 displacePhysReg(MI, BestReg);
997 assignVirtToPhysReg(MI, LR, BestReg);
998}
999
1000void RegAllocFastImpl::allocVirtRegUndef(MachineOperand &MO) {
1001 assert(MO.isUndef() && "expected undef use");
1002 Register VirtReg = MO.getReg();
1003 assert(VirtReg.isVirtual() && "Expected virtreg");
1004 if (!shouldAllocateRegister(VirtReg))
1005 return;
1006
1007 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
1008 MCPhysReg PhysReg;
1009 bool IsRenamable = true;
1010 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1011 PhysReg = LRI->PhysReg;
1012 } else {
1013 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1014 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1015 if (AllocationOrder.empty()) {
1016 // All registers in the class were reserved.
1017 //
1018 // It might be OK to take any entry from the class as this is an undef
1019 // use, but accepting this would give different behavior than greedy and
1020 // basic.
1021 PhysReg = getErrorAssignment(*LRI, *MO.getParent(), RC);
1022 LRI->Error = true;
1023 IsRenamable = false;
1024 } else
1025 PhysReg = AllocationOrder.front();
1026 }
1027
1028 unsigned SubRegIdx = MO.getSubReg();
1029 if (SubRegIdx != 0) {
1030 PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
1031 MO.setSubReg(0);
1032 }
1033 MO.setReg(PhysReg);
1034 MO.setIsRenamable(IsRenamable);
1035}
1036
/// Variation of defineVirtReg() with special handling for livethrough regs
/// (tied or earlyclobber) that may interfere with preassigned uses.
///
/// If the register previously assigned to \p VirtReg is also used elsewhere
/// in \p MI (e.g. as a preassigned use), the virtreg gets a fresh assignment
/// and a COPY from the new register into the old one is inserted immediately
/// after \p MI to keep downstream (already-allocated) code correct.
///
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineLiveThroughVirtReg(MachineInstr &MI,
                                                unsigned OpNum,
                                                Register VirtReg) {
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end()) {
    MCPhysReg PrevReg = LRI->PhysReg;
    if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
      // The current assignment clashes with another operand of this
      // instruction: free it, allocate a replacement, and patch up with a
      // COPY placed right after MI.
      LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
                        << " (tied/earlyclobber resolution)\n");
      freePhysReg(PrevReg);
      LRI->PhysReg = 0;
      allocVirtReg(MI, *LRI, 0, true);
      MachineBasicBlock::iterator InsertBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
                        << printReg(PrevReg, TRI) << '\n');
      BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
              TII->get(TargetOpcode::COPY), PrevReg)
          .addReg(LRI->PhysReg, llvm::RegState::Kill);
    }
    MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.getSubReg() && !MO.isUndef()) {
      // A non-undef subregister def reads the remaining lanes of the
      // register, so treat MI as a use of the value.
      LRI->LastUse = &MI;
    }
  }
  return defineVirtReg(MI, OpNum, VirtReg, true);
}
1069
/// Allocates a register for VirtReg definition. Typically the register is
/// already assigned from a use of the virtreg, however we still need to
/// perform an allocation if:
/// - It is a dead definition without any uses.
/// - The value is live out and all uses are in different basic blocks.
///
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                     Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // Allocation runs bottom-up, so a def seen before any use means the value
    // has no use in this block: it is either dead or live-out.
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
  } else {
    assert((!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) || LRI->Error) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  if (LRI->Reloaded || LRI->LiveOut) {
    // The value must survive past this def: spill it right after MI so later
    // (already-allocated) reloads read the correct value.
    if (!MI.isImplicitDef()) {
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut
                        << " RL: " << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
                                     &RC, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = *LRI;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, *LRI);
}
1143
/// Allocates a register for a VirtReg use.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::useVirtReg(MachineInstr &MI, MachineOperand &MO,
                                  Register VirtReg) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // Allocation runs bottom-up, so the first use we encounter is the last
    // one in the block: the value either dies here or lives out.
    if (!MO.isKill()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a last (killing) use without the kill flag; add the flag now.
        MO.setIsKill(true);
      }
    }
  } else {
    assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
  }

  // If necessary allocate a register.
  if (LRI->PhysReg == 0) {
    assert(!MO.isTied() && "tied op should be allocated");
    Register Hint;
    if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
      // Hint the destination register of the copy so the COPY has a chance
      // of becoming an identity copy that can be coalesced away.
      Hint = MI.getOperand(0).getReg();
      if (Hint.isVirtual()) {
        assert(!shouldAllocateRegister(Hint));
        Hint = Register();
      } else {
        assert(Hint.isPhysical() &&
               "Copy destination should already be assigned");
      }
    }
    allocVirtReg(MI, *LRI, Hint, false);
  }

  LRI->LastUse = &MI;

  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = *LRI;
  }
  markRegUsedInInstr(LRI->PhysReg);
  return setPhysReg(MI, MO, *LRI);
}
1192
1193/// Query a physical register to use as a filler in contexts where the
1194/// allocation has failed. This will raise an error, but not abort the
1195/// compilation.
1196MCPhysReg RegAllocFastImpl::getErrorAssignment(const LiveReg &LR,
1197 MachineInstr &MI,
1198 const TargetRegisterClass &RC) {
1199 MachineFunction &MF = *MI.getMF();
1200
1201 // Avoid repeating the error every time a register is used.
1202 bool EmitError = !MF.getProperties().hasFailedRegAlloc();
1203 if (EmitError)
1204 MF.getProperties().setFailedRegAlloc();
1205
1206 // If the allocation order was empty, all registers in the class were
1207 // probably reserved. Fall back to taking the first register in the class,
1208 // even if it's reserved.
1209 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1210 if (AllocationOrder.empty()) {
1211 const Function &Fn = MF.getFunction();
1212 if (EmitError) {
1213 Fn.getContext().diagnose(DiagnosticInfoRegAllocFailure(
1214 "no registers from class available to allocate", Fn,
1215 MI.getDebugLoc()));
1216 }
1217
1218 ArrayRef<MCPhysReg> RawRegs = RC.getRegisters();
1219 assert(!RawRegs.empty() && "register classes cannot have no registers");
1220 return RawRegs.front();
1221 }
1222
1223 if (!LR.Error && EmitError) {
1224 // Nothing we can do: Report an error and keep going with an invalid
1225 // allocation.
1226 if (MI.isInlineAsm()) {
1227 MI.emitInlineAsmError(
1228 "inline assembly requires more registers than available");
1229 } else {
1230 const Function &Fn = MBB->getParent()->getFunction();
1231 Fn.getContext().diagnose(DiagnosticInfoRegAllocFailure(
1232 "ran out of registers during register allocation", Fn,
1233 MI.getDebugLoc()));
1234 }
1235 }
1236
1237 return AllocationOrder.front();
1238}
1239
/// Changes operand \p MO in \p MI to refer to the physical register held by
/// \p Assignment, considering subregister indices.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                                  const LiveReg &Assignment) {
  MCPhysReg PhysReg = Assignment.PhysReg;
  assert(PhysReg && "assignments should always be to a valid physreg");

  if (LLVM_UNLIKELY(Assignment.Error)) {
    // Make sure we don't set renamable in error scenarios, as we may have
    // assigned to a reserved register.
    if (MO.isUse())
      MO.setIsUndef(true);
  }

  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(!Assignment.Error);
    return false;
  }

  // Handle subregister index.
  MO.setReg(TRI->getSubReg(PhysReg, MO.getSubReg()));
  MO.setIsRenamable(!Assignment.Error);

  // Note: We leave the subreg number around a little longer in case of defs.
  // This is so that the register freeing logic in allocateInstruction can still
  // recognize this as subregister defs. The code there will clear the number.
  if (!MO.isDef())
    MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef()) {
    if (MO.isDead())
      MI.addRegisterDead(PhysReg, TRI, true);
    else
      MI.addRegisterDefined(PhysReg, TRI);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }
  return false;
}
1290
1291#ifndef NDEBUG
1292
/// Dump the register-unit -> virtreg mapping to dbgs() and assert that
/// LiveVirtRegs is its exact inverse. Debug builds only (NDEBUG-guarded).
void RegAllocFastImpl::dumpState() const {
  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
       ++Unit) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regPreAssigned:
      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    case regLiveIn:
      llvm_unreachable("Should not have regLiveIn in map");
    default: {
      // Unit is occupied by a virtual register; print it together with its
      // live-out ('O') and reloaded ('R') flags.
      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
      if (I->LiveOut || I->Reloaded) {
        dbgs() << '[';
        if (I->LiveOut)
          dbgs() << 'O';
        if (I->Reloaded)
          dbgs() << 'R';
        dbgs() << ']';
      }
      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
  for (const LiveReg &LR : LiveVirtRegs) {
    Register VirtReg = LR.VirtReg;
    assert(VirtReg.isVirtual() && "Bad map key");
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg != 0) {
      assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg");
      for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
        assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
      }
    }
  }
}
1335#endif
1336
1337/// Count number of defs consumed from each register class by \p Reg
1338void RegAllocFastImpl::addRegClassDefCounts(
1339 MutableArrayRef<unsigned> RegClassDefCounts, Register Reg) const {
1340 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1341
1342 if (Reg.isVirtual()) {
1343 if (!shouldAllocateRegister(Reg))
1344 return;
1345 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1346 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1347 RCIdx != RCIdxEnd; ++RCIdx) {
1348 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1349 // FIXME: Consider aliasing sub/super registers.
1350 if (OpRC->hasSubClassEq(IdxRC))
1351 ++RegClassDefCounts[RCIdx];
1352 }
1353
1354 return;
1355 }
1356
1357 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1358 RCIdx != RCIdxEnd; ++RCIdx) {
1359 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1360 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1361 if (IdxRC->contains(*Alias)) {
1362 ++RegClassDefCounts[RCIdx];
1363 break;
1364 }
1365 }
1366 }
1367}
1368
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFastImpl::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      if (Reg.isPhysical()) {
        // A physreg use blocks that register for the defs assigned here.
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef() && Reg.isVirtual() && shouldAllocateRegister(Reg))
      DefOperandIndexes.push_back(I);
  }

  // Most instructions only have one virtual def, so there's no point in
  // computing the possible number of defs for every register class.
  if (DefOperandIndexes.size() <= 1)
    return;

  // Track number of defs which may consume a register from the class. This is
  // used to assign registers for possibly-too-small classes first. Example:
  // defs are eax, 3 * gr32_abcd, 2 * gr32 => we want to assign the gr32_abcd
  // registers first so that the gr32 don't use the gr32_abcd registers before
  // we assign these.
  SmallVector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);

  for (const MachineOperand &MO : MI.all_defs())
    addRegClassDefCounts(RegClassDefCounts, MO.getReg());

  // Sort order: small classes first, then early clobbers/livethroughs, then
  // operand index as a deterministic tie-break.
  llvm::sort(DefOperandIndexes, [&](unsigned I0, unsigned I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclass that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
1441
1442// Returns true if MO is tied and the operand it's tied to is not Undef (not
1443// Undef is not the same thing as Def).
1444static bool isTiedToNotUndef(const MachineOperand &MO) {
1445 if (!MO.isTied())
1446 return false;
1447 const MachineInstr &MI = *MO.getParent();
1448 unsigned TiedIdx = MI.findTiedOperandIdx(MI.getOperandNo(&MO));
1449 const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
1450 return !TiedMO.isUndef();
1451}
1452
/// Allocate all virtual-register operands of a single instruction, inserting
/// spills/reloads and updating the register-unit state as needed.
void RegAllocFastImpl::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  // Advance the UsedInInstr generation instead of clearing the whole map.
  InstrGen += 2;
  // In the event we ever get more than 2**31 instructions...
  if (LLVM_UNLIKELY(InstrGen == 0)) {
    UsedInInstr.assign(UsedInInstr.size(), 0);
    InstrGen = 2;
  }
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          if (isTiedToNotUndef(MO) || (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister defs:
      // Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (unsigned OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            Register Reg = MO.getReg();
            if (MO.isEarlyClobber() || isTiedToNotUndef(MO) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            // Implicit operands of MI were re-arranged,
            // re-compute DefOperandIndexes.
            if (ReArrangedImplicitOps)
              break;
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (MachineOperand &MO : MI.all_defs()) {
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps =
                  defineVirtReg(MI, MI.getOperandNo(&MO), Reg);
              if (ReArrangedImplicitOps)
                break;
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if (isTiedToNotUndef(MO) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      if (!usePhysReg(MI, Reg))
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, MO, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assign
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //   early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    // Identity copy: queue for deletion (done later in allocateBasicBlock,
    // since LiveVirtRegs may still refer to the instruction).
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
1712
1713void RegAllocFastImpl::handleDebugValue(MachineInstr &MI) {
1714 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1715 // mostly constants and frame indices.
1716 assert(MI.isDebugValue() && "not a DBG_VALUE*");
1717 for (const auto &MO : MI.debug_operands()) {
1718 if (!MO.isReg())
1719 continue;
1720 Register Reg = MO.getReg();
1721 if (!Reg.isVirtual())
1722 continue;
1723 if (!shouldAllocateRegister(Reg))
1724 continue;
1725
1726 // Already spilled to a stackslot?
1727 int SS = StackSlotForVirtReg[Reg];
1728 if (SS != -1) {
1729 // Modify DBG_VALUE now that the value is in a spill slot.
1731 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1732 continue;
1733 }
1734
1735 // See if this virtual register has already been allocated to a physical
1736 // register or spilled to a stack slot.
1737 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1739 llvm::make_pointer_range(MI.getDebugOperandsForReg(Reg)));
1740
1741 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1742 // Update every use of Reg within MI.
1743 for (auto &RegMO : DbgOps)
1744 setPhysReg(MI, *RegMO, *LRI);
1745 } else {
1746 DanglingDbgValues[Reg].push_back(&MI);
1747 }
1748
1749 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1750 // that future spills of Reg will have DBG_VALUEs.
1751 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1752 }
1753}
1754
1755void RegAllocFastImpl::handleBundle(MachineInstr &MI) {
1756 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1757 ++BundledMI;
1758 while (BundledMI->isBundledWithPred()) {
1759 for (MachineOperand &MO : BundledMI->operands()) {
1760 if (!MO.isReg())
1761 continue;
1762
1763 Register Reg = MO.getReg();
1764 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1765 continue;
1766
1767 DenseMap<Register, LiveReg>::iterator DI = BundleVirtRegsMap.find(Reg);
1768 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1769
1770 setPhysReg(MI, MO, DI->second);
1771 }
1772
1773 ++BundledMI;
1774 }
1775}
1776
/// Allocate all instructions in \p MBB, walking the block bottom-up, then
/// insert the reloads needed at the block entry and clean up coalesced
/// copies and dangling debug values.
void RegAllocFastImpl::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  // Reset per-block state.
  PosIndexes.unsetInitialized();
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  // Registers live-out of the block are in use by successors: mark them
  // pre-assigned so they are not handed out here.
  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  // DBG_VALUEs whose vreg never received an assignment can no longer describe
  // a location: mark them undef.
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
1837
/// Entry point: set up per-function state and allocate every basic block.
/// Always returns true (the function is always modified, or at minimum the
/// reserved registers are frozen).
bool RegAllocFastImpl::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs();
  RegClassInfo.runOnMachineFunction(MF);
  unsigned NumRegUnits = TRI->getNumRegUnits();
  // Reset the UsedInInstr generation counter (see allocateInstruction).
  InstrGen = 0;
  UsedInInstr.assign(NumRegUnits, 0);

  // initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register references
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}
1874
1877 MFPropsModifier _(*this, MF);
1878 RegAllocFastImpl Impl(Opts.Filter, Opts.ClearVRegs);
1879 bool Changed = Impl.runOnMachineFunction(MF);
1880 if (!Changed)
1881 return PreservedAnalyses::all();
1883 PA.preserveSet<CFGAnalyses>();
1884 return PA;
1885}
1886
1888 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1889 bool PrintFilterName = Opts.FilterName != "all";
1890 bool PrintNoClearVRegs = !Opts.ClearVRegs;
1891 bool PrintSemicolon = PrintFilterName && PrintNoClearVRegs;
1892
1893 OS << "regallocfast";
1894 if (PrintFilterName || PrintNoClearVRegs) {
1895 OS << '<';
1896 if (PrintFilterName)
1897 OS << "filter=" << Opts.FilterName;
1898 if (PrintSemicolon)
1899 OS << ';';
1900 if (PrintNoClearVRegs)
1901 OS << "no-clear-vregs";
1902 OS << '>';
1903 }
1904}
1905
/// Create the legacy-pass-manager fast register allocator with default
/// options.
FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }
1907
1909 bool ClearVirtRegs) {
1910 return new RegAllocFast(Ftor, ClearVirtRegs);
1911}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition Compiler.h:336
This file defines the DenseMap class.
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, const MachineInstr &B)
static bool isTiedToNotUndef(const MachineOperand &MO)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
bool test(unsigned Idx) const
Definition BitVector.h:480
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition BitVector.h:360
void clear()
clear - Removes all bits from the bitvector.
Definition BitVector.h:354
BitVector & set()
Definition BitVector.h:370
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Load the specified register of the given register class from the specified stack frame index.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
void resize(typename StorageT::size_type S)
Definition IndexedMap.h:67
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
iterator_range< liveout_iterator > liveouts() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void dump() const
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
bool isDebugValueList() const
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
const MachineBasicBlock * getParent() const
bool isNonListDebugValue() const
bool isDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition Register.h:87
constexpr bool isValid() const
Definition Register.h:112
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr unsigned id() const
Definition Register.h:100
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition SmallSet.h:175
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
void assign(size_type NumElts, ValueParamT Elt)
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
ArrayRef< MCPhysReg > getRegisters() const
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
InstructionCost Cost
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
unsigned MCRegUnit
Register units are used to compute register aliasing.
Definition MCRegister.h:30
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.