LLVM 17.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/IndexedMap.h"
18#include "llvm/ADT/MapVector.h"
19#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/SparseSet.h"
22#include "llvm/ADT/Statistic.h"
40#include "llvm/Pass.h"
41#include "llvm/Support/Debug.h"
44#include <cassert>
45#include <tuple>
46#include <vector>
47
48using namespace llvm;
49
50#define DEBUG_TYPE "regalloc"
51
52STATISTIC(NumStores, "Number of stores added");
53STATISTIC(NumLoads , "Number of loads added");
54STATISTIC(NumCoalesced, "Number of copies coalesced");
55
56// FIXME: Remove this switch when all testcases are fixed!
57static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
59
61 fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
62
63namespace {
64
65 class RegAllocFast : public MachineFunctionPass {
66 public:
67 static char ID;
68
69 RegAllocFast(const RegClassFilterFunc F = allocateAllRegClasses,
70 bool ClearVirtRegs_ = true) :
72 ShouldAllocateClass(F),
73 StackSlotForVirtReg(-1),
74 ClearVirtRegs(ClearVirtRegs_) {
75 }
76
77 private:
78 MachineFrameInfo *MFI = nullptr;
79 MachineRegisterInfo *MRI = nullptr;
80 const TargetRegisterInfo *TRI = nullptr;
81 const TargetInstrInfo *TII = nullptr;
82 RegisterClassInfo RegClassInfo;
83 const RegClassFilterFunc ShouldAllocateClass;
84
85 /// Basic block currently being allocated.
86 MachineBasicBlock *MBB = nullptr;
87
88 /// Maps virtual regs to the frame index where these values are spilled.
89 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
90
91 bool ClearVirtRegs;
92
93 /// Everything we know about a live virtual register.
94 struct LiveReg {
95 MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
96 Register VirtReg; ///< Virtual register number.
97 MCPhysReg PhysReg = 0; ///< Currently held here.
98 bool LiveOut = false; ///< Register is possibly live out.
99 bool Reloaded = false; ///< Register was reloaded.
100 bool Error = false; ///< Could not allocate.
101
102 explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
103
104 unsigned getSparseSetIndex() const {
105 return Register::virtReg2Index(VirtReg);
106 }
107 };
108
110 /// This map contains entries for each virtual register that is currently
111 /// available in a physical register.
112 LiveRegMap LiveVirtRegs;
113
114 /// Stores assigned virtual registers present in the bundle MI.
115 DenseMap<Register, MCPhysReg> BundleVirtRegsMap;
116
118 /// List of DBG_VALUE that we encountered without the vreg being assigned
119 /// because they were placed after the last use of the vreg.
121
122 /// Has a bit set for every virtual register for which it was determined
123 /// that it is alive across blocks.
124 BitVector MayLiveAcrossBlocks;
125
126 /// State of a register unit.
127 enum RegUnitState {
128 /// A free register is not currently in use and can be allocated
129 /// immediately without checking aliases.
130 regFree,
131
132 /// A pre-assigned register has been assigned before register allocation
133 /// (e.g., setting up a call parameter).
134 regPreAssigned,
135
136 /// Used temporarily in reloadAtBegin() to mark register units that are
137 /// live-in to the basic block.
138 regLiveIn,
139
140 /// A register state may also be a virtual register number, indication
141 /// that the physical register is currently allocated to a virtual
142 /// register. In that case, LiveVirtRegs contains the inverse mapping.
143 };
144
145 /// Maps each physical register to a RegUnitState enum or virtual register.
146 std::vector<unsigned> RegUnitStates;
147
149
150 using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
151 /// Set of register units that are used in the current instruction, and so
152 /// cannot be allocated.
153 RegUnitSet UsedInInstr;
154 RegUnitSet PhysRegUses;
155 SmallVector<uint16_t, 8> DefOperandIndexes;
156 // Register masks attached to the current instruction.
158
159 void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
160 bool isPhysRegFree(MCPhysReg PhysReg) const;
161
162 /// Mark a physreg as used in this instruction.
163 void markRegUsedInInstr(MCPhysReg PhysReg) {
164 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
165 UsedInInstr.insert(*Units);
166 }
167
168 // Check if physreg is clobbered by instruction's regmask(s).
169 bool isClobberedByRegMasks(MCPhysReg PhysReg) const {
170 return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
171 return MachineOperand::clobbersPhysReg(Mask, PhysReg);
172 });
173 }
174
175 /// Check if a physreg or any of its aliases are used in this instruction.
176 bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
177 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
178 return true;
179 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
180 if (UsedInInstr.count(*Units))
181 return true;
182 if (LookAtPhysRegUses && PhysRegUses.count(*Units))
183 return true;
184 }
185 return false;
186 }
187
188 /// Mark physical register as being used in a register use operand.
189 /// This is only used by the special livethrough handling code.
190 void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
191 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
192 PhysRegUses.insert(*Units);
193 }
194
195 /// Remove mark of physical register being used in the instruction.
196 void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
197 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
198 UsedInInstr.erase(*Units);
199 }
200
201 enum : unsigned {
202 spillClean = 50,
203 spillDirty = 100,
204 spillPrefBonus = 20,
205 spillImpossible = ~0u
206 };
207
208 public:
209 StringRef getPassName() const override { return "Fast Register Allocator"; }
210
211 void getAnalysisUsage(AnalysisUsage &AU) const override {
212 AU.setPreservesCFG();
214 }
215
218 MachineFunctionProperties::Property::NoPHIs);
219 }
220
222 if (ClearVirtRegs) {
224 MachineFunctionProperties::Property::NoVRegs);
225 }
226
228 }
229
232 MachineFunctionProperties::Property::IsSSA);
233 }
234
235 private:
236 bool runOnMachineFunction(MachineFunction &MF) override;
237
238 void allocateBasicBlock(MachineBasicBlock &MBB);
239
240 void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
241 Register Reg) const;
242
243 void findAndSortDefOperandIndexes(const MachineInstr &MI);
244
245 void allocateInstruction(MachineInstr &MI);
246 void handleDebugValue(MachineInstr &MI);
247 void handleBundle(MachineInstr &MI);
248
249 bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
250 bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
251 bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
252 void freePhysReg(MCPhysReg PhysReg);
253
254 unsigned calcSpillCost(MCPhysReg PhysReg) const;
255
256 LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
257 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
258 }
259
260 LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
261 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
262 }
263
264 void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
265 void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
266 bool LookAtPhysRegUses = false);
267 void allocVirtRegUndef(MachineOperand &MO);
268 void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
269 MCPhysReg Reg);
270 bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
271 Register VirtReg);
272 bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
273 bool LookAtPhysRegUses = false);
274 bool useVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg);
275
277 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
278 SmallSet<Register, 2> &PrologLiveIns) const;
279
280 void reloadAtBegin(MachineBasicBlock &MBB);
281 bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
282
283 Register traceCopies(Register VirtReg) const;
284 Register traceCopyChain(Register Reg) const;
285
286 bool shouldAllocateRegister(const Register Reg) const;
287 int getStackSpaceFor(Register VirtReg);
288 void spill(MachineBasicBlock::iterator Before, Register VirtReg,
289 MCPhysReg AssignedReg, bool Kill, bool LiveOut);
290 void reload(MachineBasicBlock::iterator Before, Register VirtReg,
291 MCPhysReg PhysReg);
292
293 bool mayLiveOut(Register VirtReg);
294 bool mayLiveIn(Register VirtReg);
295
296 void dumpState() const;
297 };
298
299} // end anonymous namespace
300
301char RegAllocFast::ID = 0;
302
303INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
304 false)
305
306bool RegAllocFast::shouldAllocateRegister(const Register Reg) const {
307 assert(Reg.isVirtual());
308 const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
309 return ShouldAllocateClass(*TRI, RC);
310}
311
312void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
313 for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
314 RegUnitStates[*UI] = NewState;
315}
316
317bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
318 for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
319 if (RegUnitStates[*UI] != regFree)
320 return false;
321 }
322 return true;
323}
324
325/// This allocates space for the specified virtual register to be held on the
326/// stack.
327int RegAllocFast::getStackSpaceFor(Register VirtReg) {
328 // Find the location Reg would belong...
329 int SS = StackSlotForVirtReg[VirtReg];
330 // Already has space allocated?
331 if (SS != -1)
332 return SS;
333
334 // Allocate a new stack object for this spill location...
335 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
336 unsigned Size = TRI->getSpillSize(RC);
337 Align Alignment = TRI->getSpillAlign(RC);
338 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
339
340 // Assign the slot.
341 StackSlotForVirtReg[VirtReg] = FrameIdx;
342 return FrameIdx;
343}
344
348 auto MBBEnd = MBB.end();
349 if (B == MBBEnd)
350 return true;
351
353 for (; &*I != A && &*I != B; ++I)
354 ;
355
356 return &*I == A;
357}
358
359/// Returns false if \p VirtReg is known to not live out of the current block.
360bool RegAllocFast::mayLiveOut(Register VirtReg) {
361 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
362 // Cannot be live-out if there are no successors.
363 return !MBB->succ_empty();
364 }
365
366 const MachineInstr *SelfLoopDef = nullptr;
367
368 // If this block loops back to itself, it is necessary to check whether the
369 // use comes after the def.
370 if (MBB->isSuccessor(MBB)) {
371 // Find the first def in the self loop MBB.
372 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
373 if (DefInst.getParent() != MBB) {
374 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
375 return true;
376 } else {
377 if (!SelfLoopDef || dominates(*MBB, DefInst.getIterator(), SelfLoopDef))
378 SelfLoopDef = &DefInst;
379 }
380 }
381 if (!SelfLoopDef) {
382 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
383 return true;
384 }
385 }
386
387 // See if the first \p Limit uses of the register are all in the current
388 // block.
389 static const unsigned Limit = 8;
390 unsigned C = 0;
391 for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
392 if (UseInst.getParent() != MBB || ++C >= Limit) {
393 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
394 // Cannot be live-out if there are no successors.
395 return !MBB->succ_empty();
396 }
397
398 if (SelfLoopDef) {
399 // Try to handle some simple cases to avoid spilling and reloading every
400 // value inside a self looping block.
401 if (SelfLoopDef == &UseInst ||
402 !dominates(*MBB, SelfLoopDef->getIterator(), UseInst.getIterator())) {
403 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
404 return true;
405 }
406 }
407 }
408
409 return false;
410}
411
412/// Returns false if \p VirtReg is known to not be live into the current block.
413bool RegAllocFast::mayLiveIn(Register VirtReg) {
414 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
415 return !MBB->pred_empty();
416
417 // See if the first \p Limit def of the register are all in the current block.
418 static const unsigned Limit = 8;
419 unsigned C = 0;
420 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
421 if (DefInst.getParent() != MBB || ++C >= Limit) {
422 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
423 return !MBB->pred_empty();
424 }
425 }
426
427 return false;
428}
429
430/// Insert spill instruction for \p AssignedReg before \p Before. Update
431/// DBG_VALUEs with \p VirtReg operands with the stack slot.
432void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
433 MCPhysReg AssignedReg, bool Kill, bool LiveOut) {
434 LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI)
435 << " in " << printReg(AssignedReg, TRI));
436 int FI = getStackSpaceFor(VirtReg);
437 LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
438
439 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
440 TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
441 VirtReg);
442 ++NumStores;
443
445
446 // When we spill a virtual register, we will have spill instructions behind
447 // every definition of it, meaning we can switch all the DBG_VALUEs over
448 // to just reference the stack slot.
449 SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
451 SpilledOperandsMap;
452 for (MachineOperand *MO : LRIDbgOperands)
453 SpilledOperandsMap[MO->getParent()].push_back(MO);
454 for (auto MISpilledOperands : SpilledOperandsMap) {
455 MachineInstr &DBG = *MISpilledOperands.first;
456 // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
457 if (DBG.isDebugValueList())
458 continue;
460 *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
461 assert(NewDV->getParent() == MBB && "dangling parent pointer");
462 (void)NewDV;
463 LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
464
465 if (LiveOut) {
466 // We need to insert a DBG_VALUE at the end of the block if the spill slot
467 // is live out, but there is another use of the value after the
468 // spill. This will allow LiveDebugValues to see the correct live out
469 // value to propagate to the successors.
470 MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
471 MBB->insert(FirstTerm, ClonedDV);
472 LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
473 }
474
475 // Rewrite unassigned dbg_values to use the stack slot.
476 // TODO We can potentially do this for list debug values as well if we know
477 // how the dbg_values are getting unassigned.
478 if (DBG.isNonListDebugValue()) {
479 MachineOperand &MO = DBG.getDebugOperand(0);
480 if (MO.isReg() && MO.getReg() == 0) {
481 updateDbgValueForSpill(DBG, FI, 0);
482 }
483 }
484 }
485 // Now this register is spilled there is should not be any DBG_VALUE
486 // pointing to this register because they are all pointing to spilled value
487 // now.
488 LRIDbgOperands.clear();
489}
490
491/// Insert reload instruction for \p PhysReg before \p Before.
492void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
493 MCPhysReg PhysReg) {
494 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
495 << printReg(PhysReg, TRI) << '\n');
496 int FI = getStackSpaceFor(VirtReg);
497 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
498 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
499 ++NumLoads;
500}
501
502/// Get basic block begin insertion point.
503/// This is not just MBB.begin() because surprisingly we have EH_LABEL
504/// instructions marking the begin of a basic block. This means we must insert
505/// new instructions after such labels...
507RegAllocFast::getMBBBeginInsertionPoint(
508 MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
510 while (I != MBB.end()) {
511 if (I->isLabel()) {
512 ++I;
513 continue;
514 }
515
516 // Most reloads should be inserted after prolog instructions.
517 if (!TII->isBasicBlockPrologue(*I))
518 break;
519
520 // However if a prolog instruction reads a register that needs to be
521 // reloaded, the reload should be inserted before the prolog.
522 for (MachineOperand &MO : I->operands()) {
523 if (MO.isReg())
524 PrologLiveIns.insert(MO.getReg());
525 }
526
527 ++I;
528 }
529
530 return I;
531}
532
533/// Reload all currently assigned virtual registers.
534void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
535 if (LiveVirtRegs.empty())
536 return;
537
539 MCPhysReg Reg = P.PhysReg;
540 // Set state to live-in. This possibly overrides mappings to virtual
541 // registers but we don't care anymore at this point.
542 setPhysRegState(Reg, regLiveIn);
543 }
544
545
546 SmallSet<Register, 2> PrologLiveIns;
547
548 // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
549 // of spilling here is deterministic, if arbitrary.
550 MachineBasicBlock::iterator InsertBefore
551 = getMBBBeginInsertionPoint(MBB, PrologLiveIns);
552 for (const LiveReg &LR : LiveVirtRegs) {
553 MCPhysReg PhysReg = LR.PhysReg;
554 if (PhysReg == 0)
555 continue;
556
557 MCRegister FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
558 if (RegUnitStates[FirstUnit] == regLiveIn)
559 continue;
560
562 "no reload in start block. Missing vreg def?");
563
564 if (PrologLiveIns.count(PhysReg)) {
565 // FIXME: Theoretically this should use an insert point skipping labels
566 // but I'm not sure how labels should interact with prolog instruction
567 // that need reloads.
568 reload(MBB.begin(), LR.VirtReg, PhysReg);
569 } else
570 reload(InsertBefore, LR.VirtReg, PhysReg);
571 }
572 LiveVirtRegs.clear();
573}
574
575/// Handle the direct use of a physical register. Check that the register is
576/// not used by a virtreg. Kill the physreg, marking it free. This may add
577/// implicit kills to MO->getParent() and invalidate MO.
578bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
579 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
580 bool displacedAny = displacePhysReg(MI, Reg);
581 setPhysRegState(Reg, regPreAssigned);
582 markRegUsedInInstr(Reg);
583 return displacedAny;
584}
585
586bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
587 bool displacedAny = displacePhysReg(MI, Reg);
588 setPhysRegState(Reg, regPreAssigned);
589 return displacedAny;
590}
591
592/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
593/// similar to defineVirtReg except the physreg is reserved instead of
594/// allocated.
595bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
596 bool displacedAny = false;
597
598 for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
599 unsigned Unit = *UI;
600 switch (unsigned VirtReg = RegUnitStates[Unit]) {
601 default: {
602 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
603 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
604 MachineBasicBlock::iterator ReloadBefore =
605 std::next((MachineBasicBlock::iterator)MI.getIterator());
606 reload(ReloadBefore, VirtReg, LRI->PhysReg);
607
608 setPhysRegState(LRI->PhysReg, regFree);
609 LRI->PhysReg = 0;
610 LRI->Reloaded = true;
611 displacedAny = true;
612 break;
613 }
614 case regPreAssigned:
615 RegUnitStates[Unit] = regFree;
616 displacedAny = true;
617 break;
618 case regFree:
619 break;
620 }
621 }
622 return displacedAny;
623}
624
625void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
626 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
627
628 MCRegister FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
629 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
630 case regFree:
631 LLVM_DEBUG(dbgs() << '\n');
632 return;
633 case regPreAssigned:
634 LLVM_DEBUG(dbgs() << '\n');
635 setPhysRegState(PhysReg, regFree);
636 return;
637 default: {
638 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
639 assert(LRI != LiveVirtRegs.end());
640 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
641 setPhysRegState(LRI->PhysReg, regFree);
642 LRI->PhysReg = 0;
643 }
644 return;
645 }
646}
647
648/// Return the cost of spilling clearing out PhysReg and aliases so it is free
649/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
650/// disabled - it can be allocated directly.
651/// \returns spillImpossible when PhysReg or an alias can't be spilled.
652unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
653 for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
654 switch (unsigned VirtReg = RegUnitStates[*UI]) {
655 case regFree:
656 break;
657 case regPreAssigned:
658 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
659 << printReg(PhysReg, TRI) << '\n');
660 return spillImpossible;
661 default: {
662 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
663 findLiveVirtReg(VirtReg)->LiveOut;
664 return SureSpill ? spillClean : spillDirty;
665 }
666 }
667 }
668 return 0;
669}
670
671void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition,
672 Register VirtReg, MCPhysReg Reg) {
673 auto UDBGValIter = DanglingDbgValues.find(VirtReg);
674 if (UDBGValIter == DanglingDbgValues.end())
675 return;
676
677 SmallVectorImpl<MachineInstr*> &Dangling = UDBGValIter->second;
678 for (MachineInstr *DbgValue : Dangling) {
679 assert(DbgValue->isDebugValue());
680 if (!DbgValue->hasDebugOperandForReg(VirtReg))
681 continue;
682
683 // Test whether the physreg survives from the definition to the DBG_VALUE.
684 MCPhysReg SetToReg = Reg;
685 unsigned Limit = 20;
686 for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
687 E = DbgValue->getIterator(); I != E; ++I) {
688 if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
689 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
690 << '\n');
691 SetToReg = 0;
692 break;
693 }
694 }
695 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
696 MO.setReg(SetToReg);
697 if (SetToReg != 0)
698 MO.setIsRenamable();
699 }
700 }
701 Dangling.clear();
702}
703
704/// This method updates local state so that we know that PhysReg is the
705/// proper container for VirtReg now. The physical register must not be used
706/// for anything else when this is called.
707void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
708 MCPhysReg PhysReg) {
709 Register VirtReg = LR.VirtReg;
710 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
711 << printReg(PhysReg, TRI) << '\n');
712 assert(LR.PhysReg == 0 && "Already assigned a physreg");
713 assert(PhysReg != 0 && "Trying to assign no register");
714 LR.PhysReg = PhysReg;
715 setPhysRegState(PhysReg, VirtReg);
716
717 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
718}
719
720static bool isCoalescable(const MachineInstr &MI) {
721 return MI.isFullCopy();
722}
723
724Register RegAllocFast::traceCopyChain(Register Reg) const {
725 static const unsigned ChainLengthLimit = 3;
726 unsigned C = 0;
727 do {
728 if (Reg.isPhysical())
729 return Reg;
730 assert(Reg.isVirtual());
731
732 MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
733 if (!VRegDef || !isCoalescable(*VRegDef))
734 return 0;
735 Reg = VRegDef->getOperand(1).getReg();
736 } while (++C <= ChainLengthLimit);
737 return 0;
738}
739
740/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
741/// chain of copies to check whether we reach a physical register we can
742/// coalesce with.
743Register RegAllocFast::traceCopies(Register VirtReg) const {
744 static const unsigned DefLimit = 3;
745 unsigned C = 0;
746 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
747 if (isCoalescable(MI)) {
748 Register Reg = MI.getOperand(1).getReg();
749 Reg = traceCopyChain(Reg);
750 if (Reg.isValid())
751 return Reg;
752 }
753
754 if (++C >= DefLimit)
755 break;
756 }
757 return Register();
758}
759
760/// Allocates a physical register for VirtReg.
761void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
762 Register Hint0, bool LookAtPhysRegUses) {
763 const Register VirtReg = LR.VirtReg;
764 assert(LR.PhysReg == 0);
765
766 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
767 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
768 << " in class " << TRI->getRegClassName(&RC)
769 << " with hint " << printReg(Hint0, TRI) << '\n');
770
771 // Take hint when possible.
772 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
773 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
774 // Take hint if the register is currently free.
775 if (isPhysRegFree(Hint0)) {
776 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
777 << '\n');
778 assignVirtToPhysReg(MI, LR, Hint0);
779 return;
780 } else {
781 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
782 << " occupied\n");
783 }
784 } else {
785 Hint0 = Register();
786 }
787
788
789 // Try other hint.
790 Register Hint1 = traceCopies(VirtReg);
791 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
792 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
793 // Take hint if the register is currently free.
794 if (isPhysRegFree(Hint1)) {
795 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
796 << '\n');
797 assignVirtToPhysReg(MI, LR, Hint1);
798 return;
799 } else {
800 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
801 << " occupied\n");
802 }
803 } else {
804 Hint1 = Register();
805 }
806
807 MCPhysReg BestReg = 0;
808 unsigned BestCost = spillImpossible;
809 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
810 for (MCPhysReg PhysReg : AllocationOrder) {
811 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
812 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
813 LLVM_DEBUG(dbgs() << "already used in instr.\n");
814 continue;
815 }
816
817 unsigned Cost = calcSpillCost(PhysReg);
818 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
819 // Immediate take a register with cost 0.
820 if (Cost == 0) {
821 assignVirtToPhysReg(MI, LR, PhysReg);
822 return;
823 }
824
825 if (PhysReg == Hint0 || PhysReg == Hint1)
826 Cost -= spillPrefBonus;
827
828 if (Cost < BestCost) {
829 BestReg = PhysReg;
830 BestCost = Cost;
831 }
832 }
833
834 if (!BestReg) {
835 // Nothing we can do: Report an error and keep going with an invalid
836 // allocation.
837 if (MI.isInlineAsm())
838 MI.emitError("inline assembly requires more registers than available");
839 else
840 MI.emitError("ran out of registers during register allocation");
841
842 LR.Error = true;
843 LR.PhysReg = 0;
844 return;
845 }
846
847 displacePhysReg(MI, BestReg);
848 assignVirtToPhysReg(MI, LR, BestReg);
849}
850
851void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
852 assert(MO.isUndef() && "expected undef use");
853 Register VirtReg = MO.getReg();
854 assert(VirtReg.isVirtual() && "Expected virtreg");
855 if (!shouldAllocateRegister(VirtReg))
856 return;
857
858 LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
859 MCPhysReg PhysReg;
860 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
861 PhysReg = LRI->PhysReg;
862 } else {
863 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
864 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
865 assert(!AllocationOrder.empty() && "Allocation order must not be empty");
866 PhysReg = AllocationOrder[0];
867 }
868
869 unsigned SubRegIdx = MO.getSubReg();
870 if (SubRegIdx != 0) {
871 PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
872 MO.setSubReg(0);
873 }
874 MO.setReg(PhysReg);
875 MO.setIsRenamable(true);
876}
877
878/// Variation of defineVirtReg() with special handling for livethrough regs
879/// (tied or earlyclobber) that may interfere with preassigned uses.
880/// \return true if MI's MachineOperands were re-arranged/invalidated.
881bool RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
882 Register VirtReg) {
883 if (!shouldAllocateRegister(VirtReg))
884 return false;
885 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
886 if (LRI != LiveVirtRegs.end()) {
887 MCPhysReg PrevReg = LRI->PhysReg;
888 if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
889 LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
890 << " (tied/earlyclobber resolution)\n");
891 freePhysReg(PrevReg);
892 LRI->PhysReg = 0;
893 allocVirtReg(MI, *LRI, 0, true);
894 MachineBasicBlock::iterator InsertBefore =
895 std::next((MachineBasicBlock::iterator)MI.getIterator());
896 LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
897 << printReg(PrevReg, TRI) << '\n');
898 BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
899 TII->get(TargetOpcode::COPY), PrevReg)
900 .addReg(LRI->PhysReg, llvm::RegState::Kill);
901 }
902 MachineOperand &MO = MI.getOperand(OpNum);
903 if (MO.getSubReg() && !MO.isUndef()) {
904 LRI->LastUse = &MI;
905 }
906 }
907 return defineVirtReg(MI, OpNum, VirtReg, true);
908}
909
910/// Allocates a register for VirtReg definition. Typically the register is
911/// already assigned from a use of the virtreg, however we still need to
912/// perform an allocation if:
913/// - It is a dead definition without any uses.
914/// - The value is live out and all uses are in different basic blocks.
915///
916/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                 Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  // Find or create the live-range entry for this vreg. `New` means no use
  // below this def was seen in the bottom-up block walk (see
  // allocateBasicBlock's reverse traversal).
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        // No local use, but the value may be read in another block.
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  // Reuse the physreg a (later) use already picked, or allocate one now.
  if (LRI->PhysReg == 0)
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
  else {
    assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  assert(PhysReg != 0 && "Register not assigned");
  if (LRI->Reloaded || LRI->LiveOut) {
    if (!MI.isImplicitDef()) {
      // The value is reloaded below this point or live out of the block, so
      // it must be available on the stack: place the spill right after MI.
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut << " RL: "
                        << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill,
                                     FI, &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    // Remember the assignment so handleBundle() can rewrite bundled MIs.
    BundleVirtRegsMap[VirtReg] = PhysReg;
  }
  // Reserve the physreg for the remaining operands of this instruction.
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, PhysReg);
}
984
985/// Allocates a register for a VirtReg use.
986/// \return true if MI's MachineOperands were re-arranged/invalidated.
987bool RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum,
988 Register VirtReg) {
989 assert(VirtReg.isVirtual() && "Not a virtual register");
990 if (!shouldAllocateRegister(VirtReg))
991 return false;
992 MachineOperand &MO = MI.getOperand(OpNum);
993 LiveRegMap::iterator LRI;
994 bool New;
995 std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
996 if (New) {
997 MachineOperand &MO = MI.getOperand(OpNum);
998 if (!MO.isKill()) {
999 if (mayLiveOut(VirtReg)) {
1000 LRI->LiveOut = true;
1001 } else {
1002 // It is a last (killing) use without the kill flag; add the flag now.
1003 MO.setIsKill(true);
1004 }
1005 }
1006 } else {
1007 assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
1008 }
1009
1010 // If necessary allocate a register.
1011 if (LRI->PhysReg == 0) {
1012 assert(!MO.isTied() && "tied op should be allocated");
1013 Register Hint;
1014 if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
1015 Hint = MI.getOperand(0).getReg();
1016 if (Hint.isVirtual()) {
1017 assert(!shouldAllocateRegister(Hint));
1018 Hint = Register();
1019 } else {
1020 assert(Hint.isPhysical() &&
1021 "Copy destination should already be assigned");
1022 }
1023 }
1024 allocVirtReg(MI, *LRI, Hint, false);
1025 if (LRI->Error) {
1026 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1027 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1028 return setPhysReg(MI, MO, *AllocationOrder.begin());
1029 }
1030 }
1031
1032 LRI->LastUse = &MI;
1033
1034 if (MI.getOpcode() == TargetOpcode::BUNDLE) {
1035 BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
1036 }
1037 markRegUsedInInstr(LRI->PhysReg);
1038 return setPhysReg(MI, MO, LRI->PhysReg);
1039}
1040
1041/// Changes operand OpNum in MI the refer the PhysReg, considering subregs.
1042/// \return true if MI's MachineOperands were re-arranged/invalidated.
1043bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
1044 MCPhysReg PhysReg) {
1045 if (!MO.getSubReg()) {
1046 MO.setReg(PhysReg);
1047 MO.setIsRenamable(true);
1048 return false;
1049 }
1050
1051 // Handle subregister index.
1052 MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
1053 MO.setIsRenamable(true);
1054 // Note: We leave the subreg number around a little longer in case of defs.
1055 // This is so that the register freeing logic in allocateInstruction can still
1056 // recognize this as subregister defs. The code there will clear the number.
1057 if (!MO.isDef())
1058 MO.setSubReg(0);
1059
1060 // A kill flag implies killing the full register. Add corresponding super
1061 // register kill.
1062 if (MO.isKill()) {
1063 MI.addRegisterKilled(PhysReg, TRI, true);
1064 // Conservatively assume implicit MOs were re-arranged
1065 return true;
1066 }
1067
1068 // A <def,read-undef> of a sub-register requires an implicit def of the full
1069 // register.
1070 if (MO.isDef() && MO.isUndef()) {
1071 if (MO.isDead())
1072 MI.addRegisterDead(PhysReg, TRI, true);
1073 else
1074 MI.addRegisterDefined(PhysReg, TRI);
1075 // Conservatively assume implicit MOs were re-arranged
1076 return true;
1077 }
1078 return false;
1079}
1080
1081#ifndef NDEBUG
1082
1083void RegAllocFast::dumpState() const {
1084 for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
1085 ++Unit) {
1086 switch (unsigned VirtReg = RegUnitStates[Unit]) {
1087 case regFree:
1088 break;
1089 case regPreAssigned:
1090 dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
1091 break;
1092 case regLiveIn:
1093 llvm_unreachable("Should not have regLiveIn in map");
1094 default: {
1095 dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
1096 LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
1097 assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
1098 if (I->LiveOut || I->Reloaded) {
1099 dbgs() << '[';
1100 if (I->LiveOut) dbgs() << 'O';
1101 if (I->Reloaded) dbgs() << 'R';
1102 dbgs() << ']';
1103 }
1104 assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
1105 break;
1106 }
1107 }
1108 }
1109 dbgs() << '\n';
1110 // Check that LiveVirtRegs is the inverse.
1111 for (const LiveReg &LR : LiveVirtRegs) {
1112 Register VirtReg = LR.VirtReg;
1113 assert(VirtReg.isVirtual() && "Bad map key");
1114 MCPhysReg PhysReg = LR.PhysReg;
1115 if (PhysReg != 0) {
1117 "mapped to physreg");
1118 for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
1119 assert(RegUnitStates[*UI] == VirtReg && "inverse map valid");
1120 }
1121 }
1122 }
1123}
1124#endif
1125
1126/// Count number of defs consumed from each register class by \p Reg
1127void RegAllocFast::addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
1128 Register Reg) const {
1129 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1130
1131 if (Reg.isVirtual()) {
1132 if (!shouldAllocateRegister(Reg))
1133 return;
1134 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1135 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1136 RCIdx != RCIdxEnd; ++RCIdx) {
1137 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1138 // FIXME: Consider aliasing sub/super registers.
1139 if (OpRC->hasSubClassEq(IdxRC))
1140 ++RegClassDefCounts[RCIdx];
1141 }
1142
1143 return;
1144 }
1145
1146 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1147 RCIdx != RCIdxEnd; ++RCIdx) {
1148 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1149 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1150 if (IdxRC->contains(*Alias)) {
1151 ++RegClassDefCounts[RCIdx];
1152 break;
1153 }
1154 }
1155 }
1156}
1157
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFast::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  // Track number of defs which may consume a register from the class.
  std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);
  assert(RegClassDefCounts[0] == 0);

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      if (Reg.isPhysical()) {
        // Record physreg uses so the def allocation below can avoid them.
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef()) {
      // Only virtual register defs get allocated here...
      if (Reg.isVirtual() && shouldAllocateRegister(Reg))
        DefOperandIndexes.push_back(I);

      // ...but every def (physregs included) consumes register-class capacity.
      addRegClassDefCounts(RegClassDefCounts, Reg);
    }
  }

  llvm::sort(DefOperandIndexes, [&](uint16_t I0, uint16_t I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclass that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    // A class is "small" when this instruction alone defines more values of
    // it than the class has allocatable registers; allocate those first.
    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
1223
void RegAllocFast::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  UsedInInstr.clear();
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Returns true when the operand tied to operand Idx is marked undef (and
  // therefore does not carry a live value into the instruction).
  auto TiedOpIsUndef = [&](const MachineOperand &MO, unsigned Idx) {
    assert(MO.isTied());
    unsigned TiedIdx = MI.findTiedOperandIdx(Idx);
    const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
    return TiedMO.isUndef();
  };
  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          // Tied (with a live tied op) and non-undef subregister defs also
          // read a value, so they need the livethrough allocation path.
          if ((MO.isTied() && !TiedOpIsUndef(MO, I)) ||
              (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister defs:
      // Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        PhysRegUses.clear();

        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (uint16_t OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            unsigned Reg = MO.getReg();
            if (MO.isEarlyClobber() ||
                (MO.isTied() && !TiedOpIsUndef(MO, OpIdx)) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            if (ReArrangedImplicitOps) {
              // Implicit operands of MI were re-arranged,
              // re-compute DefOperandIndexes.
              break;
            }
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
            MachineOperand &MO = MI.getOperand(I);
            if (!MO.isReg() || !MO.isDef())
              continue;
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps = defineVirtReg(MI, I, Reg);
              if (ReArrangedImplicitOps) {
                break;
              }
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (signed I = MI.getNumOperands() - 1; I >= 0; --I) {
      MachineOperand &MO = MI.getOperand(I);
      if (!MO.isReg() || !MO.isDef())
        continue;

      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if ((MO.isTied() && !TiedOpIsUndef(MO, I)) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical());
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      bool displacedAny = usePhysReg(MI, Reg);
      if (!displacedAny)
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
      MachineOperand &MO = MI.getOperand(I);
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      // Undef uses are assigned separately below, after all real uses.
      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, I, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assign
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : llvm::reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //   early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  // Identity copies become dead after allocation; queue them for deletion
  // (done at the end of the block, see allocateBasicBlock).
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
1499
1500void RegAllocFast::handleDebugValue(MachineInstr &MI) {
1501 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1502 // mostly constants and frame indices.
1503 for (Register Reg : MI.getUsedDebugRegs()) {
1504 if (!Reg.isVirtual())
1505 continue;
1506 if (!shouldAllocateRegister(Reg))
1507 continue;
1508
1509 // Already spilled to a stackslot?
1510 int SS = StackSlotForVirtReg[Reg];
1511 if (SS != -1) {
1512 // Modify DBG_VALUE now that the value is in a spill slot.
1513 updateDbgValueForSpill(MI, SS, Reg);
1514 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1515 continue;
1516 }
1517
1518 // See if this virtual register has already been allocated to a physical
1519 // register or spilled to a stack slot.
1520 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1522 for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
1523 DbgOps.push_back(&Op);
1524
1525 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1526 // Update every use of Reg within MI.
1527 for (auto &RegMO : DbgOps)
1528 setPhysReg(MI, *RegMO, LRI->PhysReg);
1529 } else {
1530 DanglingDbgValues[Reg].push_back(&MI);
1531 }
1532
1533 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1534 // that future spills of Reg will have DBG_VALUEs.
1535 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1536 }
1537}
1538
1539void RegAllocFast::handleBundle(MachineInstr &MI) {
1540 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1541 ++BundledMI;
1542 while (BundledMI->isBundledWithPred()) {
1543 for (MachineOperand &MO : BundledMI->operands()) {
1544 if (!MO.isReg())
1545 continue;
1546
1547 Register Reg = MO.getReg();
1548 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1549 continue;
1550
1552 DI = BundleVirtRegsMap.find(Reg);
1553 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1554
1555 setPhysReg(MI, MO, DI->second);
1556 }
1557
1558 ++BundledMI;
1559 }
1560}
1561
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  // All register units start out free; live-outs are pre-assigned below.
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  // Registers live out of this block must not be reused while walking
  // upwards through it.
  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(
      dbgs() << "\n>> " << MI << "Regs:";
      dumpState()
    );

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(
    dbgs() << "Begin Regs:";
    dumpState()
  );

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  // DBG_VALUEs whose vreg never received a location (and was not spilled in
  // the meantime) are set undef so they don't report a stale value.
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
1627
bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  // Cache frequently used target/function info in member fields.
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs(MF);
  RegClassInfo.runOnMachineFunction(MF);
  // Size the per-instruction register-unit sets to this target's universe.
  unsigned NumRegUnits = TRI->getNumRegUnits();
  UsedInInstr.clear();
  UsedInInstr.setUniverse(NumRegUnits);
  PhysRegUses.clear();
  PhysRegUses.setUniverse(NumRegUnits);

  // initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register references
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  // Release per-function state.
  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}
1666
1668 return new RegAllocFast();
1669}
1670
1672 bool ClearVirtRegs) {
1673 return new RegAllocFast(Ftor, ClearVirtRegs);
1674}
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
#define P(N)
if(VerifyEach)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
static bool dominates(MachineBasicBlock &MBB, MachineBasicBlock::const_iterator A, MachineBasicBlock::const_iterator B)
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class recording the (high level) value of a variable.
Iterator begin() const
Represent the analysis usage information of a pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:265
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
BitVector & set()
Definition: BitVector.h:351
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
Lightweight error class with error context and mandatory checking.
Definition: Error.h:156
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
void resize(typename StorageT::size_type s)
Definition: IndexedMap.h:60
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
iterator_range< liveout_iterator > liveouts() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual MachineFunctionProperties getSetProperties() const
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool isDebugValueList() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:320
bool isNonListDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:542
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:533
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element type's size.
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
SparseSet - Fast set implementation for objects that can be identified by small unsigned keys.
Definition: SparseSet.h:124
typename DenseT::iterator iterator
Definition: SparseSet.h:171
typename DenseT::const_iterator const_iterator
Definition: SparseSet.h:172
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Iterator for intrusive lists based on ilist_node.
self_iterator getIterator()
Definition: ilist_node.h:82
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Kill
The last use of a register.
@ SS
Definition: X86.h:209
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1777
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
static bool allocateAllRegClasses(const TargetRegisterInfo &, const TargetRegisterClass &)
Default register class filter function for register allocation.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:511
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1744
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
std::function< bool(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC)> RegClassFilterFunc
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Pair of physical register and lane mask.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:234