LLVM 18.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/IndexedMap.h"
18#include "llvm/ADT/MapVector.h"
19#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/SparseSet.h"
22#include "llvm/ADT/Statistic.h"
40#include "llvm/Pass.h"
41#include "llvm/Support/Debug.h"
44#include <cassert>
45#include <tuple>
46#include <vector>
47
48using namespace llvm;
49
50#define DEBUG_TYPE "regalloc"
51
52STATISTIC(NumStores, "Number of stores added");
53STATISTIC(NumLoads, "Number of loads added");
54STATISTIC(NumCoalesced, "Number of copies coalesced");
55
56// FIXME: Remove this switch when all testcases are fixed!
57static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
59
60static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator",
62
63namespace {
64
65class RegAllocFast : public MachineFunctionPass {
66public:
67 static char ID;
68
69 RegAllocFast(const RegClassFilterFunc F = allocateAllRegClasses,
70 bool ClearVirtRegs_ = true)
71 : MachineFunctionPass(ID), ShouldAllocateClass(F),
72 StackSlotForVirtReg(-1), ClearVirtRegs(ClearVirtRegs_) {}
73
74private:
75 MachineFrameInfo *MFI = nullptr;
76 MachineRegisterInfo *MRI = nullptr;
77 const TargetRegisterInfo *TRI = nullptr;
78 const TargetInstrInfo *TII = nullptr;
79 RegisterClassInfo RegClassInfo;
80 const RegClassFilterFunc ShouldAllocateClass;
81
82 /// Basic block currently being allocated.
83 MachineBasicBlock *MBB = nullptr;
84
85 /// Maps virtual regs to the frame index where these values are spilled.
86 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
87
88 bool ClearVirtRegs;
89
90 /// Everything we know about a live virtual register.
91 struct LiveReg {
92 MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
93 Register VirtReg; ///< Virtual register number.
94 MCPhysReg PhysReg = 0; ///< Currently held here.
95 bool LiveOut = false; ///< Register is possibly live out.
96 bool Reloaded = false; ///< Register was reloaded.
97 bool Error = false; ///< Could not allocate.
98
99 explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
100
101 unsigned getSparseSetIndex() const {
102 return Register::virtReg2Index(VirtReg);
103 }
104 };
105
107 /// This map contains entries for each virtual register that is currently
108 /// available in a physical register.
109 LiveRegMap LiveVirtRegs;
110
111 /// Stores assigned virtual registers present in the bundle MI.
112 DenseMap<Register, MCPhysReg> BundleVirtRegsMap;
113
115 /// List of DBG_VALUE that we encountered without the vreg being assigned
116 /// because they were placed after the last use of the vreg.
118
119 /// Has a bit set for every virtual register for which it was determined
120 /// that it is alive across blocks.
121 BitVector MayLiveAcrossBlocks;
122
123 /// State of a register unit.
124 enum RegUnitState {
125 /// A free register is not currently in use and can be allocated
126 /// immediately without checking aliases.
127 regFree,
128
129 /// A pre-assigned register has been assigned before register allocation
130 /// (e.g., setting up a call parameter).
131 regPreAssigned,
132
133 /// Used temporarily in reloadAtBegin() to mark register units that are
134 /// live-in to the basic block.
135 regLiveIn,
136
    /// A register state may also be a virtual register number, indicating
138 /// that the physical register is currently allocated to a virtual
139 /// register. In that case, LiveVirtRegs contains the inverse mapping.
140 };
141
142 /// Maps each physical register to a RegUnitState enum or virtual register.
143 std::vector<unsigned> RegUnitStates;
144
146
147 using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
148 /// Set of register units that are used in the current instruction, and so
149 /// cannot be allocated.
150 RegUnitSet UsedInInstr;
151 RegUnitSet PhysRegUses;
152 SmallVector<uint16_t, 8> DefOperandIndexes;
153 // Register masks attached to the current instruction.
155
156 void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
157 bool isPhysRegFree(MCPhysReg PhysReg) const;
158
159 /// Mark a physreg as used in this instruction.
160 void markRegUsedInInstr(MCPhysReg PhysReg) {
161 for (MCRegUnit Unit : TRI->regunits(PhysReg))
162 UsedInInstr.insert(Unit);
163 }
164
165 // Check if physreg is clobbered by instruction's regmask(s).
166 bool isClobberedByRegMasks(MCPhysReg PhysReg) const {
167 return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
168 return MachineOperand::clobbersPhysReg(Mask, PhysReg);
169 });
170 }
171
172 /// Check if a physreg or any of its aliases are used in this instruction.
173 bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
174 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
175 return true;
176 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
177 if (UsedInInstr.count(Unit))
178 return true;
179 if (LookAtPhysRegUses && PhysRegUses.count(Unit))
180 return true;
181 }
182 return false;
183 }
184
185 /// Mark physical register as being used in a register use operand.
186 /// This is only used by the special livethrough handling code.
187 void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
188 for (MCRegUnit Unit : TRI->regunits(PhysReg))
189 PhysRegUses.insert(Unit);
190 }
191
192 /// Remove mark of physical register being used in the instruction.
193 void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
194 for (MCRegUnit Unit : TRI->regunits(PhysReg))
195 UsedInInstr.erase(Unit);
196 }
197
198 enum : unsigned {
199 spillClean = 50,
200 spillDirty = 100,
201 spillPrefBonus = 20,
202 spillImpossible = ~0u
203 };
204
205public:
206 StringRef getPassName() const override { return "Fast Register Allocator"; }
207
208 void getAnalysisUsage(AnalysisUsage &AU) const override {
209 AU.setPreservesCFG();
211 }
212
215 MachineFunctionProperties::Property::NoPHIs);
216 }
217
219 if (ClearVirtRegs) {
221 MachineFunctionProperties::Property::NoVRegs);
222 }
223
225 }
226
229 MachineFunctionProperties::Property::IsSSA);
230 }
231
232private:
233 bool runOnMachineFunction(MachineFunction &MF) override;
234
235 void allocateBasicBlock(MachineBasicBlock &MBB);
236
237 void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
238 Register Reg) const;
239
240 void findAndSortDefOperandIndexes(const MachineInstr &MI);
241
242 void allocateInstruction(MachineInstr &MI);
243 void handleDebugValue(MachineInstr &MI);
244 void handleBundle(MachineInstr &MI);
245
246 bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
247 bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
248 bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
249 void freePhysReg(MCPhysReg PhysReg);
250
251 unsigned calcSpillCost(MCPhysReg PhysReg) const;
252
253 LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
254 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
255 }
256
257 LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
258 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
259 }
260
261 void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
262 void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
263 bool LookAtPhysRegUses = false);
264 void allocVirtRegUndef(MachineOperand &MO);
265 void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
266 MCPhysReg Reg);
267 bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
268 Register VirtReg);
269 bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
270 bool LookAtPhysRegUses = false);
271 bool useVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg);
272
274 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
275 SmallSet<Register, 2> &PrologLiveIns) const;
276
277 void reloadAtBegin(MachineBasicBlock &MBB);
278 bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
279
280 Register traceCopies(Register VirtReg) const;
281 Register traceCopyChain(Register Reg) const;
282
283 bool shouldAllocateRegister(const Register Reg) const;
284 int getStackSpaceFor(Register VirtReg);
285 void spill(MachineBasicBlock::iterator Before, Register VirtReg,
286 MCPhysReg AssignedReg, bool Kill, bool LiveOut);
287 void reload(MachineBasicBlock::iterator Before, Register VirtReg,
288 MCPhysReg PhysReg);
289
290 bool mayLiveOut(Register VirtReg);
291 bool mayLiveIn(Register VirtReg);
292
293 void dumpState() const;
294};
295
296} // end anonymous namespace
297
/// Pass identification: the PassManager identifies passes by the address of
/// this member, so its value is irrelevant.
char RegAllocFast::ID = 0;

INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)
302
303bool RegAllocFast::shouldAllocateRegister(const Register Reg) const {
304 assert(Reg.isVirtual());
305 const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
306 return ShouldAllocateClass(*TRI, RC);
307}
308
309void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
310 for (MCRegUnit Unit : TRI->regunits(PhysReg))
311 RegUnitStates[Unit] = NewState;
312}
313
314bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
315 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
316 if (RegUnitStates[Unit] != regFree)
317 return false;
318 }
319 return true;
320}
321
322/// This allocates space for the specified virtual register to be held on the
323/// stack.
324int RegAllocFast::getStackSpaceFor(Register VirtReg) {
325 // Find the location Reg would belong...
326 int SS = StackSlotForVirtReg[VirtReg];
327 // Already has space allocated?
328 if (SS != -1)
329 return SS;
330
331 // Allocate a new stack object for this spill location...
332 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
333 unsigned Size = TRI->getSpillSize(RC);
334 Align Alignment = TRI->getSpillAlign(RC);
335 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
336
337 // Assign the slot.
338 StackSlotForVirtReg[VirtReg] = FrameIdx;
339 return FrameIdx;
340}
341
345 auto MBBEnd = MBB.end();
346 if (B == MBBEnd)
347 return true;
348
350 for (; &*I != A && &*I != B; ++I)
351 ;
352
353 return &*I == A;
354}
355
356/// Returns false if \p VirtReg is known to not live out of the current block.
357bool RegAllocFast::mayLiveOut(Register VirtReg) {
358 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
359 // Cannot be live-out if there are no successors.
360 return !MBB->succ_empty();
361 }
362
363 const MachineInstr *SelfLoopDef = nullptr;
364
365 // If this block loops back to itself, it is necessary to check whether the
366 // use comes after the def.
367 if (MBB->isSuccessor(MBB)) {
368 // Find the first def in the self loop MBB.
369 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
370 if (DefInst.getParent() != MBB) {
371 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
372 return true;
373 } else {
374 if (!SelfLoopDef || dominates(*MBB, DefInst.getIterator(), SelfLoopDef))
375 SelfLoopDef = &DefInst;
376 }
377 }
378 if (!SelfLoopDef) {
379 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
380 return true;
381 }
382 }
383
384 // See if the first \p Limit uses of the register are all in the current
385 // block.
386 static const unsigned Limit = 8;
387 unsigned C = 0;
388 for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
389 if (UseInst.getParent() != MBB || ++C >= Limit) {
390 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
391 // Cannot be live-out if there are no successors.
392 return !MBB->succ_empty();
393 }
394
395 if (SelfLoopDef) {
396 // Try to handle some simple cases to avoid spilling and reloading every
397 // value inside a self looping block.
398 if (SelfLoopDef == &UseInst ||
399 !dominates(*MBB, SelfLoopDef->getIterator(), UseInst.getIterator())) {
400 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
401 return true;
402 }
403 }
404 }
405
406 return false;
407}
408
409/// Returns false if \p VirtReg is known to not be live into the current block.
410bool RegAllocFast::mayLiveIn(Register VirtReg) {
411 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
412 return !MBB->pred_empty();
413
414 // See if the first \p Limit def of the register are all in the current block.
415 static const unsigned Limit = 8;
416 unsigned C = 0;
417 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
418 if (DefInst.getParent() != MBB || ++C >= Limit) {
419 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
420 return !MBB->pred_empty();
421 }
422 }
423
424 return false;
425}
426
427/// Insert spill instruction for \p AssignedReg before \p Before. Update
428/// DBG_VALUEs with \p VirtReg operands with the stack slot.
void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
                         MCPhysReg AssignedReg, bool Kill, bool LiveOut) {
  LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in "
                    << printReg(AssignedReg, TRI));
  // Lazily create (or reuse) the stack slot for this vreg.
  int FI = getStackSpaceFor(VirtReg);
  LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
                           VirtReg);
  ++NumStores;

  // When we spill a virtual register, we will have spill instructions behind
  // every definition of it, meaning we can switch all the DBG_VALUEs over
  // to just reference the stack slot.
  SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
      SpilledOperandsMap;
  // Group the tracked debug operands by their parent DBG_VALUE so each
  // instruction is rewritten once with all of its affected operands.
  for (MachineOperand *MO : LRIDbgOperands)
    SpilledOperandsMap[MO->getParent()].push_back(MO);
  for (auto MISpilledOperands : SpilledOperandsMap) {
    MachineInstr &DBG = *MISpilledOperands.first;
    // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
    if (DBG.isDebugValueList())
      continue;
        *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
    assert(NewDV->getParent() == MBB && "dangling parent pointer");
    (void)NewDV;
    LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);

    if (LiveOut) {
      // We need to insert a DBG_VALUE at the end of the block if the spill slot
      // is live out, but there is another use of the value after the
      // spill. This will allow LiveDebugValues to see the correct live out
      // value to propagate to the successors.
      MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
      MBB->insert(FirstTerm, ClonedDV);
      LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
    }

    // Rewrite unassigned dbg_values to use the stack slot.
    // TODO We can potentially do this for list debug values as well if we know
    // how the dbg_values are getting unassigned.
    if (DBG.isNonListDebugValue()) {
      MachineOperand &MO = DBG.getDebugOperand(0);
      // A register operand of 0 means the DBG_VALUE was left unassigned.
      if (MO.isReg() && MO.getReg() == 0) {
        updateDbgValueForSpill(DBG, FI, 0);
      }
    }
  }
  // Now that this register is spilled, there should not be any DBG_VALUE
  // pointing to this register because they all point to the spilled value
  // now.
  LRIDbgOperands.clear();
}
487
488/// Insert reload instruction for \p PhysReg before \p Before.
489void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
490 MCPhysReg PhysReg) {
491 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
492 << printReg(PhysReg, TRI) << '\n');
493 int FI = getStackSpaceFor(VirtReg);
494 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
495 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
496 ++NumLoads;
497}
498
499/// Get basic block begin insertion point.
500/// This is not just MBB.begin() because surprisingly we have EH_LABEL
501/// instructions marking the begin of a basic block. This means we must insert
502/// new instructions after such labels...
MachineBasicBlock::iterator RegAllocFast::getMBBBeginInsertionPoint(
    MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
  while (I != MBB.end()) {
    // Reloads must come after block-begin markers such as EH_LABELs.
    if (I->isLabel()) {
      ++I;
      continue;
    }

    // Most reloads should be inserted after prolog instructions.
    if (!TII->isBasicBlockPrologue(*I))
      break;

    // However if a prolog instruction reads a register that needs to be
    // reloaded, the reload should be inserted before the prolog.
    // Collect every register operand conservatively; the caller checks
    // membership to decide where to place each reload.
    for (MachineOperand &MO : I->operands()) {
      if (MO.isReg())
        PrologLiveIns.insert(MO.getReg());
    }

    ++I;
  }

  return I;
}
528
529/// Reload all currently assigned virtual registers.
void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
  // Nothing is live at the start of the block: nothing to reload.
  if (LiveVirtRegs.empty())
    return;

    MCPhysReg Reg = P.PhysReg;
    // Set state to live-in. This possibly overrides mappings to virtual
    // registers but we don't care anymore at this point.
    setPhysRegState(Reg, regLiveIn);
  }

  SmallSet<Register, 2> PrologLiveIns;

  // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
  // of spilling here is deterministic, if arbitrary.
  MachineBasicBlock::iterator InsertBefore =
      getMBBBeginInsertionPoint(MBB, PrologLiveIns);
  for (const LiveReg &LR : LiveVirtRegs) {
    MCPhysReg PhysReg = LR.PhysReg;
    // Entries without an assigned physreg need no reload.
    if (PhysReg == 0)
      continue;

    // setPhysRegState marked every unit of a live-in physreg above, so the
    // first unit is enough to detect the live-in case.
    MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
    if (RegUnitStates[FirstUnit] == regLiveIn)
      continue;

           "no reload in start block. Missing vreg def?");

    if (PrologLiveIns.count(PhysReg)) {
      // FIXME: Theoretically this should use an insert point skipping labels
      // but I'm not sure how labels should interact with prolog instruction
      // that need reloads.
      reload(MBB.begin(), LR.VirtReg, PhysReg);
    } else
      reload(InsertBefore, LR.VirtReg, PhysReg);
  }
  LiveVirtRegs.clear();
}
569
570/// Handle the direct use of a physical register. Check that the register is
571/// not used by a virtreg. Kill the physreg, marking it free. This may add
572/// implicit kills to MO->getParent() and invalidate MO.
573bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
574 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
575 bool displacedAny = displacePhysReg(MI, Reg);
576 setPhysRegState(Reg, regPreAssigned);
577 markRegUsedInInstr(Reg);
578 return displacedAny;
579}
580
581bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
582 bool displacedAny = displacePhysReg(MI, Reg);
583 setPhysRegState(Reg, regPreAssigned);
584 return displacedAny;
585}
586
587/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
588/// similar to defineVirtReg except the physreg is reserved instead of
589/// allocated.
590bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
591 bool displacedAny = false;
592
593 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
594 switch (unsigned VirtReg = RegUnitStates[Unit]) {
595 default: {
596 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
597 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
598 MachineBasicBlock::iterator ReloadBefore =
599 std::next((MachineBasicBlock::iterator)MI.getIterator());
600 reload(ReloadBefore, VirtReg, LRI->PhysReg);
601
602 setPhysRegState(LRI->PhysReg, regFree);
603 LRI->PhysReg = 0;
604 LRI->Reloaded = true;
605 displacedAny = true;
606 break;
607 }
608 case regPreAssigned:
609 RegUnitStates[Unit] = regFree;
610 displacedAny = true;
611 break;
612 case regFree:
613 break;
614 }
615 }
616 return displacedAny;
617}
618
619void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
620 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
621
622 MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
623 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
624 case regFree:
625 LLVM_DEBUG(dbgs() << '\n');
626 return;
627 case regPreAssigned:
628 LLVM_DEBUG(dbgs() << '\n');
629 setPhysRegState(PhysReg, regFree);
630 return;
631 default: {
632 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
633 assert(LRI != LiveVirtRegs.end());
634 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
635 setPhysRegState(LRI->PhysReg, regFree);
636 LRI->PhysReg = 0;
637 }
638 return;
639 }
640}
641
642/// Return the cost of spilling clearing out PhysReg and aliases so it is free
643/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
644/// disabled - it can be allocated directly.
645/// \returns spillImpossible when PhysReg or an alias can't be spilled.
646unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
647 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
648 switch (unsigned VirtReg = RegUnitStates[Unit]) {
649 case regFree:
650 break;
651 case regPreAssigned:
652 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
653 << printReg(PhysReg, TRI) << '\n');
654 return spillImpossible;
655 default: {
656 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
657 findLiveVirtReg(VirtReg)->LiveOut;
658 return SureSpill ? spillClean : spillDirty;
659 }
660 }
661 }
662 return 0;
663}
664
665void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition,
666 Register VirtReg, MCPhysReg Reg) {
667 auto UDBGValIter = DanglingDbgValues.find(VirtReg);
668 if (UDBGValIter == DanglingDbgValues.end())
669 return;
670
671 SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
672 for (MachineInstr *DbgValue : Dangling) {
673 assert(DbgValue->isDebugValue());
674 if (!DbgValue->hasDebugOperandForReg(VirtReg))
675 continue;
676
677 // Test whether the physreg survives from the definition to the DBG_VALUE.
678 MCPhysReg SetToReg = Reg;
679 unsigned Limit = 20;
680 for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
681 E = DbgValue->getIterator();
682 I != E; ++I) {
683 if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
684 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
685 << '\n');
686 SetToReg = 0;
687 break;
688 }
689 }
690 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
691 MO.setReg(SetToReg);
692 if (SetToReg != 0)
693 MO.setIsRenamable();
694 }
695 }
696 Dangling.clear();
697}
698
699/// This method updates local state so that we know that PhysReg is the
700/// proper container for VirtReg now. The physical register must not be used
701/// for anything else when this is called.
702void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
703 MCPhysReg PhysReg) {
704 Register VirtReg = LR.VirtReg;
705 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
706 << printReg(PhysReg, TRI) << '\n');
707 assert(LR.PhysReg == 0 && "Already assigned a physreg");
708 assert(PhysReg != 0 && "Trying to assign no register");
709 LR.PhysReg = PhysReg;
710 setPhysRegState(PhysReg, VirtReg);
711
712 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
713}
714
715static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); }
716
717Register RegAllocFast::traceCopyChain(Register Reg) const {
718 static const unsigned ChainLengthLimit = 3;
719 unsigned C = 0;
720 do {
721 if (Reg.isPhysical())
722 return Reg;
723 assert(Reg.isVirtual());
724
725 MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
726 if (!VRegDef || !isCoalescable(*VRegDef))
727 return 0;
728 Reg = VRegDef->getOperand(1).getReg();
729 } while (++C <= ChainLengthLimit);
730 return 0;
731}
732
733/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
734/// chain of copies to check whether we reach a physical register we can
735/// coalesce with.
736Register RegAllocFast::traceCopies(Register VirtReg) const {
737 static const unsigned DefLimit = 3;
738 unsigned C = 0;
739 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
740 if (isCoalescable(MI)) {
741 Register Reg = MI.getOperand(1).getReg();
742 Reg = traceCopyChain(Reg);
743 if (Reg.isValid())
744 return Reg;
745 }
746
747 if (++C >= DefLimit)
748 break;
749 }
750 return Register();
751}
752
753/// Allocates a physical register for VirtReg.
754void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0,
755 bool LookAtPhysRegUses) {
756 const Register VirtReg = LR.VirtReg;
757 assert(LR.PhysReg == 0);
758
759 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
760 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
761 << " in class " << TRI->getRegClassName(&RC)
762 << " with hint " << printReg(Hint0, TRI) << '\n');
763
764 // Take hint when possible.
765 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
766 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
767 // Take hint if the register is currently free.
768 if (isPhysRegFree(Hint0)) {
769 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
770 << '\n');
771 assignVirtToPhysReg(MI, LR, Hint0);
772 return;
773 } else {
774 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
775 << " occupied\n");
776 }
777 } else {
778 Hint0 = Register();
779 }
780
781 // Try other hint.
782 Register Hint1 = traceCopies(VirtReg);
783 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
784 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
785 // Take hint if the register is currently free.
786 if (isPhysRegFree(Hint1)) {
787 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
788 << '\n');
789 assignVirtToPhysReg(MI, LR, Hint1);
790 return;
791 } else {
792 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
793 << " occupied\n");
794 }
795 } else {
796 Hint1 = Register();
797 }
798
799 MCPhysReg BestReg = 0;
800 unsigned BestCost = spillImpossible;
801 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
802 for (MCPhysReg PhysReg : AllocationOrder) {
803 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
804 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
805 LLVM_DEBUG(dbgs() << "already used in instr.\n");
806 continue;
807 }
808
809 unsigned Cost = calcSpillCost(PhysReg);
810 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
811 // Immediate take a register with cost 0.
812 if (Cost == 0) {
813 assignVirtToPhysReg(MI, LR, PhysReg);
814 return;
815 }
816
817 if (PhysReg == Hint0 || PhysReg == Hint1)
818 Cost -= spillPrefBonus;
819
820 if (Cost < BestCost) {
821 BestReg = PhysReg;
822 BestCost = Cost;
823 }
824 }
825
826 if (!BestReg) {
827 // Nothing we can do: Report an error and keep going with an invalid
828 // allocation.
829 if (MI.isInlineAsm())
830 MI.emitError("inline assembly requires more registers than available");
831 else
832 MI.emitError("ran out of registers during register allocation");
833
834 LR.Error = true;
835 LR.PhysReg = 0;
836 return;
837 }
838
839 displacePhysReg(MI, BestReg);
840 assignVirtToPhysReg(MI, LR, BestReg);
841}
842
843void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
844 assert(MO.isUndef() && "expected undef use");
845 Register VirtReg = MO.getReg();
846 assert(VirtReg.isVirtual() && "Expected virtreg");
847 if (!shouldAllocateRegister(VirtReg))
848 return;
849
850 LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
851 MCPhysReg PhysReg;
852 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
853 PhysReg = LRI->PhysReg;
854 } else {
855 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
856 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
857 assert(!AllocationOrder.empty() && "Allocation order must not be empty");
858 PhysReg = AllocationOrder[0];
859 }
860
861 unsigned SubRegIdx = MO.getSubReg();
862 if (SubRegIdx != 0) {
863 PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
864 MO.setSubReg(0);
865 }
866 MO.setReg(PhysReg);
867 MO.setIsRenamable(true);
868}
869
870/// Variation of defineVirtReg() with special handling for livethrough regs
871/// (tied or earlyclobber) that may interfere with preassigned uses.
872/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
                                            Register VirtReg) {
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end()) {
    MCPhysReg PrevReg = LRI->PhysReg;
    // If the register the vreg currently lives in collides with a physreg
    // operand of MI, move the vreg to a fresh register and insert a COPY back
    // into PrevReg right after MI.
    // NOTE(review): the copy-back implies that instructions after MI were
    // already processed expecting the value in PrevReg (reverse scan) —
    // confirm against allocateBasicBlock.
    if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
      LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
                        << " (tied/earlyclobber resolution)\n");
      // Order matters: free the old register before reallocating so it is a
      // candidate again, then place the COPY after MI.
      freePhysReg(PrevReg);
      LRI->PhysReg = 0;
      allocVirtReg(MI, *LRI, 0, true);
      MachineBasicBlock::iterator InsertBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
                        << printReg(PrevReg, TRI) << '\n');
      BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
              TII->get(TargetOpcode::COPY), PrevReg)
          .addReg(LRI->PhysReg, llvm::RegState::Kill);
    }
    // A subregister def that is not marked undef also reads the remaining
    // lanes, so record MI as a use of the previous value.
    MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.getSubReg() && !MO.isUndef()) {
      LRI->LastUse = &MI;
    }
  }
  // Delegate the actual def handling, looking at physreg uses for conflicts.
  return defineVirtReg(MI, OpNum, VirtReg, true);
}
901
902/// Allocates a register for VirtReg definition. Typically the register is
903/// already assigned from a use of the virtreg, however we still need to
904/// perform an allocation if:
905/// - It is a dead definition without any uses.
906/// - The value is live out and all uses are in different basic blocks.
907///
908/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                 Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // First time we see this vreg: classify the def as live-out or dead.
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
    // If no physical register is available for LRI, we assign one at random
    // and bail out of this function immediately.
    if (LRI->Error) {
      const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
      ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
      if (AllocationOrder.empty())
        return setPhysReg(MI, MO, MCRegister::NoRegister);
      return setPhysReg(MI, MO, *AllocationOrder.begin());
    }
  } else {
    assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  // If the value was reloaded somewhere below the def or is live out, its
  // current value must be written to the stack slot right after the def.
  if (LRI->Reloaded || LRI->LiveOut) {
    if (!MI.isImplicitDef()) {
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut
                        << " RL: " << LRI->Reloaded << '\n');
      // No later use recorded => the register is killed by the spill itself.
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
                                     &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    // The spill satisfied the live-out/reloaded obligations.
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  // Inside a bundle, remember the assignment so the bundle header can be
  // fixed up later.
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = PhysReg;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, PhysReg);
}
984
985/// Allocates a register for a VirtReg use.
986/// \return true if MI's MachineOperands were re-arranged/invalidated.
987bool RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum,
988 Register VirtReg) {
989 assert(VirtReg.isVirtual() && "Not a virtual register");
990 if (!shouldAllocateRegister(VirtReg))
991 return false;
992 MachineOperand &MO = MI.getOperand(OpNum);
993 LiveRegMap::iterator LRI;
994 bool New;
995 std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
996 if (New) {
997 MachineOperand &MO = MI.getOperand(OpNum);
998 if (!MO.isKill()) {
999 if (mayLiveOut(VirtReg)) {
1000 LRI->LiveOut = true;
1001 } else {
1002 // It is a last (killing) use without the kill flag; add the flag now.
1003 MO.setIsKill(true);
1004 }
1005 }
1006 } else {
1007 assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
1008 }
1009
1010 // If necessary allocate a register.
1011 if (LRI->PhysReg == 0) {
1012 assert(!MO.isTied() && "tied op should be allocated");
1013 Register Hint;
1014 if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
1015 Hint = MI.getOperand(0).getReg();
1016 if (Hint.isVirtual()) {
1017 assert(!shouldAllocateRegister(Hint));
1018 Hint = Register();
1019 } else {
1020 assert(Hint.isPhysical() &&
1021 "Copy destination should already be assigned");
1022 }
1023 }
1024 allocVirtReg(MI, *LRI, Hint, false);
1025 if (LRI->Error) {
1026 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1027 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1028 if (AllocationOrder.empty())
1029 return setPhysReg(MI, MO, MCRegister::NoRegister);
1030 return setPhysReg(MI, MO, *AllocationOrder.begin());
1031 }
1032 }
1033
1034 LRI->LastUse = &MI;
1035
1036 if (MI.getOpcode() == TargetOpcode::BUNDLE) {
1037 BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
1038 }
1039 markRegUsedInInstr(LRI->PhysReg);
1040 return setPhysReg(MI, MO, LRI->PhysReg);
1041}
1042
1043/// Changes operand OpNum in MI the refer the PhysReg, considering subregs.
1044/// \return true if MI's MachineOperands were re-arranged/invalidated.
1045bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
1046 MCPhysReg PhysReg) {
1047 if (!MO.getSubReg()) {
1048 MO.setReg(PhysReg);
1049 MO.setIsRenamable(true);
1050 return false;
1051 }
1052
1053 // Handle subregister index.
1054 MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
1055 MO.setIsRenamable(true);
1056 // Note: We leave the subreg number around a little longer in case of defs.
1057 // This is so that the register freeing logic in allocateInstruction can still
1058 // recognize this as subregister defs. The code there will clear the number.
1059 if (!MO.isDef())
1060 MO.setSubReg(0);
1061
1062 // A kill flag implies killing the full register. Add corresponding super
1063 // register kill.
1064 if (MO.isKill()) {
1065 MI.addRegisterKilled(PhysReg, TRI, true);
1066 // Conservatively assume implicit MOs were re-arranged
1067 return true;
1068 }
1069
1070 // A <def,read-undef> of a sub-register requires an implicit def of the full
1071 // register.
1072 if (MO.isDef() && MO.isUndef()) {
1073 if (MO.isDead())
1074 MI.addRegisterDead(PhysReg, TRI, true);
1075 else
1076 MI.addRegisterDefined(PhysReg, TRI);
1077 // Conservatively assume implicit MOs were re-arranged
1078 return true;
1079 }
1080 return false;
1081}
1082
1083#ifndef NDEBUG
1084
1085void RegAllocFast::dumpState() const {
1086 for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
1087 ++Unit) {
1088 switch (unsigned VirtReg = RegUnitStates[Unit]) {
1089 case regFree:
1090 break;
1091 case regPreAssigned:
1092 dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
1093 break;
1094 case regLiveIn:
1095 llvm_unreachable("Should not have regLiveIn in map");
1096 default: {
1097 dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
1098 LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
1099 assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
1100 if (I->LiveOut || I->Reloaded) {
1101 dbgs() << '[';
1102 if (I->LiveOut)
1103 dbgs() << 'O';
1104 if (I->Reloaded)
1105 dbgs() << 'R';
1106 dbgs() << ']';
1107 }
1108 assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
1109 break;
1110 }
1111 }
1112 }
1113 dbgs() << '\n';
1114 // Check that LiveVirtRegs is the inverse.
1115 for (const LiveReg &LR : LiveVirtRegs) {
1116 Register VirtReg = LR.VirtReg;
1117 assert(VirtReg.isVirtual() && "Bad map key");
1118 MCPhysReg PhysReg = LR.PhysReg;
1119 if (PhysReg != 0) {
1120 assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg");
1121 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
1122 assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
1123 }
1124 }
1125 }
1126}
1127#endif
1128
1129/// Count number of defs consumed from each register class by \p Reg
1130void RegAllocFast::addRegClassDefCounts(
1131 std::vector<unsigned> &RegClassDefCounts, Register Reg) const {
1132 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1133
1134 if (Reg.isVirtual()) {
1135 if (!shouldAllocateRegister(Reg))
1136 return;
1137 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1138 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1139 RCIdx != RCIdxEnd; ++RCIdx) {
1140 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1141 // FIXME: Consider aliasing sub/super registers.
1142 if (OpRC->hasSubClassEq(IdxRC))
1143 ++RegClassDefCounts[RCIdx];
1144 }
1145
1146 return;
1147 }
1148
1149 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1150 RCIdx != RCIdxEnd; ++RCIdx) {
1151 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1152 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1153 if (IdxRC->contains(*Alias)) {
1154 ++RegClassDefCounts[RCIdx];
1155 break;
1156 }
1157 }
1158 }
1159}
1160
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFast::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  // Track number of defs which may consume a register from the class.
  std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);
  assert(RegClassDefCounts[0] == 0);

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      if (Reg.isPhysical()) {
        // Record pre-assigned physreg reads so def allocation below will
        // steer clear of those registers.
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef()) {
      // Only virtual defs of classes we allocate participate in the sort.
      if (Reg.isVirtual() && shouldAllocateRegister(Reg))
        DefOperandIndexes.push_back(I);

      // Count every def (virtual or physical) against the classes it may
      // draw a register from; used for the "small class" test below.
      addRegClassDefCounts(RegClassDefCounts, Reg);
    }
  }

  // Sort keys, in priority order:
  //   1. defs whose class could be fully consumed by this instruction,
  //   2. early clobbers / tied / full-register livethrough defs,
  //   3. operand index (deterministic tie-break, keeps the order strict-weak).
  llvm::sort(DefOperandIndexes, [&](uint16_t I0, uint16_t I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclass that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
1226
1227void RegAllocFast::allocateInstruction(MachineInstr &MI) {
1228 // The basic algorithm here is:
1229 // 1. Mark registers of def operands as free
1230 // 2. Allocate registers to use operands and place reload instructions for
1231 // registers displaced by the allocation.
1232 //
1233 // However we need to handle some corner cases:
1234 // - pre-assigned defs and uses need to be handled before the other def/use
1235 // operands are processed to avoid the allocation heuristics clashing with
1236 // the pre-assignment.
1237 // - The "free def operands" step has to come last instead of first for tied
1238 // operands and early-clobbers.
1239
1240 UsedInInstr.clear();
1241 RegMasks.clear();
1242 BundleVirtRegsMap.clear();
1243
1244 auto TiedOpIsUndef = [&](const MachineOperand &MO, unsigned Idx) {
1245 assert(MO.isTied());
1246 unsigned TiedIdx = MI.findTiedOperandIdx(Idx);
1247 const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
1248 return TiedMO.isUndef();
1249 };
1250 // Scan for special cases; Apply pre-assigned register defs to state.
1251 bool HasPhysRegUse = false;
1252 bool HasRegMask = false;
1253 bool HasVRegDef = false;
1254 bool HasDef = false;
1255 bool HasEarlyClobber = false;
1256 bool NeedToAssignLiveThroughs = false;
1257 for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
1258 MachineOperand &MO = MI.getOperand(I);
1259 if (MO.isReg()) {
1260 Register Reg = MO.getReg();
1261 if (Reg.isVirtual()) {
1262 if (!shouldAllocateRegister(Reg))
1263 continue;
1264 if (MO.isDef()) {
1265 HasDef = true;
1266 HasVRegDef = true;
1267 if (MO.isEarlyClobber()) {
1268 HasEarlyClobber = true;
1269 NeedToAssignLiveThroughs = true;
1270 }
1271 if ((MO.isTied() && !TiedOpIsUndef(MO, I)) ||
1272 (MO.getSubReg() != 0 && !MO.isUndef()))
1273 NeedToAssignLiveThroughs = true;
1274 }
1275 } else if (Reg.isPhysical()) {
1276 if (!MRI->isReserved(Reg)) {
1277 if (MO.isDef()) {
1278 HasDef = true;
1279 bool displacedAny = definePhysReg(MI, Reg);
1280 if (MO.isEarlyClobber())
1281 HasEarlyClobber = true;
1282 if (!displacedAny)
1283 MO.setIsDead(true);
1284 }
1285 if (MO.readsReg())
1286 HasPhysRegUse = true;
1287 }
1288 }
1289 } else if (MO.isRegMask()) {
1290 HasRegMask = true;
1291 RegMasks.push_back(MO.getRegMask());
1292 }
1293 }
1294
1295 // Allocate virtreg defs.
1296 if (HasDef) {
1297 if (HasVRegDef) {
1298 // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
1299 // multiple times to ensure no operand is missed.
1300 bool ReArrangedImplicitOps = true;
1301
1302 // Special handling for early clobbers, tied operands or subregister defs:
1303 // Compared to "normal" defs these:
1304 // - Must not use a register that is pre-assigned for a use operand.
1305 // - In order to solve tricky inline assembly constraints we change the
1306 // heuristic to figure out a good operand order before doing
1307 // assignments.
1308 if (NeedToAssignLiveThroughs) {
1309 PhysRegUses.clear();
1310
1311 while (ReArrangedImplicitOps) {
1312 ReArrangedImplicitOps = false;
1313 findAndSortDefOperandIndexes(MI);
1314 for (uint16_t OpIdx : DefOperandIndexes) {
1315 MachineOperand &MO = MI.getOperand(OpIdx);
1316 LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
1317 unsigned Reg = MO.getReg();
1318 if (MO.isEarlyClobber() ||
1319 (MO.isTied() && !TiedOpIsUndef(MO, OpIdx)) ||
1320 (MO.getSubReg() && !MO.isUndef())) {
1321 ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
1322 } else {
1323 ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
1324 }
1325 if (ReArrangedImplicitOps) {
1326 // Implicit operands of MI were re-arranged,
1327 // re-compute DefOperandIndexes.
1328 break;
1329 }
1330 }
1331 }
1332 } else {
1333 // Assign virtual register defs.
1334 while (ReArrangedImplicitOps) {
1335 ReArrangedImplicitOps = false;
1336 for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
1337 MachineOperand &MO = MI.getOperand(I);
1338 if (!MO.isReg() || !MO.isDef())
1339 continue;
1340 Register Reg = MO.getReg();
1341 if (Reg.isVirtual()) {
1342 ReArrangedImplicitOps = defineVirtReg(MI, I, Reg);
1343 if (ReArrangedImplicitOps) {
1344 break;
1345 }
1346 }
1347 }
1348 }
1349 }
1350 }
1351
1352 // Free registers occupied by defs.
1353 // Iterate operands in reverse order, so we see the implicit super register
1354 // defs first (we added them earlier in case of <def,read-undef>).
1355 for (signed I = MI.getNumOperands() - 1; I >= 0; --I) {
1356 MachineOperand &MO = MI.getOperand(I);
1357 if (!MO.isReg() || !MO.isDef())
1358 continue;
1359
1360 Register Reg = MO.getReg();
1361
1362 // subreg defs don't free the full register. We left the subreg number
1363 // around as a marker in setPhysReg() to recognize this case here.
1364 if (Reg.isPhysical() && MO.getSubReg() != 0) {
1365 MO.setSubReg(0);
1366 continue;
1367 }
1368
1369 assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
1370 "tied def assigned to clobbered register");
1371
1372 // Do not free tied operands and early clobbers.
1373 if ((MO.isTied() && !TiedOpIsUndef(MO, I)) || MO.isEarlyClobber())
1374 continue;
1375 if (!Reg)
1376 continue;
1377 if (Reg.isVirtual()) {
1378 assert(!shouldAllocateRegister(Reg));
1379 continue;
1380 }
1381 assert(Reg.isPhysical());
1382 if (MRI->isReserved(Reg))
1383 continue;
1384 freePhysReg(Reg);
1385 unmarkRegUsedInInstr(Reg);
1386 }
1387 }
1388
1389 // Displace clobbered registers.
1390 if (HasRegMask) {
1391 assert(!RegMasks.empty() && "expected RegMask");
1392 // MRI bookkeeping.
1393 for (const auto *RM : RegMasks)
1394 MRI->addPhysRegsUsedFromRegMask(RM);
1395
1396 // Displace clobbered registers.
1397 for (const LiveReg &LR : LiveVirtRegs) {
1398 MCPhysReg PhysReg = LR.PhysReg;
1399 if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
1400 displacePhysReg(MI, PhysReg);
1401 }
1402 }
1403
1404 // Apply pre-assigned register uses to state.
1405 if (HasPhysRegUse) {
1406 for (MachineOperand &MO : MI.operands()) {
1407 if (!MO.isReg() || !MO.readsReg())
1408 continue;
1409 Register Reg = MO.getReg();
1410 if (!Reg.isPhysical())
1411 continue;
1412 if (MRI->isReserved(Reg))
1413 continue;
1414 bool displacedAny = usePhysReg(MI, Reg);
1415 if (!displacedAny)
1416 MO.setIsKill(true);
1417 }
1418 }
1419
1420 // Allocate virtreg uses and insert reloads as necessary.
1421 // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
1422 // times to ensure no operand is missed.
1423 bool HasUndefUse = false;
1424 bool ReArrangedImplicitMOs = true;
1425 while (ReArrangedImplicitMOs) {
1426 ReArrangedImplicitMOs = false;
1427 for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
1428 MachineOperand &MO = MI.getOperand(I);
1429 if (!MO.isReg() || !MO.isUse())
1430 continue;
1431 Register Reg = MO.getReg();
1432 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1433 continue;
1434
1435 if (MO.isUndef()) {
1436 HasUndefUse = true;
1437 continue;
1438 }
1439
1440 // Populate MayLiveAcrossBlocks in case the use block is allocated before
1441 // the def block (removing the vreg uses).
1442 mayLiveIn(Reg);
1443
1444 assert(!MO.isInternalRead() && "Bundles not supported");
1445 assert(MO.readsReg() && "reading use");
1446 ReArrangedImplicitMOs = useVirtReg(MI, I, Reg);
1447 if (ReArrangedImplicitMOs)
1448 break;
1449 }
1450 }
1451
1452 // Allocate undef operands. This is a separate step because in a situation
1453 // like ` = OP undef %X, %X` both operands need the same register assign
1454 // so we should perform the normal assignment first.
1455 if (HasUndefUse) {
1456 for (MachineOperand &MO : MI.all_uses()) {
1457 Register Reg = MO.getReg();
1458 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1459 continue;
1460
1461 assert(MO.isUndef() && "Should only have undef virtreg uses left");
1462 allocVirtRegUndef(MO);
1463 }
1464 }
1465
1466 // Free early clobbers.
1467 if (HasEarlyClobber) {
1468 for (MachineOperand &MO : llvm::reverse(MI.all_defs())) {
1469 if (!MO.isEarlyClobber())
1470 continue;
1471 assert(!MO.getSubReg() && "should be already handled in def processing");
1472
1473 Register Reg = MO.getReg();
1474 if (!Reg)
1475 continue;
1476 if (Reg.isVirtual()) {
1477 assert(!shouldAllocateRegister(Reg));
1478 continue;
1479 }
1480 assert(Reg.isPhysical() && "should have register assigned");
1481
1482 // We sometimes get odd situations like:
1483 // early-clobber %x0 = INSTRUCTION %x0
1484 // which is semantically questionable as the early-clobber should
1485 // apply before the use. But in practice we consider the use to
1486 // happen before the early clobber now. Don't free the early clobber
1487 // register in this case.
1488 if (MI.readsRegister(Reg, TRI))
1489 continue;
1490
1491 freePhysReg(Reg);
1492 }
1493 }
1494
1495 LLVM_DEBUG(dbgs() << "<< " << MI);
1496 if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
1497 MI.getNumOperands() == 2) {
1498 LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
1499 Coalesced.push_back(&MI);
1500 }
1501}
1502
1503void RegAllocFast::handleDebugValue(MachineInstr &MI) {
1504 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1505 // mostly constants and frame indices.
1506 assert(MI.isDebugValue() && "not a DBG_VALUE*");
1507 for (const auto &MO : MI.debug_operands()) {
1508 if (!MO.isReg())
1509 continue;
1510 Register Reg = MO.getReg();
1511 if (!Reg.isVirtual())
1512 continue;
1513 if (!shouldAllocateRegister(Reg))
1514 continue;
1515
1516 // Already spilled to a stackslot?
1517 int SS = StackSlotForVirtReg[Reg];
1518 if (SS != -1) {
1519 // Modify DBG_VALUE now that the value is in a spill slot.
1520 updateDbgValueForSpill(MI, SS, Reg);
1521 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1522 continue;
1523 }
1524
1525 // See if this virtual register has already been allocated to a physical
1526 // register or spilled to a stack slot.
1527 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1529 for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
1530 DbgOps.push_back(&Op);
1531
1532 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1533 // Update every use of Reg within MI.
1534 for (auto &RegMO : DbgOps)
1535 setPhysReg(MI, *RegMO, LRI->PhysReg);
1536 } else {
1537 DanglingDbgValues[Reg].push_back(&MI);
1538 }
1539
1540 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1541 // that future spills of Reg will have DBG_VALUEs.
1542 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1543 }
1544}
1545
1546void RegAllocFast::handleBundle(MachineInstr &MI) {
1547 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1548 ++BundledMI;
1549 while (BundledMI->isBundledWithPred()) {
1550 for (MachineOperand &MO : BundledMI->operands()) {
1551 if (!MO.isReg())
1552 continue;
1553
1554 Register Reg = MO.getReg();
1555 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1556 continue;
1557
1559 DI = BundleVirtRegsMap.find(Reg);
1560 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1561
1562 setPhysReg(MI, MO, DI->second);
1563 }
1564
1565 ++BundledMI;
1566 }
1567}
1568
1569void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
1570 this->MBB = &MBB;
1571 LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
1572
1573 RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
1574 assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
1575
1576 for (const auto &LiveReg : MBB.liveouts())
1577 setPhysRegState(LiveReg.PhysReg, regPreAssigned);
1578
1579 Coalesced.clear();
1580
1581 // Traverse block in reverse order allocating instructions one by one.
1582 for (MachineInstr &MI : reverse(MBB)) {
1583 LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());
1584
1585 // Special handling for debug values. Note that they are not allowed to
1586 // affect codegen of the other instructions in any way.
1587 if (MI.isDebugValue()) {
1588 handleDebugValue(MI);
1589 continue;
1590 }
1591
1592 allocateInstruction(MI);
1593
1594 // Once BUNDLE header is assigned registers, same assignments need to be
1595 // done for bundled MIs.
1596 if (MI.getOpcode() == TargetOpcode::BUNDLE) {
1597 handleBundle(MI);
1598 }
1599 }
1600
1601 LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());
1602
1603 // Spill all physical registers holding virtual registers now.
1604 LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
1605 reloadAtBegin(MBB);
1606
1607 // Erase all the coalesced copies. We are delaying it until now because
1608 // LiveVirtRegs might refer to the instrs.
1609 for (MachineInstr *MI : Coalesced)
1610 MBB.erase(MI);
1611 NumCoalesced += Coalesced.size();
1612
1613 for (auto &UDBGPair : DanglingDbgValues) {
1614 for (MachineInstr *DbgValue : UDBGPair.second) {
1615 assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
1616 // Nothing to do if the vreg was spilled in the meantime.
1617 if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
1618 continue;
1619 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
1620 << '\n');
1621 DbgValue->setDebugValueUndef();
1622 }
1623 }
1624 DanglingDbgValues.clear();
1625
1626 LLVM_DEBUG(MBB.dump());
1627}
1628
1629bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
1630 LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
1631 << "********** Function: " << MF.getName() << '\n');
1632 MRI = &MF.getRegInfo();
1633 const TargetSubtargetInfo &STI = MF.getSubtarget();
1634 TRI = STI.getRegisterInfo();
1635 TII = STI.getInstrInfo();
1636 MFI = &MF.getFrameInfo();
1637 MRI->freezeReservedRegs(MF);
1638 RegClassInfo.runOnMachineFunction(MF);
1639 unsigned NumRegUnits = TRI->getNumRegUnits();
1640 UsedInInstr.clear();
1641 UsedInInstr.setUniverse(NumRegUnits);
1642 PhysRegUses.clear();
1643 PhysRegUses.setUniverse(NumRegUnits);
1644
1645 // initialize the virtual->physical register map to have a 'null'
1646 // mapping for all virtual registers
1647 unsigned NumVirtRegs = MRI->getNumVirtRegs();
1648 StackSlotForVirtReg.resize(NumVirtRegs);
1649 LiveVirtRegs.setUniverse(NumVirtRegs);
1650 MayLiveAcrossBlocks.clear();
1651 MayLiveAcrossBlocks.resize(NumVirtRegs);
1652
1653 // Loop over all of the basic blocks, eliminating virtual register references
1654 for (MachineBasicBlock &MBB : MF)
1655 allocateBasicBlock(MBB);
1656
1657 if (ClearVirtRegs) {
1658 // All machine operands and other references to virtual registers have been
1659 // replaced. Remove the virtual registers.
1660 MRI->clearVirtRegs();
1661 }
1662
1663 StackSlotForVirtReg.clear();
1664 LiveDbgValueMap.clear();
1665 return true;
1666}
1667
1668FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }
1669
1671 bool ClearVirtRegs) {
1672 return new RegAllocFast(Ftor, ClearVirtRegs);
1673}
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
#define P(N)
if(VerifyEach)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
static bool dominates(MachineBasicBlock &MBB, MachineBasicBlock::const_iterator A, MachineBasicBlock::const_iterator B)
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class recording the (high level) value of a variable.
Iterator begin() const
Represent the analysis usage information of a pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:269
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
BitVector & set()
Definition: BitVector.h:351
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
void resize(typename StorageT::size_type s)
Definition: IndexedMap.h:61
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
static constexpr unsigned NoRegister
Definition: MCRegister.h:52
iterator_range< liveout_iterator > liveouts() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual MachineFunctionProperties getSetProperties() const
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool isDebugValueList() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:326
bool isNonListDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:562
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N). In this case, the set can be maintained with no mallocs.
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
SparseSet - Fast set implementation for objects that can be identified by small unsigned keys.
Definition: SparseSet.h:124
typename DenseT::iterator iterator
Definition: SparseSet.h:171
typename DenseT::const_iterator const_iterator
Definition: SparseSet.h:172
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Kill
The last use of a register.
@ SS
Definition: X86.h:207
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1684
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
static bool allocateAllRegClasses(const TargetRegisterInfo &, const TargetRegisterClass &)
Default register class filter function for register allocation.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1651
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
std::function< bool(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC)> RegClassFilterFunc
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Pair of physical register and lane mask.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254