RegAllocGreedy.cpp
1 //===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the RAGreedy function pass for register allocation in
10 // optimized builds.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RegAllocGreedy.h"
15 #include "AllocationOrder.h"
16 #include "InterferenceCache.h"
17 #include "LiveDebugVariables.h"
18 #include "RegAllocBase.h"
20 #include "SpillPlacement.h"
21 #include "SplitKit.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/BitVector.h"
24 #include "llvm/ADT/IndexedMap.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallSet.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/ADT/StringRef.h"
55 #include "llvm/CodeGen/Spiller.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/InitializePasses.h"
64 #include "llvm/MC/MCRegisterInfo.h"
65 #include "llvm/Pass.h"
69 #include "llvm/Support/Debug.h"
71 #include "llvm/Support/Timer.h"
73 #include <algorithm>
74 #include <cassert>
75 #include <cstdint>
76 #include <utility>
77 
78 using namespace llvm;
79 
80 #define DEBUG_TYPE "regalloc"
81 
82 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
83 STATISTIC(NumLocalSplits, "Number of split local live ranges");
84 STATISTIC(NumEvicted, "Number of interferences evicted");
85 
86 static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
87  "split-spill-mode", cl::Hidden,
88  cl::desc("Spill mode for splitting live ranges"),
89  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
90  clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
91  clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
92  cl::init(SplitEditor::SM_Speed));
93 
94 static cl::opt<unsigned>
95 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
96  cl::desc("Last chance recoloring max depth"),
97  cl::init(5));
98 
99 static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
100  "lcr-max-interf", cl::Hidden,
101  cl::desc("Last chance recoloring maximum number of considered"
102  " interference at a time"),
103  cl::init(8));
104 
105 static cl::opt<bool> ExhaustiveSearch(
106  "exhaustive-register-search", cl::NotHidden,
107  cl::desc("Exhaustive Search for registers bypassing the depth "
108  "and interference cutoffs of last chance recoloring"),
109  cl::Hidden);
110 
111 static cl::opt<bool> EnableDeferredSpilling(
112  "enable-deferred-spilling", cl::Hidden,
113  cl::desc("Instead of spilling a variable right away, defer the actual "
114  "code insertion to the end of the allocation. That way the "
115  "allocator might still find a suitable coloring for this "
116  "variable because of other evicted variables."),
117  cl::init(false));
118 
119 // FIXME: Find a good default for this flag and remove the flag.
120 static cl::opt<unsigned>
121 CSRFirstTimeCost("regalloc-csr-first-time-cost",
122  cl::desc("Cost for first time use of callee-saved register."),
123  cl::init(0), cl::Hidden);
124 
125 static cl::opt<unsigned long> GrowRegionComplexityBudget(
126  "grow-region-complexity-budget",
127  cl::desc("growRegion() does not scale with the number of BB edges, so "
128  "limit its budget and bail out once we reach the limit."),
129  cl::init(10000), cl::Hidden);
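// Illustrative note (not part of the original source): options declared with
// cl::opt like the ones above are ordinary LLVM command-line flags, so the
// budget can be tuned from any tool that runs this allocator, for example
// (hypothetical invocation):
//
//   llc -O2 -regalloc=greedy -grow-region-complexity-budget=5000 foo.ll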
130 
131 static cl::opt<bool> GreedyRegClassPriorityTrumpsGlobalness(
132  "greedy-regclass-priority-trumps-globalness",
133  cl::desc("Change the greedy register allocator's live range priority "
134  "calculation to make the AllocationPriority of the register class "
135  "more important than whether the range is global"),
136  cl::Hidden);
137 
138 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
139  createGreedyRegisterAllocator);
140 
141 char RAGreedy::ID = 0;
142 char &llvm::RAGreedyID = RAGreedy::ID;
143 
144 INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
145  "Greedy Register Allocator", false, false)
149 INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
150 INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
162 
163 #ifndef NDEBUG
164 const char *const RAGreedy::StageName[] = {
165  "RS_New",
166  "RS_Assign",
167  "RS_Split",
168  "RS_Split2",
169  "RS_Spill",
170  "RS_Memory",
171  "RS_Done"
172 };
173 #endif
174 
175 // Hysteresis to use when comparing floats.
176 // This helps stabilize decisions based on float comparisons.
177 const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
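// Illustrative sketch (not part of the original source): a hysteresis factor
// like the one above is typically used so that a new candidate must beat the
// incumbent by a clear margin (about 2% here) before it replaces it, which
// keeps repeated floating-point comparisons from flip-flopping. The helper
// below is hypothetical and only demonstrates the intended use of the
// constant; it is guarded out of the build on purpose.
#if 0
static bool isClearlyCheaper(float NewCost, float BestCost) {
  // Accept the new candidate only when it is at least ~2% cheaper.
  return NewCost < Hysteresis * BestCost;
}
#endif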
178 
179 FunctionPass* llvm::createGreedyRegisterAllocator() {
180  return new RAGreedy();
181 }
182 
183 namespace llvm {
184 FunctionPass* createGreedyRegisterAllocator(
185  std::function<bool(const TargetRegisterInfo &TRI,
186  const TargetRegisterClass &RC)> Ftor);
187 
188 }
189 
190 FunctionPass* llvm::createGreedyRegisterAllocator(
191  std::function<bool(const TargetRegisterInfo &TRI,
192  const TargetRegisterClass &RC)> Ftor) {
193  return new RAGreedy(Ftor);
194 }
195 
196 RAGreedy::RAGreedy(RegClassFilterFunc F):
197  MachineFunctionPass(ID),
198  RegAllocBase(F) {
199 }
200 
201 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
202  AU.setPreservesCFG();
209  AU.addRequired<SlotIndexes>();
213  AU.addRequired<LiveStacks>();
214  AU.addPreserved<LiveStacks>();
219  AU.addRequired<VirtRegMap>();
220  AU.addPreserved<VirtRegMap>();
223  AU.addRequired<EdgeBundles>();
228 }
229 
230 //===----------------------------------------------------------------------===//
231 // LiveRangeEdit delegate methods
232 //===----------------------------------------------------------------------===//
233 
234 bool RAGreedy::LRE_CanEraseVirtReg(Register VirtReg) {
235  LiveInterval &LI = LIS->getInterval(VirtReg);
236  if (VRM->hasPhys(VirtReg)) {
237  Matrix->unassign(LI);
238  aboutToRemoveInterval(LI);
239  return true;
240  }
241  // Unassigned virtreg is probably in the priority queue.
242  // RegAllocBase will erase it after dequeueing.
243  // Nonetheless, clear the live-range so that the debug
244  // dump will show the right state for that VirtReg.
245  LI.clear();
246  return false;
247 }
248 
249 void RAGreedy::LRE_WillShrinkVirtReg(Register VirtReg) {
250  if (!VRM->hasPhys(VirtReg))
251  return;
252 
253  // Register is assigned, put it back on the queue for reassignment.
254  LiveInterval &LI = LIS->getInterval(VirtReg);
255  Matrix->unassign(LI);
256  RegAllocBase::enqueue(&LI);
257 }
258 
259 void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
260  ExtraInfo->LRE_DidCloneVirtReg(New, Old);
261 }
262 
263 void RAGreedy::ExtraRegInfo::LRE_DidCloneVirtReg(Register New, Register Old) {
264  // Cloning a register we haven't even heard about yet? Just ignore it.
265  if (!Info.inBounds(Old))
266  return;
267 
268  // LRE may clone a virtual register because dead code elimination causes it to
269  // be split into connected components. The new components are much smaller
270  // than the original, so they should get a new chance at being assigned.
271  // The clone is given the same stage as the (reset) parent.
272  Info[Old].Stage = RS_Assign;
273  Info.grow(New.id());
274  Info[New] = Info[Old];
275 }
276 
277 void RAGreedy::releaseMemory() {
278  SpillerInstance.reset();
279  GlobalCand.clear();
280 }
281 
282 void RAGreedy::enqueueImpl(const LiveInterval *LI) { enqueue(Queue, LI); }
283 
284 void RAGreedy::enqueue(PQueue &CurQueue, const LiveInterval *LI) {
285  // Prioritize live ranges by size, assigning larger ranges first.
286  // The queue holds (size, reg) pairs.
287  const unsigned Size = LI->getSize();
288  const Register Reg = LI->reg();
289  assert(Reg.isVirtual() && "Can only enqueue virtual registers");
290  unsigned Prio;
291 
292  auto Stage = ExtraInfo->getOrInitStage(Reg);
293  if (Stage == RS_New) {
294  Stage = RS_Assign;
295  ExtraInfo->setStage(Reg, Stage);
296  }
297  if (Stage == RS_Split) {
298  // Unsplit ranges that couldn't be allocated immediately are deferred until
299  // everything else has been allocated.
300  Prio = Size;
301  } else if (Stage == RS_Memory) {
302  // Memory operands should be considered last.
303  // Change the priority such that memory operands are assigned in
304  // the reverse order that they came in.
305  // TODO: Make this a member variable and probably do something about hints.
306  static unsigned MemOp = 0;
307  Prio = MemOp++;
308  } else {
309  // Giant live ranges fall back to the global assignment heuristic, which
310  // prevents excessive spilling in pathological cases.
311  bool ReverseLocal = TRI->reverseLocalAssignment();
312  const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
313  bool ForceGlobal =
314  !ReverseLocal && (Size / SlotIndex::InstrDist) >
315  (2 * RegClassInfo.getNumAllocatableRegs(&RC));
316  unsigned GlobalBit = 0;
317 
318  if (Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
319  LIS->intervalIsInOneMBB(*LI)) {
320  // Allocate original local ranges in linear instruction order. Since they
321  // are singly defined, this produces optimal coloring in the absence of
322  // global interference and other constraints.
323  if (!ReverseLocal)
324  Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
325  else {
326  // Allocating bottom up may allow many short LRGs to be assigned first
327  // to one of the cheap registers. This could be much faster for very
328  // large blocks on targets with many physical registers.
329  Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
330  }
331  } else {
332  // Allocate global and split ranges in long->short order. Long ranges that
333  // don't fit should be spilled (or split) ASAP so they don't create
334  // interference. Mark a bit to prioritize global above local ranges.
335  Prio = Size;
336  GlobalBit = 1;
337  }
338  if (RegClassPriorityTrumpsGlobalness)
339  Prio |= RC.AllocationPriority << 25 | GlobalBit << 24;
340  else
341  Prio |= GlobalBit << 29 | RC.AllocationPriority << 24;
342 
343  // Mark a higher bit to prioritize global and local above RS_Split.
344  Prio |= (1u << 31);
345 
346  // Boost ranges that have a physical register hint.
347  if (VRM->hasKnownPreference(Reg))
348  Prio |= (1u << 30);
349  }
350  // The virtual register number is a tie breaker for same-sized ranges.
351  // Give lower vreg numbers higher priority to assign them first.
352  CurQueue.push(std::make_pair(Prio, ~Reg));
353 }
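// Illustrative note (not part of the original source): for the default case
// (RegClassPriorityTrumpsGlobalness == false) the priority word built above
// packs its fields roughly as follows:
//
//   bit  31     : set for local/global ranges so they sort above RS_Split
//   bit  30     : set when the vreg has a known physreg preference (hint)
//   bit  29     : the GlobalBit
//   bits 24..28 : the register class AllocationPriority
//   bits  0..23 : the size / instruction-distance component
//
// With the flag enabled, AllocationPriority moves above the GlobalBit
// (bits 25..29 vs. bit 24), which is what makes class priority "trump"
// globalness in the queue ordering.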
354 
355 const LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
356 
357 const LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
358  if (CurQueue.empty())
359  return nullptr;
360  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
361  CurQueue.pop();
362  return LI;
363 }
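// Illustrative note (not part of the original source): the queue stores
// (Prio, ~Reg) pairs in a max-heap, so among equal priorities the entry with
// the larger ~Reg -- i.e. the numerically smaller virtual register -- is
// popped first, and dequeue() recovers the register by complementing again.
// For example, with equal Prio, vreg id 0x80000005 (~id = 0x7FFFFFFA) comes
// out before vreg id 0x80000009 (~id = 0x7FFFFFF6).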
364 
365 //===----------------------------------------------------------------------===//
366 // Direct Assignment
367 //===----------------------------------------------------------------------===//
368 
369 /// tryAssign - Try to assign VirtReg to an available register.
370 MCRegister RAGreedy::tryAssign(const LiveInterval &VirtReg,
371  AllocationOrder &Order,
372  SmallVectorImpl<Register> &NewVRegs,
373  const SmallVirtRegSet &FixedRegisters) {
374  MCRegister PhysReg;
375  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
376  assert(*I);
377  if (!Matrix->checkInterference(VirtReg, *I)) {
378  if (I.isHint())
379  return *I;
380  else
381  PhysReg = *I;
382  }
383  }
384  if (!PhysReg.isValid())
385  return PhysReg;
386 
387  // PhysReg is available, but there may be a better choice.
388 
389  // If we missed a simple hint, try to cheaply evict interference from the
390  // preferred register.
391  if (Register Hint = MRI->getSimpleHint(VirtReg.reg()))
392  if (Order.isHint(Hint)) {
393  MCRegister PhysHint = Hint.asMCReg();
394  LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n');
395 
396  if (EvictAdvisor->canEvictHintInterference(VirtReg, PhysHint,
397  FixedRegisters)) {
398  evictInterference(VirtReg, PhysHint, NewVRegs);
399  return PhysHint;
400  }
401  // Record the missed hint, we may be able to recover
402  // at the end if the surrounding allocation changed.
403  SetOfBrokenHints.insert(&VirtReg);
404  }
405 
406  // Try to evict interference from a cheaper alternative.
407  uint8_t Cost = RegCosts[PhysReg];
408 
409  // Most registers have 0 additional cost.
410  if (!Cost)
411  return PhysReg;
412 
413  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
414  << (unsigned)Cost << '\n');
415  MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
416  return CheapReg ? CheapReg : PhysReg;
417 }
418 
419 //===----------------------------------------------------------------------===//
420 // Interference eviction
421 //===----------------------------------------------------------------------===//
422 
423 MCRegister RAGreedy::canReassign(const LiveInterval &VirtReg,
424  Register PrevReg) const {
425  auto Order =
426  AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
427  MCRegister PhysReg;
428  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
429  if ((*I).id() == PrevReg.id())
430  continue;
431 
432  MCRegUnitIterator Units(*I, TRI);
433  for (; Units.isValid(); ++Units) {
434  // Instantiate a "subquery", not to be confused with the Queries array.
435  LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
436  if (subQ.checkInterference())
437  break;
438  }
439  // If no units have interference, break out with the current PhysReg.
440  if (!Units.isValid())
441  PhysReg = *I;
442  }
443  if (PhysReg)
444  LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
445  << printReg(PrevReg, TRI) << " to "
446  << printReg(PhysReg, TRI) << '\n');
447  return PhysReg;
448 }
449 
450 /// Return true if all interferences between VirtReg and PhysReg between
451 /// Start and End can be evicted.
452 ///
453 /// \param VirtReg Live range that is about to be assigned.
454 /// \param PhysReg Desired register for assignment.
455 /// \param Start Start of range to look for interferences.
456 /// \param End End of range to look for interferences.
457 /// \param MaxCost Only look for cheaper candidates and update with new cost
458 /// when returning true.
459 /// \return True when interference can be evicted cheaper than MaxCost.
460 bool RAGreedy::canEvictInterferenceInRange(const LiveInterval &VirtReg,
461  MCRegister PhysReg, SlotIndex Start,
462  SlotIndex End,
463  EvictionCost &MaxCost) const {
464  EvictionCost Cost;
465 
466  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
467  LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
468 
469  // Check if any interfering live range is heavier than MaxWeight.
470  for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
471  // Check if the interference overlaps the segment of interest.
472  if (!Intf->overlaps(Start, End))
473  continue;
474 
475  // Never evict spill products. They cannot split or spill.
476  if (ExtraInfo->getStage(*Intf) == RS_Done)
477  return false;
478 
479  // Would this break a satisfied hint?
480  bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
481  // Update eviction cost.
482  Cost.BrokenHints += BreaksHint;
483  Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
484  // Abort if this would be too expensive.
485  if (!(Cost < MaxCost))
486  return false;
487  }
488  }
489 
490  if (Cost.MaxWeight == 0)
491  return false;
492 
493  MaxCost = Cost;
494  return true;
495 }
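// Illustrative sketch (not part of the original source): the `Cost < MaxCost`
// checks above rely on eviction costs comparing lexicographically, broken
// hints first and maximum interfering spill weight second. A minimal
// standalone equivalent (hypothetical type, not the real EvictionCost) is
// shown below, guarded out of the build:
#if 0
#include <tuple>
struct EvictionCostSketch {
  unsigned BrokenHints = 0; // Number of satisfied hints this would break.
  float MaxWeight = 0;      // Heaviest interfering spill weight evicted.
  bool operator<(const EvictionCostSketch &O) const {
    return std::tie(BrokenHints, MaxWeight) <
           std::tie(O.BrokenHints, O.MaxWeight);
  }
};
#endif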
496 
497 /// Return the physical register that will be the best
498 /// candidate for eviction by a local split interval that will be created
499 /// between Start and End.
500 ///
501 /// \param Order The allocation order
502 /// \param VirtReg Live range that is about to be assigned.
503 /// \param Start Start of range to look for interferences
504 /// \param End End of range to look for interferences
505 /// \param BestEvictweight The eviction cost of that eviction
506 /// \return The PhysReg which is the best candidate for eviction and the
507 /// eviction cost in BestEvictweight
508 MCRegister RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
509  const LiveInterval &VirtReg,
510  SlotIndex Start, SlotIndex End,
511  float *BestEvictweight) const {
512  EvictionCost BestEvictCost;
513  BestEvictCost.setMax();
514  BestEvictCost.MaxWeight = VirtReg.weight();
515  MCRegister BestEvicteePhys;
516 
517  // Go over all physical registers and find the best candidate for eviction
518  for (MCRegister PhysReg : Order.getOrder()) {
519 
520  if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
521  BestEvictCost))
522  continue;
523 
524  // Best so far.
525  BestEvicteePhys = PhysReg;
526  }
527  *BestEvictweight = BestEvictCost.MaxWeight;
528  return BestEvicteePhys;
529 }
530 
531 /// evictInterference - Evict any interferring registers that prevent VirtReg
532 /// from being assigned to Physreg. This assumes that canEvictInterference
533 /// returned true.
534 void RAGreedy::evictInterference(const LiveInterval &VirtReg,
535  MCRegister PhysReg,
536  SmallVectorImpl<Register> &NewVRegs) {
537  // Make sure that VirtReg has a cascade number, and assign that cascade
538  // number to every evicted register. These live ranges can then only be
539  // evicted by a newer cascade, preventing infinite loops.
540  unsigned Cascade = ExtraInfo->getOrAssignNewCascade(VirtReg.reg());
541 
542  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
543  << " interference: Cascade " << Cascade << '\n');
544 
545  // Collect all interfering virtregs first.
546  SmallVector<const LiveInterval *, 8> Intfs;
547  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
548  LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
549  // We usually have the interfering VRegs cached so collectInterferingVRegs()
550  // should be fast. We may need to recalculate when different physregs
551  // overlap the same register unit, because different SubRanges were then
552  // queried against it.
553  const auto &IVR = Q.interferingVRegs();
554  Intfs.append(IVR.begin(), IVR.end());
555  }
556 
557  // Evict them second. This will invalidate the queries.
558  for (const LiveInterval *Intf : Intfs) {
559  // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
560  if (!VRM->hasPhys(Intf->reg()))
561  continue;
562 
563  LastEvicted.addEviction(PhysReg, VirtReg.reg(), Intf->reg());
564 
565  Matrix->unassign(*Intf);
566  assert((ExtraInfo->getCascade(Intf->reg()) < Cascade ||
567  VirtReg.isSpillable() < Intf->isSpillable()) &&
568  "Cannot decrease cascade number, illegal eviction");
569  ExtraInfo->setCascade(Intf->reg(), Cascade);
570  ++NumEvicted;
571  NewVRegs.push_back(Intf->reg());
572  }
573 }
574 
575 /// Returns true if the given \p PhysReg is a callee saved register and has not
576 /// been used for allocation yet.
577 bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
578  MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
579  if (!CSR)
580  return false;
581 
582  return !Matrix->isPhysRegUsed(PhysReg);
583 }
584 
585 Optional<unsigned>
586 RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
587  const AllocationOrder &Order,
588  unsigned CostPerUseLimit) const {
589  unsigned OrderLimit = Order.getOrder().size();
590 
591  if (CostPerUseLimit < uint8_t(~0u)) {
592  // Check if any registers in RC are below CostPerUseLimit.
593  const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
594  uint8_t MinCost = RegClassInfo.getMinCost(RC);
595  if (MinCost >= CostPerUseLimit) {
596  LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
597  << MinCost << ", no cheaper registers to be found.\n");
598  return None;
599  }
600 
601  // It is normal for register classes to have a long tail of registers with
602  // the same cost. We don't need to look at them if they're too expensive.
603  if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) {
604  OrderLimit = RegClassInfo.getLastCostChange(RC);
605  LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
606  << " regs.\n");
607  }
608  }
609  return OrderLimit;
610 }
611 
612 bool RegAllocEvictionAdvisor::canAllocatePhysReg(unsigned CostPerUseLimit,
613  MCRegister PhysReg) const {
614  if (RegCosts[PhysReg] >= CostPerUseLimit)
615  return false;
616  // The first use of a callee-saved register in a function has cost 1.
617  // Don't start using a CSR when the CostPerUseLimit is low.
618  if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
619  LLVM_DEBUG(
620  dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
621  << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
622  << '\n');
623  return false;
624  }
625  return true;
626 }
627 
628 /// tryEvict - Try to evict all interferences for a physreg.
629 /// @param VirtReg Currently unassigned virtual register.
630 /// @param Order Physregs to try.
631 /// @return Physreg to assign VirtReg, or 0.
632 MCRegister RAGreedy::tryEvict(const LiveInterval &VirtReg,
633  AllocationOrder &Order,
634  SmallVectorImpl<Register> &NewVRegs,
635  uint8_t CostPerUseLimit,
636  const SmallVirtRegSet &FixedRegisters) {
639 
640  MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate(
641  VirtReg, Order, CostPerUseLimit, FixedRegisters);
642  if (BestPhys.isValid())
643  evictInterference(VirtReg, BestPhys, NewVRegs);
644  return BestPhys;
645 }
646 
647 //===----------------------------------------------------------------------===//
648 // Region Splitting
649 //===----------------------------------------------------------------------===//
650 
651 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
652 /// interference pattern in Physreg and its aliases. Add the constraints to
653 /// SpillPlacement and return the static cost of this split in Cost, assuming
654 /// that all preferences in SplitConstraints are met.
655 /// Return false if there are no bundles with positive bias.
656 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
657  BlockFrequency &Cost) {
658  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
659 
660  // Reset interference dependent info.
661  SplitConstraints.resize(UseBlocks.size());
662  BlockFrequency StaticCost = 0;
663  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
664  const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
665  SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
666 
667  BC.Number = BI.MBB->getNumber();
668  Intf.moveToBlock(BC.Number);
669  BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
670  BC.Exit = (BI.LiveOut &&
671  !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
672  ? SpillPlacement::PrefReg
673  : SpillPlacement::DontCare;
674  BC.ChangesValue = BI.FirstDef.isValid();
675 
676  if (!Intf.hasInterference())
677  continue;
678 
679  // Number of spill code instructions to insert.
680  unsigned Ins = 0;
681 
682  // Interference for the live-in value.
683  if (BI.LiveIn) {
684  if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
685  BC.Entry = SpillPlacement::MustSpill;
686  ++Ins;
687  } else if (Intf.first() < BI.FirstInstr) {
688  BC.Entry = SpillPlacement::PrefSpill;
689  ++Ins;
690  } else if (Intf.first() < BI.LastInstr) {
691  ++Ins;
692  }
693 
694  // Abort if the spill cannot be inserted at the MBB's start.
695  if (((BC.Entry == SpillPlacement::MustSpill) ||
696  (BC.Entry == SpillPlacement::PrefSpill)) &&
697  SlotIndex::isEarlierInstr(BI.FirstInstr,
698  SA->getFirstSplitPoint(BC.Number)))
699  return false;
700  }
701 
702  // Interference for the live-out value.
703  if (BI.LiveOut) {
704  if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
705  BC.Exit = SpillPlacement::MustSpill;
706  ++Ins;
707  } else if (Intf.last() > BI.LastInstr) {
708  BC.Exit = SpillPlacement::PrefSpill;
709  ++Ins;
710  } else if (Intf.last() > BI.FirstInstr) {
711  ++Ins;
712  }
713  }
714 
715  // Accumulate the total frequency of inserted spill code.
716  while (Ins--)
717  StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
718  }
719  Cost = StaticCost;
720 
721  // Add constraints for use-blocks. Note that these are the only constraints
722  // that may add a positive bias, it is downhill from here.
723  SpillPlacer->addConstraints(SplitConstraints);
724  return SpillPlacer->scanActiveBundles();
725 }
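// Illustrative note (not part of the original source): each BlockConstraint
// produced above tells the spill placer how this block would like the value
// to arrive and leave: PrefReg when the block can use the register directly,
// PrefSpill/MustSpill when interference pushes the value onto the stack near
// the block boundary, and DontCare when the value is not live across that
// edge. The accumulated StaticCost is simply the block frequency multiplied
// by the number of reloads/spills (Ins) those borders would require.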
726 
727 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
728 /// live-through blocks in Blocks.
729 bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
730  ArrayRef<unsigned> Blocks) {
731  const unsigned GroupSize = 8;
732  SpillPlacement::BlockConstraint BCS[GroupSize];
733  unsigned TBS[GroupSize];
734  unsigned B = 0, T = 0;
735 
736  for (unsigned Number : Blocks) {
737  Intf.moveToBlock(Number);
738 
739  if (!Intf.hasInterference()) {
740  assert(T < GroupSize && "Array overflow");
741  TBS[T] = Number;
742  if (++T == GroupSize) {
743  SpillPlacer->addLinks(makeArrayRef(TBS, T));
744  T = 0;
745  }
746  continue;
747  }
748 
749  assert(B < GroupSize && "Array overflow");
750  BCS[B].Number = Number;
751 
752  // Abort if the spill cannot be inserted at the MBB's start.
753  MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
754  auto FirstNonDebugInstr = MBB->getFirstNonDebugInstr();
755  if (FirstNonDebugInstr != MBB->end() &&
756  SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*FirstNonDebugInstr),
757  SA->getFirstSplitPoint(Number)))
758  return false;
759  // Interference for the live-in value.
760  if (Intf.first() <= Indexes->getMBBStartIdx(Number))
761  BCS[B].Entry = SpillPlacement::MustSpill;
762  else
763  BCS[B].Entry = SpillPlacement::PrefSpill;
764 
765  // Interference for the live-out value.
766  if (Intf.last() >= SA->getLastSplitPoint(Number))
767  BCS[B].Exit = SpillPlacement::MustSpill;
768  else
769  BCS[B].Exit = SpillPlacement::PrefSpill;
770 
771  if (++B == GroupSize) {
772  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
773  B = 0;
774  }
775  }
776 
777  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
778  SpillPlacer->addLinks(makeArrayRef(TBS, T));
779  return true;
780 }
781 
782 bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
783  // Keep track of through blocks that have not been added to SpillPlacer.
784  BitVector Todo = SA->getThroughBlocks();
785  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
786  unsigned AddedTo = 0;
787 #ifndef NDEBUG
788  unsigned Visited = 0;
789 #endif
790 
791  unsigned long Budget = GrowRegionComplexityBudget;
792  while (true) {
793  ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
794  // Find new through blocks in the periphery of PrefRegBundles.
795  for (unsigned Bundle : NewBundles) {
796  // Look at all blocks connected to Bundle in the full graph.
797  ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
798  // Limit compilation time by bailing out after we use all our budget.
799  if (Blocks.size() >= Budget)
800  return false;
801  Budget -= Blocks.size();
802  for (unsigned Block : Blocks) {
803  if (!Todo.test(Block))
804  continue;
805  Todo.reset(Block);
806  // This is a new through block. Add it to SpillPlacer later.
807  ActiveBlocks.push_back(Block);
808 #ifndef NDEBUG
809  ++Visited;
810 #endif
811  }
812  }
813  // Any new blocks to add?
814  if (ActiveBlocks.size() == AddedTo)
815  break;
816 
817  // Compute through constraints from the interference, or assume that all
818  // through blocks prefer spilling when forming compact regions.
819  auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
820  if (Cand.PhysReg) {
821  if (!addThroughConstraints(Cand.Intf, NewBlocks))
822  return false;
823  } else
824  // Provide a strong negative bias on through blocks to prevent unwanted
825  // liveness on loop backedges.
826  SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
827  AddedTo = ActiveBlocks.size();
828 
829  // Perhaps iterating can enable more bundles?
830  SpillPlacer->iterate();
831  }
832  LLVM_DEBUG(dbgs() << ", v=" << Visited);
833  return true;
834 }
835 
836 /// calcCompactRegion - Compute the set of edge bundles that should be live
837 /// when splitting the current live range into compact regions. Compact
838 /// regions can be computed without looking at interference. They are the
839 /// regions formed by removing all the live-through blocks from the live range.
840 ///
841 /// Returns false if the current live range is already compact, or if the
842 /// compact regions would form single block regions anyway.
843 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
844  // Without any through blocks, the live range is already compact.
845  if (!SA->getNumThroughBlocks())
846  return false;
847 
848  // Compact regions don't correspond to any physreg.
849  Cand.reset(IntfCache, MCRegister::NoRegister);
850 
851  LLVM_DEBUG(dbgs() << "Compact region bundles");
852 
853  // Use the spill placer to determine the live bundles. GrowRegion pretends
854  // that all the through blocks have interference when PhysReg is unset.
855  SpillPlacer->prepare(Cand.LiveBundles);
856 
857  // The static split cost will be zero since Cand.Intf reports no interference.
858  BlockFrequency Cost;
859  if (!addSplitConstraints(Cand.Intf, Cost)) {
860  LLVM_DEBUG(dbgs() << ", none.\n");
861  return false;
862  }
863 
864  if (!growRegion(Cand)) {
865  LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
866  return false;
867  }
868 
869  SpillPlacer->finish();
870 
871  if (!Cand.LiveBundles.any()) {
872  LLVM_DEBUG(dbgs() << ", none.\n");
873  return false;
874  }
875 
876  LLVM_DEBUG({
877  for (int I : Cand.LiveBundles.set_bits())
878  dbgs() << " EB#" << I;
879  dbgs() << ".\n";
880  });
881  return true;
882 }
883 
884 /// calcSpillCost - Compute how expensive it would be to split the live range in
885 /// SA around all use blocks instead of forming bundle regions.
886 BlockFrequency RAGreedy::calcSpillCost() {
887  BlockFrequency Cost = 0;
888  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
889  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
890  unsigned Number = BI.MBB->getNumber();
891  // We normally only need one spill instruction - a load or a store.
892  Cost += SpillPlacer->getBlockFrequency(Number);
893 
894  // Unless the value is redefined in the block.
895  if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
896  Cost += SpillPlacer->getBlockFrequency(Number);
897  }
898  return Cost;
899 }
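// Illustrative example (not part of the original source): for a live range
// used in three blocks with frequencies 10, 20 and 5, where the value is
// live-through and redefined in the second block, the estimate above is
// 10 + (20 + 20) + 5 = 55 units of block frequency -- one load or store per
// use block, plus one extra for the block that redefines the value.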
900 
901 /// Check if splitting Evictee will create a local split interval in
902 /// basic block number BBNumber that may cause a bad eviction chain. This is
903 /// intended to prevent bad eviction sequences like:
904 /// movl %ebp, 8(%esp) # 4-byte Spill
905 /// movl %ecx, %ebp
906 /// movl %ebx, %ecx
907 /// movl %edi, %ebx
908 /// movl %edx, %edi
909 /// cltd
910 /// idivl %esi
911 /// movl %edi, %edx
912 /// movl %ebx, %edi
913 /// movl %ecx, %ebx
914 /// movl %ebp, %ecx
915 /// movl 16(%esp), %ebp # 4 - byte Reload
916 ///
917 /// Such sequences are created in 2 scenarios:
918 ///
919 /// Scenario #1:
920 /// %0 is evicted from physreg0 by %1.
921 /// Evictee %0 is intended for region splitting with split candidate
922 /// physreg0 (the reg %0 was evicted from).
923 /// Region splitting creates a local interval because of interference with the
924 /// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
925 /// and "by stack" intervals, plus a local interval created when interference
926 /// occurs).
927 /// One of the split intervals ends up evicting %2 from physreg1.
928 /// Evictee %2 is intended for region splitting with split candidate
929 /// physreg1.
930 /// One of the split intervals ends up evicting %3 from physreg2, etc.
931 ///
932 /// Scenario #2
933 /// %0 is evicted from physreg0 by %1.
934 /// %2 is evicted from physreg2 by %3 etc.
935 /// Evictee %0 is intended for region splitting with split candidate
936 /// physreg1.
937 /// Region splitting creates a local interval because of interference with the
938 /// evictor %1.
939 /// One of the split intervals ends up evicting back original evictor %1
940 /// from physreg0 (the reg %0 was evicted from).
941 /// Another evictee %2 is intended for region splitting with split candidate
942 /// physreg1.
943 /// One of the split intervals ends up evicting %3 from physreg2, etc.
944 ///
945 /// \param Evictee The register considered to be split.
946 /// \param Cand The split candidate that determines the physical register
947 /// we are splitting for and the interferences.
948 /// \param BBNumber The number of a BB for which the region split process will
949 /// create a local split interval.
950 /// \param Order The physical registers that may get evicted by a split
951 /// artifact of Evictee.
952 /// \return True if splitting Evictee may cause a bad eviction chain, false
953 /// otherwise.
954 bool RAGreedy::splitCanCauseEvictionChain(Register Evictee,
955  GlobalSplitCandidate &Cand,
956  unsigned BBNumber,
957  const AllocationOrder &Order) {
958  EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
959  unsigned Evictor = VregEvictorInfo.first;
960  MCRegister PhysReg = VregEvictorInfo.second;
961 
962  // No actual evictor.
963  if (!Evictor || !PhysReg)
964  return false;
965 
966  float MaxWeight = 0;
967  MCRegister FutureEvictedPhysReg =
968  getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
969  Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);
970 
971  // The bad eviction chain occurs when either the split candidate is the
972  // evicting reg or one of the split artifact will evict the evicting reg.
973  if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
974  return false;
975 
976  Cand.Intf.moveToBlock(BBNumber);
977 
978  // Check to see if the Evictor contains interference (with Evictee) in the
979  // given BB. If so, this interference caused the eviction of Evictee from
980  // PhysReg. This suggests that we will create a local interval during the
981  // region split to avoid this interference. This local interval may cause a bad
982  // eviction chain.
983  if (!LIS->hasInterval(Evictor))
984  return false;
985  LiveInterval &EvictorLI = LIS->getInterval(Evictor);
986  if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
987  return false;
988 
989  // Now, check to see if the local interval we will create is going to be
990  // expensive enough to evict somebody. If so, this may cause a bad eviction
991  // chain.
992  float splitArtifactWeight =
993  VRAI->futureWeight(LIS->getInterval(Evictee),
994  Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
995  if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
996  return false;
997 
998  return true;
999 }
1000 
1001 /// calcGlobalSplitCost - Return the global split cost of following the split
1002 /// pattern in LiveBundles. This cost should be added to the local cost of the
1003 /// interference pattern in SplitConstraints.
1004 ///
1005 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
1006  const AllocationOrder &Order) {
1007  BlockFrequency GlobalCost = 0;
1008  const BitVector &LiveBundles = Cand.LiveBundles;
1009  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1010  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
1011  const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
1012  SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
1013  bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, false)];
1014  bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
1015  unsigned Ins = 0;
1016 
1017  Cand.Intf.moveToBlock(BC.Number);
1018 
1019  if (BI.LiveIn)
1020  Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
1021  if (BI.LiveOut)
1022  Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
1023  while (Ins--)
1024  GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1025  }
1026 
1027  for (unsigned Number : Cand.ActiveBlocks) {
1028  bool RegIn = LiveBundles[Bundles->getBundle(Number, false)];
1029  bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
1030  if (!RegIn && !RegOut)
1031  continue;
1032  if (RegIn && RegOut) {
1033  // We need double spill code if this block has interference.
1034  Cand.Intf.moveToBlock(Number);
1035  if (Cand.Intf.hasInterference()) {
1036  GlobalCost += SpillPlacer->getBlockFrequency(Number);
1037  GlobalCost += SpillPlacer->getBlockFrequency(Number);
1038  }
1039  continue;
1040  }
1041  // live-in / stack-out or stack-in live-out.
1042  GlobalCost += SpillPlacer->getBlockFrequency(Number);
1043  }
1044  return GlobalCost;
1045 }
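// Illustrative note (not part of the original source): in the loops above a
// use block contributes one unit of its frequency for each boundary where the
// bundle placement (RegIn/RegOut) disagrees with the constraint computed for
// that boundary, while a live-through block contributes two units (spill plus
// reload) when the value stays in a register across a block with
// interference, or one unit when it switches between register and stack at
// one of its boundaries.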
1046 
1047 /// splitAroundRegion - Split the current live range around the regions
1048 /// determined by BundleCand and GlobalCand.
1049 ///
1050 /// Before calling this function, GlobalCand and BundleCand must be initialized
1051 /// so each bundle is assigned to a valid candidate, or NoCand for the
1052 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
1053 /// objects must be initialized for the current live range, and intervals
1054 /// created for the used candidates.
1055 ///
1056 /// @param LREdit The LiveRangeEdit object handling the current split.
1057 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
1058 /// must appear in this list.
1059 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1060  ArrayRef<unsigned> UsedCands) {
1061  // These are the intervals created for new global ranges. We may create more
1062  // intervals for local ranges.
1063  const unsigned NumGlobalIntvs = LREdit.size();
1064  LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
1065  << " globals.\n");
1066  assert(NumGlobalIntvs && "No global intervals configured");
1067 
1068  // Isolate even single instructions when dealing with a proper sub-class.
1069  // That guarantees register class inflation for the stack interval because it
1070  // is all copies.
1071  Register Reg = SA->getParent().reg();
1072  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1073 
1074  // First handle all the blocks with uses.
1075  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1076  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1077  unsigned Number = BI.MBB->getNumber();
1078  unsigned IntvIn = 0, IntvOut = 0;
1079  SlotIndex IntfIn, IntfOut;
1080  if (BI.LiveIn) {
1081  unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1082  if (CandIn != NoCand) {
1083  GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1084  IntvIn = Cand.IntvIdx;
1085  Cand.Intf.moveToBlock(Number);
1086  IntfIn = Cand.Intf.first();
1087  }
1088  }
1089  if (BI.LiveOut) {
1090  unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1091  if (CandOut != NoCand) {
1092  GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1093  IntvOut = Cand.IntvIdx;
1094  Cand.Intf.moveToBlock(Number);
1095  IntfOut = Cand.Intf.last();
1096  }
1097  }
1098 
1099  // Create separate intervals for isolated blocks with multiple uses.
1100  if (!IntvIn && !IntvOut) {
1101  LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
1102  if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1103  SE->splitSingleBlock(BI);
1104  continue;
1105  }
1106 
1107  if (IntvIn && IntvOut)
1108  SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1109  else if (IntvIn)
1110  SE->splitRegInBlock(BI, IntvIn, IntfIn);
1111  else
1112  SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1113  }
1114 
1115  // Handle live-through blocks. The relevant live-through blocks are stored in
1116  // the ActiveBlocks list with each candidate. We need to filter out
1117  // duplicates.
1118  BitVector Todo = SA->getThroughBlocks();
1119  for (unsigned UsedCand : UsedCands) {
1120  ArrayRef<unsigned> Blocks = GlobalCand[UsedCand].ActiveBlocks;
1121  for (unsigned Number : Blocks) {
1122  if (!Todo.test(Number))
1123  continue;
1124  Todo.reset(Number);
1125 
1126  unsigned IntvIn = 0, IntvOut = 0;
1127  SlotIndex IntfIn, IntfOut;
1128 
1129  unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1130  if (CandIn != NoCand) {
1131  GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1132  IntvIn = Cand.IntvIdx;
1133  Cand.Intf.moveToBlock(Number);
1134  IntfIn = Cand.Intf.first();
1135  }
1136 
1137  unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1138  if (CandOut != NoCand) {
1139  GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1140  IntvOut = Cand.IntvIdx;
1141  Cand.Intf.moveToBlock(Number);
1142  IntfOut = Cand.Intf.last();
1143  }
1144  if (!IntvIn && !IntvOut)
1145  continue;
1146  SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1147  }
1148  }
1149 
1150  ++NumGlobalSplits;
1151 
1152  SmallVector<unsigned, 8> IntvMap;
1153  SE->finish(&IntvMap);
1154  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
1155 
1156  unsigned OrigBlocks = SA->getNumLiveBlocks();
1157 
1158  // Sort out the new intervals created by splitting. We get four kinds:
1159  // - Remainder intervals should not be split again.
1160  // - Candidate intervals can be assigned to Cand.PhysReg.
1161  // - Block-local splits are candidates for local splitting.
1162  // - DCE leftovers should go back on the queue.
1163  for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
1164  const LiveInterval &Reg = LIS->getInterval(LREdit.get(I));
1165 
1166  // Ignore old intervals from DCE.
1167  if (ExtraInfo->getOrInitStage(Reg.reg()) != RS_New)
1168  continue;
1169 
1170  // Remainder interval. Don't try splitting again, spill if it doesn't
1171  // allocate.
1172  if (IntvMap[I] == 0) {
1173  ExtraInfo->setStage(Reg, RS_Spill);
1174  continue;
1175  }
1176 
1177  // Global intervals. Allow repeated splitting as long as the number of live
1178  // blocks is strictly decreasing.
1179  if (IntvMap[I] < NumGlobalIntvs) {
1180  if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
1181  LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1182  << " blocks as original.\n");
1183  // Don't allow repeated splitting as a safe guard against looping.
1184  ExtraInfo->setStage(Reg, RS_Split2);
1185  }
1186  continue;
1187  }
1188 
1189  // Other intervals are treated as new. This includes local intervals created
1190  // for blocks with multiple uses, and anything created by DCE.
1191  }
1192 
1193  if (VerifyEnabled)
1194  MF->verify(this, "After splitting live range around region");
1195 }
1196 
1197 MCRegister RAGreedy::tryRegionSplit(const LiveInterval &VirtReg,
1198  AllocationOrder &Order,
1199  SmallVectorImpl<Register> &NewVRegs) {
1200  if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
1201  return MCRegister::NoRegister;
1202  unsigned NumCands = 0;
1203  BlockFrequency SpillCost = calcSpillCost();
1204  BlockFrequency BestCost;
1205 
1206  // Check if we can split this live range around a compact region.
1207  bool HasCompact = calcCompactRegion(GlobalCand.front());
1208  if (HasCompact) {
1209  // Yes, keep GlobalCand[0] as the compact region candidate.
1210  NumCands = 1;
1211  BestCost = BlockFrequency::getMaxFrequency();
1212  } else {
1213  // No benefit from the compact region, our fallback will be per-block
1214  // splitting. Make sure we find a solution that is cheaper than spilling.
1215  BestCost = SpillCost;
1216  LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
1217  MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
1218  }
1219 
1220  unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
1221  NumCands, false /*IgnoreCSR*/);
1222 
1223  // No solutions found, fall back to single block splitting.
1224  if (!HasCompact && BestCand == NoCand)
1225  return MCRegister::NoRegister;
1226 
1227  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1228 }
1229 
1230 unsigned RAGreedy::calculateRegionSplitCost(const LiveInterval &VirtReg,
1231  AllocationOrder &Order,
1232  BlockFrequency &BestCost,
1233  unsigned &NumCands,
1234  bool IgnoreCSR) {
1235  unsigned BestCand = NoCand;
1236  for (MCPhysReg PhysReg : Order) {
1237  assert(PhysReg);
1238  if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg))
1239  continue;
1240 
1241  // Discard bad candidates before we run out of interference cache cursors.
1242  // This will only affect register classes with a lot of registers (>32).
1243  if (NumCands == IntfCache.getMaxCursors()) {
1244  unsigned WorstCount = ~0u;
1245  unsigned Worst = 0;
1246  for (unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
1247  if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
1248  continue;
1249  unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
1250  if (Count < WorstCount) {
1251  Worst = CandIndex;
1252  WorstCount = Count;
1253  }
1254  }
1255  --NumCands;
1256  GlobalCand[Worst] = GlobalCand[NumCands];
1257  if (BestCand == NumCands)
1258  BestCand = Worst;
1259  }
1260 
1261  if (GlobalCand.size() <= NumCands)
1262  GlobalCand.resize(NumCands+1);
1263  GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1264  Cand.reset(IntfCache, PhysReg);
1265 
1266  SpillPlacer->prepare(Cand.LiveBundles);
1267  BlockFrequency Cost;
1268  if (!addSplitConstraints(Cand.Intf, Cost)) {
1269  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
1270  continue;
1271  }
1272  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
1273  MBFI->printBlockFreq(dbgs(), Cost));
1274  if (Cost >= BestCost) {
1275  LLVM_DEBUG({
1276  if (BestCand == NoCand)
1277  dbgs() << " worse than no bundles\n";
1278  else
1279  dbgs() << " worse than "
1280  << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1281  });
1282  continue;
1283  }
1284  if (!growRegion(Cand)) {
1285  LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
1286  continue;
1287  }
1288 
1289  SpillPlacer->finish();
1290 
1291  // No live bundles, defer to splitSingleBlocks().
1292  if (!Cand.LiveBundles.any()) {
1293  LLVM_DEBUG(dbgs() << " no bundles.\n");
1294  continue;
1295  }
1296 
1297  Cost += calcGlobalSplitCost(Cand, Order);
1298  LLVM_DEBUG({
1299  dbgs() << ", total = ";
1300  MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
1301  for (int I : Cand.LiveBundles.set_bits())
1302  dbgs() << " EB#" << I;
1303  dbgs() << ".\n";
1304  });
1305  if (Cost < BestCost) {
1306  BestCand = NumCands;
1307  BestCost = Cost;
1308  }
1309  ++NumCands;
1310  }
1311 
1312  return BestCand;
1313 }
1314 
1315 unsigned RAGreedy::doRegionSplit(const LiveInterval &VirtReg, unsigned BestCand,
1316  bool HasCompact,
1317  SmallVectorImpl<Register> &NewVRegs) {
1318  SmallVector<unsigned, 8> UsedCands;
1319  // Prepare split editor.
1320  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1321  SE->reset(LREdit, SplitSpillMode);
1322 
1323  // Assign all edge bundles to the preferred candidate, or NoCand.
1324  BundleCand.assign(Bundles->getNumBundles(), NoCand);
1325 
1326  // Assign bundles for the best candidate region.
1327  if (BestCand != NoCand) {
1328  GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1329  if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1330  UsedCands.push_back(BestCand);
1331  Cand.IntvIdx = SE->openIntv();
1332  LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
1333  << B << " bundles, intv " << Cand.IntvIdx << ".\n");
1334  (void)B;
1335  }
1336  }
1337 
1338  // Assign bundles for the compact region.
1339  if (HasCompact) {
1340  GlobalSplitCandidate &Cand = GlobalCand.front();
1341  assert(!Cand.PhysReg && "Compact region has no physreg");
1342  if (unsigned B = Cand.getBundles(BundleCand, 0)) {
1343  UsedCands.push_back(0);
1344  Cand.IntvIdx = SE->openIntv();
1345  LLVM_DEBUG(dbgs() << "Split for compact region in " << B
1346  << " bundles, intv " << Cand.IntvIdx << ".\n");
1347  (void)B;
1348  }
1349  }
1350 
1351  splitAroundRegion(LREdit, UsedCands);
1352  return 0;
1353 }
1354 
1355 //===----------------------------------------------------------------------===//
1356 // Per-Block Splitting
1357 //===----------------------------------------------------------------------===//
1358 
1359 /// tryBlockSplit - Split a global live range around every block with uses. This
1360 /// creates a lot of local live ranges that will be split by tryLocalSplit if
1361 /// they don't allocate.
1362 unsigned RAGreedy::tryBlockSplit(const LiveInterval &VirtReg,
1363  AllocationOrder &Order,
1364  SmallVectorImpl<Register> &NewVRegs) {
1365  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
1366  Register Reg = VirtReg.reg();
1367  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1368  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1369  SE->reset(LREdit, SplitSpillMode);
1370  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1371  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1372  if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1373  SE->splitSingleBlock(BI);
1374  }
1375  // No blocks were split.
1376  if (LREdit.empty())
1377  return 0;
1378 
1379  // We did split for some blocks.
1380  SmallVector<unsigned, 8> IntvMap;
1381  SE->finish(&IntvMap);
1382 
1383  // Tell LiveDebugVariables about the new ranges.
1384  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
1385 
1386  // Sort out the new intervals created by splitting. The remainder interval
1387  // goes straight to spilling, the new local ranges get to stay RS_New.
1388  for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
1389  const LiveInterval &LI = LIS->getInterval(LREdit.get(I));
1390  if (ExtraInfo->getOrInitStage(LI.reg()) == RS_New && IntvMap[I] == 0)
1391  ExtraInfo->setStage(LI, RS_Spill);
1392  }
1393 
1394  if (VerifyEnabled)
1395  MF->verify(this, "After splitting live range around basic blocks");
1396  return 0;
1397 }
1398 
1399 //===----------------------------------------------------------------------===//
1400 // Per-Instruction Splitting
1401 //===----------------------------------------------------------------------===//
1402 
1403 /// Get the number of allocatable registers that match the constraints of \p Reg
1404 /// on \p MI and that are also in \p SuperRC.
1405 static unsigned getNumAllocatableRegsForConstraints(
1406  const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC,
1407  const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1408  const RegisterClassInfo &RCI) {
1409  assert(SuperRC && "Invalid register class");
1410 
1411  const TargetRegisterClass *ConstrainedRC =
1412  MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
1413  /* ExploreBundle */ true);
1414  if (!ConstrainedRC)
1415  return 0;
1416  return RCI.getNumAllocatableRegs(ConstrainedRC);
1417 }
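// Illustrative example (not part of the original source): on x86-64 a vreg in
// GR32 has dozens of allocatable registers, but an instruction that
// constrains it to GR32_ABCD leaves only four (EAX, EBX, ECX, EDX); in that
// case this helper returns 4, signalling that splitting around the
// instruction could let the rest of the live range use the larger class.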
1418 
1419 /// tryInstructionSplit - Split a live range around individual instructions.
1420 /// This is normally not worthwhile since the spiller is doing essentially the
1421 /// same thing. However, when the live range is in a constrained register
1422 /// class, it may help to insert copies such that parts of the live range can
1423 /// be moved to a larger register class.
1424 ///
1425 /// This is similar to spilling to a larger register class.
1426 unsigned RAGreedy::tryInstructionSplit(const LiveInterval &VirtReg,
1427  AllocationOrder &Order,
1428  SmallVectorImpl<Register> &NewVRegs) {
1429  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
1430  // There is no point to this if there are no larger sub-classes.
1431  if (!RegClassInfo.isProperSubClass(CurRC))
1432  return 0;
1433 
1434  // Always enable split spill mode, since we're effectively spilling to a
1435  // register.
1436  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1437  SE->reset(LREdit, SplitEditor::SM_Size);
1438 
1439  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1440  if (Uses.size() <= 1)
1441  return 0;
1442 
1443  LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
1444  << " individual instrs.\n");
1445 
1446  const TargetRegisterClass *SuperRC =
1447  TRI->getLargestLegalSuperClass(CurRC, *MF);
1448  unsigned SuperRCNumAllocatableRegs =
1449  RegClassInfo.getNumAllocatableRegs(SuperRC);
1450  // Split around every non-copy instruction if this split will relax
1451  // the constraints on the virtual register.
1452  // Otherwise, splitting just inserts uncoalescable copies that do not help
1453  // the allocation.
1454  for (const SlotIndex Use : Uses) {
1455  if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Use))
1456  if (MI->isFullCopy() ||
1457  SuperRCNumAllocatableRegs ==
1458  getNumAllocatableRegsForConstraints(MI, VirtReg.reg(), SuperRC,
1459  TII, TRI, RegClassInfo)) {
1460  LLVM_DEBUG(dbgs() << " skip:\t" << Use << '\t' << *MI);
1461  continue;
1462  }
1463  SE->openIntv();
1464  SlotIndex SegStart = SE->enterIntvBefore(Use);
1465  SlotIndex SegStop = SE->leaveIntvAfter(Use);
1466  SE->useIntv(SegStart, SegStop);
1467  }
1468 
1469  if (LREdit.empty()) {
1470  LLVM_DEBUG(dbgs() << "All uses were copies.\n");
1471  return 0;
1472  }
1473 
1474  SmallVector<unsigned, 8> IntvMap;
1475  SE->finish(&IntvMap);
1476  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
1477  // Assign all new registers to RS_Spill. This was the last chance.
1478  ExtraInfo->setStage(LREdit.begin(), LREdit.end(), RS_Spill);
1479  return 0;
1480 }
1481 
1482 //===----------------------------------------------------------------------===//
1483 // Local Splitting
1484 //===----------------------------------------------------------------------===//
1485 
1486 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1487 /// in order to use PhysReg between two entries in SA->UseSlots.
1488 ///
1489 /// GapWeight[I] represents the gap between UseSlots[I] and UseSlots[I + 1].
1490 ///
1491 void RAGreedy::calcGapWeights(MCRegister PhysReg,
1492  SmallVectorImpl<float> &GapWeight) {
1493  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1494  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1495  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1496  const unsigned NumGaps = Uses.size()-1;
1497 
1498  // Start and end points for the interference check.
1499  SlotIndex StartIdx =
1500  BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
1501  SlotIndex StopIdx =
1502  BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
1503 
1504  GapWeight.assign(NumGaps, 0.0f);
1505 
1506  // Add interference from each overlapping register.
1507  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1508  if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
1509  .checkInterference())
1510  continue;
1511 
1512  // We know that VirtReg is a continuous interval from FirstInstr to
1513  // LastInstr, so we don't need InterferenceQuery.
1514  //
1515  // Interference that overlaps an instruction is counted in both gaps
1516  // surrounding the instruction. The exception is interference before
1517  // StartIdx and after StopIdx.
1518  //
1519  LiveIntervalUnion::SegmentIter IntI =
1520  Matrix->getLiveUnions()[*Units].find(StartIdx);
1521  for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1522  // Skip the gaps before IntI.
1523  while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1524  if (++Gap == NumGaps)
1525  break;
1526  if (Gap == NumGaps)
1527  break;
1528 
1529  // Update the gaps covered by IntI.
1530  const float weight = IntI.value()->weight();
1531  for (; Gap != NumGaps; ++Gap) {
1532  GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1533  if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1534  break;
1535  }
1536  if (Gap == NumGaps)
1537  break;
1538  }
1539  }
1540 
1541  // Add fixed interference.
1542  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1543  const LiveRange &LR = LIS->getRegUnit(*Units);
1544  LiveRange::const_iterator I = LR.find(StartIdx);
1545  LiveRange::const_iterator E = LR.end();
1546 
1547  // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
1548  for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
1549  while (Uses[Gap+1].getBoundaryIndex() < I->start)
1550  if (++Gap == NumGaps)
1551  break;
1552  if (Gap == NumGaps)
1553  break;
1554 
1555  for (; Gap != NumGaps; ++Gap) {
1556  GapWeight[Gap] = huge_valf;
1557  if (Uses[Gap+1].getBaseIndex() >= I->end)
1558  break;
1559  }
1560  if (Gap == NumGaps)
1561  break;
1562  }
1563  }
1564 }
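// Illustrative example (not part of the original source): with use slots
// {A, B, C, D} there are three gaps (A-B, B-C, C-D). Interference that covers
// only instruction B raises both GapWeight[0] and GapWeight[1], since an
// overlapped instruction is counted in the gaps on both sides, while fixed
// (reserved register) interference marks the affected gaps with huge_valf so
// local splitting never tries to evict it.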
1565 
1566 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1567 /// basic block.
1568 ///
1569 unsigned RAGreedy::tryLocalSplit(const LiveInterval &VirtReg,
1570  AllocationOrder &Order,
1571  SmallVectorImpl<Register> &NewVRegs) {
1572  // TODO: the function currently only handles a single UseBlock; it should be
1573  // possible to generalize.
1574  if (SA->getUseBlocks().size() != 1)
1575  return 0;
1576 
1577  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1578 
1579  // Note that it is possible to have an interval that is live-in or live-out
1580  // while only covering a single block - A phi-def can use undef values from
1581  // predecessors, and the block could be a single-block loop.
1582  // We don't bother doing anything clever about such a case, we simply assume
1583  // that the interval is continuous from FirstInstr to LastInstr. We should
1584  // make sure that we don't do anything illegal to such an interval, though.
1585 
1586  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1587  if (Uses.size() <= 2)
1588  return 0;
1589  const unsigned NumGaps = Uses.size()-1;
1590 
1591  LLVM_DEBUG({
1592  dbgs() << "tryLocalSplit: ";
1593  for (const auto &Use : Uses)
1594  dbgs() << ' ' << Use;
1595  dbgs() << '\n';
1596  });
1597 
1598  // If VirtReg is live across any register mask operands, compute a list of
1599  // gaps with register masks.
1600  SmallVector<unsigned, 8> RegMaskGaps;
1601  if (Matrix->checkRegMaskInterference(VirtReg)) {
1602  // Get regmask slots for the whole block.
1603  ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
1604  LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
1605  // Constrain to VirtReg's live range.
1606  unsigned RI =
1607  llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
1608  unsigned RE = RMS.size();
1609  for (unsigned I = 0; I != NumGaps && RI != RE; ++I) {
1610  // Look for Uses[I] <= RMS <= Uses[I + 1].
1611  assert(!SlotIndex::isEarlierInstr(RMS[RI], Uses[I]));
1612  if (SlotIndex::isEarlierInstr(Uses[I + 1], RMS[RI]))
1613  continue;
1614  // Skip a regmask on the same instruction as the last use. It doesn't
1615  // overlap the live range.
1616  if (SlotIndex::isSameInstr(Uses[I + 1], RMS[RI]) && I + 1 == NumGaps)
1617  break;
1618  LLVM_DEBUG(dbgs() << ' ' << RMS[RI] << ':' << Uses[I] << '-'
1619  << Uses[I + 1]);
1620  RegMaskGaps.push_back(I);
1621  // Advance RI to the next gap. A regmask on one of the uses counts in
1622  // both gaps.
1623  while (RI != RE && SlotIndex::isEarlierInstr(RMS[RI], Uses[I + 1]))
1624  ++RI;
1625  }
1626  LLVM_DEBUG(dbgs() << '\n');
1627  }
1628 
1629  // Since we allow local split results to be split again, there is a risk of
1630  // creating infinite loops. It is tempting to require that the new live
1631  // ranges have fewer instructions than the original. That would guarantee
1632  // convergence, but it is too strict. A live range with 3 instructions can be
1633  // split 2+3 (including the COPY), and we want to allow that.
1634  //
1635  // Instead we use these rules:
1636  //
1637  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
1638  // noop split, of course).
1639  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
1640  // the new ranges must have fewer instructions than before the split.
1641  // 3. New ranges with the same number of instructions are marked RS_Split2,
1642  // smaller ranges are marked RS_New.
1643  //
1644  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
1645  // excessive splitting and infinite loops.
1646  //
1647  bool ProgressRequired = ExtraInfo->getStage(VirtReg) >= RS_Split2;
1648 
1649  // Best split candidate.
1650  unsigned BestBefore = NumGaps;
1651  unsigned BestAfter = 0;
1652  float BestDiff = 0;
1653 
1654  const float blockFreq =
1655  SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
1656  (1.0f / MBFI->getEntryFreq());
1657  SmallVector<float, 8> GapWeight;
1658 
1659  for (MCPhysReg PhysReg : Order) {
1660  assert(PhysReg);
1661  // Keep track of the largest spill weight that would need to be evicted in
1662  // order to make use of PhysReg between UseSlots[I] and UseSlots[I + 1].
1663  calcGapWeights(PhysReg, GapWeight);
1664 
1665  // Remove any gaps with regmask clobbers.
1666  if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
1667  for (unsigned I = 0, E = RegMaskGaps.size(); I != E; ++I)
1668  GapWeight[RegMaskGaps[I]] = huge_valf;
1669 
1670  // Try to find the best sequence of gaps to close.
1671  // The new spill weight must be larger than any gap interference.
1672 
1673  // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1674  unsigned SplitBefore = 0, SplitAfter = 1;
1675 
1676  // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1677  // It is the spill weight that needs to be evicted.
1678  float MaxGap = GapWeight[0];
1679 
1680  while (true) {
1681  // Live before/after split?
1682  const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1683  const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1684 
1685  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
1686  << '-' << Uses[SplitAfter] << " I=" << MaxGap);
1687 
1688  // Stop before the interval gets so big we wouldn't be making progress.
1689  if (!LiveBefore && !LiveAfter) {
1690  LLVM_DEBUG(dbgs() << " all\n");
1691  break;
1692  }
1693  // Should the interval be extended or shrunk?
1694  bool Shrink = true;
1695 
1696  // How many gaps would the new range have?
1697  unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1698 
1699  // Legally, without causing looping?
1700  bool Legal = !ProgressRequired || NewGaps < NumGaps;
1701 
1702  if (Legal && MaxGap < huge_valf) {
1703  // Estimate the new spill weight. Each instruction reads or writes the
1704  // register. Conservatively assume there are no read-modify-write
1705  // instructions.
1706  //
1707  // Try to guess the size of the new interval.
1708  const float EstWeight = normalizeSpillWeight(
1709  blockFreq * (NewGaps + 1),
1710  Uses[SplitBefore].distance(Uses[SplitAfter]) +
1711  (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
1712  1);
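  // (Assuming normalizeSpillWeight() computes a use-frequency / size ratio, as
  // its signature suggests, EstWeight approximates the spill weight the new,
  // smaller interval would get. The Hysteresis factor below keeps the float
  // comparisons stable, so a candidate must clearly beat the interference it
  // would have to evict rather than winning on a near-tie.)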
1713  // Would this split be possible to allocate?
1714  // Never allocate all gaps, we wouldn't be making progress.
1715  LLVM_DEBUG(dbgs() << " w=" << EstWeight);
1716  if (EstWeight * Hysteresis >= MaxGap) {
1717  Shrink = false;
1718  float Diff = EstWeight - MaxGap;
1719  if (Diff > BestDiff) {
1720  LLVM_DEBUG(dbgs() << " (best)");
1721  BestDiff = Hysteresis * Diff;
1722  BestBefore = SplitBefore;
1723  BestAfter = SplitAfter;
1724  }
1725  }
1726  }
1727 
1728  // Try to shrink.
1729  if (Shrink) {
1730  if (++SplitBefore < SplitAfter) {
1731  LLVM_DEBUG(dbgs() << " shrink\n");
1732  // Recompute the max when necessary.
1733  if (GapWeight[SplitBefore - 1] >= MaxGap) {
1734  MaxGap = GapWeight[SplitBefore];
1735  for (unsigned I = SplitBefore + 1; I != SplitAfter; ++I)
1736  MaxGap = std::max(MaxGap, GapWeight[I]);
1737  }
1738  continue;
1739  }
1740  MaxGap = 0;
1741  }
1742 
1743  // Try to extend the interval.
1744  if (SplitAfter >= NumGaps) {
1745  LLVM_DEBUG(dbgs() << " end\n");
1746  break;
1747  }
1748 
1749  LLVM_DEBUG(dbgs() << " extend\n");
1750  MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1751  }
1752  }
1753 
1754  // Didn't find any candidates?
1755  if (BestBefore == NumGaps)
1756  return 0;
1757 
1758  LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
1759  << Uses[BestAfter] << ", " << BestDiff << ", "
1760  << (BestAfter - BestBefore + 1) << " instrs\n");
1761 
1762  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1763  SE->reset(LREdit);
1764 
1765  SE->openIntv();
1766  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1767  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1768  SE->useIntv(SegStart, SegStop);
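  // The new interval covers the span from just before Uses[BestBefore] to just
  // after Uses[BestAfter]; finish() below rewrites the affected instructions
  // and creates the remaining interval(s) for everything outside that span.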
1769  SmallVector<unsigned, 8> IntvMap;
1770  SE->finish(&IntvMap);
1771  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
1772  // If the new range has the same number of instructions as before, mark it as
1773  // RS_Split2 so the next split will be forced to make progress. Otherwise,
1774  // leave the new intervals as RS_New so they can compete.
1775  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
1776  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
1777  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1778  if (NewGaps >= NumGaps) {
1779  LLVM_DEBUG(dbgs() << "Tagging non-progress ranges:");
1780  assert(!ProgressRequired && "Didn't make progress when it was required.");
1781  for (unsigned I = 0, E = IntvMap.size(); I != E; ++I)
1782  if (IntvMap[I] == 1) {
1783  ExtraInfo->setStage(LIS->getInterval(LREdit.get(I)), RS_Split2);
1784  LLVM_DEBUG(dbgs() << ' ' << printReg(LREdit.get(I)));
1785  }
1786  LLVM_DEBUG(dbgs() << '\n');
1787  }
1788  ++NumLocalSplits;
1789 
1790  return 0;
1791 }
1792 
1793 //===----------------------------------------------------------------------===//
1794 // Live Range Splitting
1795 //===----------------------------------------------------------------------===//
1796 
1797 /// trySplit - Try to split VirtReg or one of its interferences, making it
1798 /// assignable.
1799 /// @return a PhysReg when VirtReg may be assigned, and/or new live ranges in NewVRegs.
1800 unsigned RAGreedy::trySplit(const LiveInterval &VirtReg, AllocationOrder &Order,
1801  SmallVectorImpl<Register> &NewVRegs,
1802  const SmallVirtRegSet &FixedRegisters) {
1803  // Ranges must be Split2 or less.
1804  if (ExtraInfo->getStage(VirtReg) >= RS_Spill)
1805  return 0;
1806 
1807  // Local intervals are handled separately.
1808  if (LIS->intervalIsInOneMBB(VirtReg)) {
1809  NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
1810  TimerGroupDescription, TimePassesIsEnabled);
1811  SA->analyze(&VirtReg);
1812  Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1813  if (PhysReg || !NewVRegs.empty())
1814  return PhysReg;
1815  return tryInstructionSplit(VirtReg, Order, NewVRegs);
1816  }
1817 
1818  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
1819  TimerGroupDescription, TimePassesIsEnabled);
1820 
1821  SA->analyze(&VirtReg);
1822 
1823  // First try to split around a region spanning multiple blocks. RS_Split2
1824  // ranges already made dubious progress with region splitting, so they go
1825  // straight to single block splitting.
1826  if (ExtraInfo->getStage(VirtReg) < RS_Split2) {
1827  MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1828  if (PhysReg || !NewVRegs.empty())
1829  return PhysReg;
1830  }
1831 
1832  // Then isolate blocks.
1833  return tryBlockSplit(VirtReg, Order, NewVRegs);
1834 }
1835 
1836 //===----------------------------------------------------------------------===//
1837 // Last Chance Recoloring
1838 //===----------------------------------------------------------------------===//
1839 
1840 /// Return true if \p reg has any tied def operand.
1841 static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
1842  for (const MachineOperand &MO : MRI->def_operands(reg))
1843  if (MO.isTied())
1844  return true;
1845 
1846  return false;
1847 }
1848 
1849 /// Return true if the existing assignment of \p Intf overlaps, but is not the
1850 /// same, as \p PhysReg.
1851 static bool assignedRegPartiallyOverlaps(const TargetRegisterInfo &TRI,
1852  const VirtRegMap &VRM,
1853  MCRegister PhysReg,
1854  const LiveInterval &Intf) {
1855  MCRegister AssignedReg = VRM.getPhys(Intf.reg());
1856  if (PhysReg == AssignedReg)
1857  return false;
1858  return TRI.regsOverlap(PhysReg, AssignedReg);
1859 }
1860 
1861 /// mayRecolorAllInterferences - Check if the virtual registers that
1862 /// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
1863 /// recolored to free \p PhysReg.
1864 /// When true is returned, \p RecoloringCandidates has been augmented with all
1865 /// the live intervals that need to be recolored in order to free \p PhysReg
1866 /// for \p VirtReg.
1867 /// \p FixedRegisters contains all the virtual registers that cannot be
1868 /// recolored.
1869 bool RAGreedy::mayRecolorAllInterferences(
1870  MCRegister PhysReg, const LiveInterval &VirtReg,
1871  SmallLISet &RecoloringCandidates, const SmallVirtRegSet &FixedRegisters) {
1872  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
1873 
1874  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1875  LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
1876  // If there is LastChanceRecoloringMaxInterference or more interferences,
1877  // chances are one would not be recolorable.
1878  if (Q.interferingVRegs(LastChanceRecoloringMaxInterference).size() >=
1879  LastChanceRecoloringMaxInterference &&
1880  !ExhaustiveSearch) {
1881  LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
1882  CutOffInfo |= CO_Interf;
1883  return false;
1884  }
1885  for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
1886  // If Intf is done and sits on the same register class as VirtReg, it
1887  // would not be recolorable as it is in the same state as
1888  // VirtReg. However there are at least two exceptions.
1889  //
1890  // If VirtReg has tied defs and Intf doesn't, then
1891  // there is still a point in examining if it can be recolorable.
1892  //
1893  // Additionally, if the register class has overlapping tuple members, it
1894  // may still be recolorable using a different tuple. This is more likely
1895  // if the existing assignment aliases with the candidate.
1896  //
1897  if (((ExtraInfo->getStage(*Intf) == RS_Done &&
1898  MRI->getRegClass(Intf->reg()) == CurRC &&
1899  !assignedRegPartiallyOverlaps(*TRI, *VRM, PhysReg, *Intf)) &&
1900  !(hasTiedDef(MRI, VirtReg.reg()) &&
1901  !hasTiedDef(MRI, Intf->reg()))) ||
1902  FixedRegisters.count(Intf->reg())) {
1903  LLVM_DEBUG(
1904  dbgs() << "Early abort: the interference is not recolorable.\n");
1905  return false;
1906  }
1907  RecoloringCandidates.insert(Intf);
1908  }
1909  }
1910  return true;
1911 }
1912 
1913 /// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
1914 /// its interferences.
1915 /// Last chance recoloring chooses a color for \p VirtReg and recolors every
1916 /// virtual register that was using it. The recoloring process may recursively
1917 /// use the last chance recoloring. Therefore, when a virtual register has been
1918 /// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
1919 /// be last-chance-recolored again during this recoloring "session".
1920 /// E.g.,
1921 /// Let
1922 /// vA can use {R1, R2 }
1923 /// vB can use { R2, R3}
1924 /// vC can use {R1 }
1925 /// Where vA, vB, and vC cannot be split anymore (they are reloads for
1926 /// instance) and they all interfere.
1927 ///
1928 /// vA is assigned R1
1929 /// vB is assigned R2
1930 /// vC tries to evict vA but vA is already done.
1931 /// Regular register allocation fails.
1932 ///
1933 /// Last chance recoloring kicks in:
1934 /// vC does as if vA was evicted => vC uses R1.
1935 /// vC is marked as fixed.
1936 /// vA needs to find a color.
1937 /// None are available.
1938 /// vA cannot evict vC: vC is a fixed virtual register now.
1939 /// vA does as if vB was evicted => vA uses R2.
1940 /// vB needs to find a color.
1941 /// R3 is available.
1942 /// Recoloring => vC = R1, vA = R2, vB = R3
1943 ///
1944 /// \p Order defines the preferred allocation order for \p VirtReg.
1945 /// \p NewRegs will contain any new virtual register that have been created
1946 /// (split, spill) during the process and that must be assigned.
1947 /// \p FixedRegisters contains all the virtual registers that cannot be
1948 /// recolored.
1949 ///
1950 /// \p RecolorStack tracks the original assignments of successfully recolored
1951 /// registers.
1952 ///
1953 /// \p Depth gives the current depth of the last chance recoloring.
1954 /// \return a physical register that can be used for VirtReg or ~0u if none
1955 /// exists.
1956 unsigned RAGreedy::tryLastChanceRecoloring(const LiveInterval &VirtReg,
1957  AllocationOrder &Order,
1958  SmallVectorImpl<Register> &NewVRegs,
1959  SmallVirtRegSet &FixedRegisters,
1960  RecoloringStack &RecolorStack,
1961  unsigned Depth) {
1962  if (!TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
1963  return ~0u;
1964 
1965  LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
1966 
1967  const ssize_t EntryStackSize = RecolorStack.size();
1968 
1969  // Ranges must be Done.
1970  assert((ExtraInfo->getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
1971  "Last chance recoloring should really be last chance");
1972  // Set the max depth to LastChanceRecoloringMaxDepth.
1973  // We may want to reconsider that if we end up with a too large search space
1974  // for targets with hundreds of registers.
1975  // Indeed, in that case we may want to cut the search space earlier.
1977  LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
1978  CutOffInfo |= CO_Depth;
1979  return ~0u;
1980  }
1981 
1982  // Set of Live intervals that will need to be recolored.
1983  SmallLISet RecoloringCandidates;
1984 
1985  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
1986  // this recoloring "session".
1987  assert(!FixedRegisters.count(VirtReg.reg()));
1988  FixedRegisters.insert(VirtReg.reg());
1989  SmallVector<Register, 4> CurrentNewVRegs;
1990 
1991  for (MCRegister PhysReg : Order) {
1992  assert(PhysReg.isValid());
1993  LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
1994  << printReg(PhysReg, TRI) << '\n');
1995  RecoloringCandidates.clear();
1996  CurrentNewVRegs.clear();
1997 
1998  // It is only possible to recolor virtual register interference.
1999  if (Matrix->checkInterference(VirtReg, PhysReg) >
2000  LiveRegMatrix::IK_VirtReg) {
2001  LLVM_DEBUG(
2002  dbgs() << "Some interferences are not with virtual registers.\n");
2003 
2004  continue;
2005  }
2006 
2007  // Early give up on this PhysReg if it is obvious we cannot recolor all
2008  // the interferences.
2009  if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2010  FixedRegisters)) {
2011  LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
2012  continue;
2013  }
2014 
2015  // RecoloringCandidates contains all the virtual registers that interfere
2016  // with VirtReg on PhysReg (or one of its aliases). Enqueue them for
2017  // recoloring and perform the actual recoloring.
2018  PQueue RecoloringQueue;
2019  for (const LiveInterval *RC : RecoloringCandidates) {
2020  Register ItVirtReg = RC->reg();
2021  enqueue(RecoloringQueue, RC);
2022  assert(VRM->hasPhys(ItVirtReg) &&
2023  "Interferences are supposed to be with allocated variables");
2024 
2025  // Record the current allocation.
2026  RecolorStack.push_back(std::make_pair(RC, VRM->getPhys(ItVirtReg)));
2027 
2028  // unset the related struct.
2029  Matrix->unassign(*RC);
2030  }
2031 
2032  // Do as if VirtReg was assigned to PhysReg so that the underlying
2033  // recoloring has the right information about the interferences and
2034  // available colors.
2035  Matrix->assign(VirtReg, PhysReg);
2036 
2037  // Save the current recoloring state.
2038  // If we cannot recolor all the interferences, we will have to start again
2039  // at this point for the next physical register.
2040  SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
2041  if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2042  FixedRegisters, RecolorStack, Depth)) {
2043  // Push the queued vregs into the main queue.
2044  for (Register NewVReg : CurrentNewVRegs)
2045  NewVRegs.push_back(NewVReg);
2046  // Do not mess up with the global assignment process.
2047  // I.e., VirtReg must be unassigned.
2048  Matrix->unassign(VirtReg);
2049  return PhysReg;
2050  }
2051 
2052  LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
2053  << printReg(PhysReg, TRI) << '\n');
2054 
2055  // The recoloring attempt failed, undo the changes.
2056  FixedRegisters = SaveFixedRegisters;
2057  Matrix->unassign(VirtReg);
2058 
2059  // For a newly created vreg which is also in RecoloringCandidates,
2060  // don't add it to NewVRegs because its physical register will be restored
2061  // below. Other vregs in CurrentNewVRegs are created by calling
2062  // selectOrSplit and should be added into NewVRegs.
2063  for (Register &R : CurrentNewVRegs) {
2064  if (RecoloringCandidates.count(&LIS->getInterval(R)))
2065  continue;
2066  NewVRegs.push_back(R);
2067  }
2068 
2069  // Roll back our unsuccessful recoloring. Also roll back any successful
2070  // recolorings in any recursive recoloring attempts, since it's possible
2071  // they would have introduced conflicts with assignments we will be
2072  // restoring further up the stack. Perform all unassignments prior to
2073  // reassigning, since sub-recolorings may have conflicted with the registers
2074  // we are going to restore to their original assignments.
2075  for (ssize_t I = RecolorStack.size() - 1; I >= EntryStackSize; --I) {
2076  const LiveInterval *LI;
2077  MCRegister PhysReg;
2078  std::tie(LI, PhysReg) = RecolorStack[I];
2079 
2080  if (VRM->hasPhys(LI->reg()))
2081  Matrix->unassign(*LI);
2082  }
2083 
2084  for (size_t I = EntryStackSize; I != RecolorStack.size(); ++I) {
2085  const LiveInterval *LI;
2086  MCRegister PhysReg;
2087  std::tie(LI, PhysReg) = RecolorStack[I];
2088  Matrix->assign(*LI, PhysReg);
2089  }
2090 
2091  // Pop the stack of recoloring attempts.
2092  RecolorStack.resize(EntryStackSize);
2093  }
2094 
2095  // Last chance recoloring did not work either; give up.
2096  return ~0u;
2097 }
2098 
2099 /// tryRecoloringCandidates - Try to assign a new color to every register
2100 /// in \RecoloringQueue.
2101 /// \p NewRegs will contain any new virtual register created during the
2102 /// recoloring process.
2103 /// \p FixedRegisters[in/out] contains all the registers that have been
2104 /// recolored.
2105 /// \return true if all virtual registers in RecoloringQueue were successfully
2106 /// recolored, false otherwise.
2107 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2108  SmallVectorImpl<Register> &NewVRegs,
2109  SmallVirtRegSet &FixedRegisters,
2110  RecoloringStack &RecolorStack,
2111  unsigned Depth) {
2112  while (!RecoloringQueue.empty()) {
2113  const LiveInterval *LI = dequeue(RecoloringQueue);
2114  LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
2115  MCRegister PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
2116  RecolorStack, Depth + 1);
2117  // When splitting happens, the live-range may actually be empty.
2118  // In that case, this is okay to continue the recoloring even
2119  // if we did not find an alternative color for it. Indeed,
2120  // there will not be anything to color for LI in the end.
2121  if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
2122  return false;
2123 
2124  if (!PhysReg) {
2125  assert(LI->empty() && "Only empty live-ranges do not require a register");
2126  LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2127  << " succeeded. Empty LI.\n");
2128  continue;
2129  }
2130  LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2131  << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
2132 
2133  Matrix->assign(*LI, PhysReg);
2134  FixedRegisters.insert(LI->reg());
2135  }
2136  return true;
2137 }
2138 
2139 //===----------------------------------------------------------------------===//
2140 // Main Entry Point
2141 //===----------------------------------------------------------------------===//
2142 
2143 MCRegister RAGreedy::selectOrSplit(const LiveInterval &VirtReg,
2144  SmallVectorImpl<Register> &NewVRegs) {
2145  CutOffInfo = CO_None;
2146  LLVMContext &Ctx = MF->getFunction().getContext();
2147  SmallVirtRegSet FixedRegisters;
2148  RecoloringStack RecolorStack;
2149  MCRegister Reg =
2150  selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters, RecolorStack);
2151  if (Reg == ~0U && (CutOffInfo != CO_None)) {
2152  uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2153  if (CutOffEncountered == CO_Depth)
2154  Ctx.emitError("register allocation failed: maximum depth for recoloring "
2155  "reached. Use -fexhaustive-register-search to skip "
2156  "cutoffs");
2157  else if (CutOffEncountered == CO_Interf)
2158  Ctx.emitError("register allocation failed: maximum interference for "
2159  "recoloring reached. Use -fexhaustive-register-search "
2160  "to skip cutoffs");
2161  else if (CutOffEncountered == (CO_Depth | CO_Interf))
2162  Ctx.emitError("register allocation failed: maximum interference and "
2163  "depth for recoloring reached. Use "
2164  "-fexhaustive-register-search to skip cutoffs");
2165  }
2166  return Reg;
2167 }
2168 
2169 /// Using a CSR for the first time has a cost because it causes push|pop
2170 /// to be added to prologue|epilogue. Splitting a cold section of the live
2171 /// range can have lower cost than using the CSR for the first time, and
2172 /// spilling a live range in the cold path can likewise be cheaper than using
2173 /// the CSR for the first time. Returns the physical register if we decide
2174 /// to use the CSR; otherwise return 0.
2175 MCRegister RAGreedy::tryAssignCSRFirstTime(
2176  const LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg,
2177  uint8_t &CostPerUseLimit, SmallVectorImpl<Register> &NewVRegs) {
2178  if (ExtraInfo->getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
2179  // We choose spill over using the CSR for the first time if the spill cost
2180  // is lower than CSRCost.
2181  SA->analyze(&VirtReg);
2182  if (calcSpillCost() >= CSRCost)
2183  return PhysReg;
2184 
2185  // We are going to spill, set CostPerUseLimit to 1 to make sure that
2186  // we will not use a callee-saved register in tryEvict.
2187  CostPerUseLimit = 1;
2188  return 0;
2189  }
2190  if (ExtraInfo->getStage(VirtReg) < RS_Split) {
2191  // We choose pre-splitting over using the CSR for the first time if
2192  // the cost of splitting is lower than CSRCost.
2193  SA->analyze(&VirtReg);
2194  unsigned NumCands = 0;
2195  BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
2196  unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2197  NumCands, true /*IgnoreCSR*/);
2198  if (BestCand == NoCand)
2199  // Use the CSR if we can't find a region split below CSRCost.
2200  return PhysReg;
2201 
2202  // Perform the actual pre-splitting.
2203  doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
2204  return 0;
2205  }
2206  return PhysReg;
2207 }
2208 
2209 void RAGreedy::aboutToRemoveInterval(const LiveInterval &LI) {
2210  // Do not keep invalid information around.
2211  SetOfBrokenHints.remove(&LI);
2212 }
2213 
2214 void RAGreedy::initializeCSRCost() {
2215  // We use the larger of the command-line option and the value reported
2216  // by TRI.
2217  CSRCost = BlockFrequency(
2218  std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
2219  if (!CSRCost.getFrequency())
2220  return;
2221 
2222  // Raw cost is relative to Entry == 2^14; scale it appropriately.
2223  uint64_t ActualEntry = MBFI->getEntryFreq();
2224  if (!ActualEntry) {
2225  CSRCost = 0;
2226  return;
2227  }
2228  uint64_t FixedEntry = 1 << 14;
2229  if (ActualEntry < FixedEntry)
2230  CSRCost *= BranchProbability(ActualEntry, FixedEntry);
2231  else if (ActualEntry <= UINT32_MAX)
2232  // Invert the fraction and divide.
2233  CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2234  else
2235  // Can't use BranchProbability in general, since it takes 32-bit numbers.
2236  CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
2237 }
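A minimal standalone sketch (hypothetical helper, not part of the LLVM source) of the scaling arithmetic in initializeCSRCost() above: the raw cost is defined relative to an entry frequency of 2^14 and is rescaled to the function's actual entry frequency.

 #include <cstdint>

 // Hypothetical model of the scaling step: return the CSR cost expressed in
 // the function's own entry-frequency units.
 uint64_t scaleCSRCost(uint64_t RawCost, uint64_t ActualEntry) {
   const uint64_t FixedEntry = 1 << 14; // Raw cost is relative to Entry == 2^14.
   if (ActualEntry == 0)
     return 0; // No usable entry frequency: disable the CSR cost entirely.
   // Multiplying by ActualEntry/FixedEntry covers both the scale-down and the
   // scale-up case; the real code uses BranchProbability to keep precision for
   // 32-bit frequencies and falls back to an integer ratio for larger ones.
   return RawCost * ActualEntry / FixedEntry;
 }
 // E.g. RawCost = 5 with ActualEntry = 1 << 13 yields 2: roughly half the
 // fixed-entry cost, matching CSRCost *= BranchProbability(2^13, 2^14).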
2238 
2239 /// Collect the hint info for \p Reg.
2240 /// The results are stored into \p Out.
2241 /// \p Out is not cleared before being populated.
2242 void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
2243  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
2244  if (!Instr.isFullCopy())
2245  continue;
2246  // Look for the other end of the copy.
2247  Register OtherReg = Instr.getOperand(0).getReg();
2248  if (OtherReg == Reg) {
2249  OtherReg = Instr.getOperand(1).getReg();
2250  if (OtherReg == Reg)
2251  continue;
2252  }
2253  // Get the current assignment.
2254  MCRegister OtherPhysReg =
2255  OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
2256  // Push the collected information.
2257  Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
2258  OtherPhysReg));
2259  }
2260 }
2261 
2262 /// Using the given \p List, compute the cost of the broken hints if
2263 /// \p PhysReg was used.
2264 /// \return The cost of \p List for \p PhysReg.
2265 BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
2266  MCRegister PhysReg) {
2267  BlockFrequency Cost = 0;
2268  for (const HintInfo &Info : List) {
2269  if (Info.PhysReg != PhysReg)
2270  Cost += Info.Freq;
2271  }
2272  return Cost;
2273 }
2274 
2275 /// Using the register assigned to \p VirtReg, try to recolor
2276 /// all the live ranges that are copy-related with \p VirtReg.
2277 /// The recoloring is then propagated to all the live-ranges that have
2278 /// been recolored and so on, until no more copies can be coalesced or
2279 /// it is not profitable.
2280 /// For a given live range, profitability is determined by the sum of the
2281 /// frequencies of the non-identity copies it would introduce with the old
2282 /// and new register.
2283 void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
2284  // We have a broken hint, check if it is possible to fix it by
2285  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
2286  // some register and PhysReg may be available for the other live-ranges.
2287  SmallSet<Register, 4> Visited;
2288  SmallVector<unsigned, 2> RecoloringCandidates;
2289  HintsInfo Info;
2290  Register Reg = VirtReg.reg();
2291  MCRegister PhysReg = VRM->getPhys(Reg);
2292  // Start the recoloring algorithm from the input live-interval, then
2293  // it will propagate to the ones that are copy-related with it.
2294  Visited.insert(Reg);
2295  RecoloringCandidates.push_back(Reg);
2296 
2297  LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
2298  << '(' << printReg(PhysReg, TRI) << ")\n");
2299 
2300  do {
2301  Reg = RecoloringCandidates.pop_back_val();
2302 
2303  // We cannot recolor physical registers.
2304  if (Register::isPhysicalRegister(Reg))
2305  continue;
2306 
2307  // This may be a skipped class
2308  if (!VRM->hasPhys(Reg)) {
2310  "We have an unallocated variable which should have been handled");
2311  continue;
2312  }
2313 
2314  // Get the live interval mapped with this virtual register to be able
2315  // to check for the interference with the new color.
2316  LiveInterval &LI = LIS->getInterval(Reg);
2317  MCRegister CurrPhys = VRM->getPhys(Reg);
2318  // Check that the new color matches the register class constraints and
2319  // that it is free for this live range.
2320  if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
2321  Matrix->checkInterference(LI, PhysReg)))
2322  continue;
2323 
2324  LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
2325  << ") is recolorable.\n");
2326 
2327  // Gather the hint info.
2328  Info.clear();
2329  collectHintInfo(Reg, Info);
2330  // Check if recoloring the live-range will increase the cost of the
2331  // non-identity copies.
2332  if (CurrPhys != PhysReg) {
2333  LLVM_DEBUG(dbgs() << "Checking profitability:\n");
2334  BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2335  BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
2336  LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
2337  << "\nNew Cost: " << NewCopiesCost.getFrequency()
2338  << '\n');
2339  if (OldCopiesCost < NewCopiesCost) {
2340  LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
2341  continue;
2342  }
2343  // At this point, the cost is either lower or equal. If it is
2344  // equal, we still consider it profitable because it may expose
2345  // more recoloring opportunities.
2346  LLVM_DEBUG(dbgs() << "=> Profitable.\n");
2347  // Recolor the live-range.
2348  Matrix->unassign(LI);
2349  Matrix->assign(LI, PhysReg);
2350  }
2351  // Push all copy-related live-ranges to keep reconciling the broken
2352  // hints.
2353  for (const HintInfo &HI : Info) {
2354  if (Visited.insert(HI.Reg).second)
2355  RecoloringCandidates.push_back(HI.Reg);
2356  }
2357  } while (!RecoloringCandidates.empty());
2358 }
2359 
2360 /// Try to recolor broken hints.
2361 /// Broken hints may be repaired by recoloring when an evicted variable
2362 /// freed up a register for a larger live-range.
2363 /// Consider the following example:
2364 /// BB1:
2365 /// a =
2366 /// b =
2367 /// BB2:
2368 /// ...
2369 /// = b
2370 /// = a
2371 /// Let us assume b gets split:
2372 /// BB1:
2373 /// a =
2374 /// b =
2375 /// BB2:
2376 /// c = b
2377 /// ...
2378 /// d = c
2379 /// = d
2380 /// = a
2381 /// Because of how the allocation works, b, c, and d may be assigned different
2382 /// colors. Now, if a gets evicted later:
2383 /// BB1:
2384 /// a =
2385 /// st a, SpillSlot
2386 /// b =
2387 /// BB2:
2388 /// c = b
2389 /// ...
2390 /// d = c
2391 /// = d
2392 /// e = ld SpillSlot
2393 /// = e
2394 /// It is likely that we can assign the same register to b, c, and d,
2395 /// getting rid of 2 copies.
2396 void RAGreedy::tryHintsRecoloring() {
2397  for (const LiveInterval *LI : SetOfBrokenHints) {
2399  "Recoloring is possible only for virtual registers");
2400  // Some dead defs may be around (e.g., because of debug uses).
2401  // Ignore those.
2402  if (!VRM->hasPhys(LI->reg()))
2403  continue;
2404  tryHintRecoloring(*LI);
2405  }
2406 }
2407 
2408 MCRegister RAGreedy::selectOrSplitImpl(const LiveInterval &VirtReg,
2409  SmallVectorImpl<Register> &NewVRegs,
2410  SmallVirtRegSet &FixedRegisters,
2411  RecoloringStack &RecolorStack,
2412  unsigned Depth) {
2413  uint8_t CostPerUseLimit = uint8_t(~0u);
2414  // First try assigning a free register.
2415  auto Order =
2416  AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
2417  if (MCRegister PhysReg =
2418  tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
2419  // If VirtReg got an assignment, the eviction info is no longer relevant.
2420  LastEvicted.clearEvicteeInfo(VirtReg.reg());
2421  // When NewVRegs is not empty, we may have made decisions such as evicting
2422  // a virtual register, go with the earlier decisions and use the physical
2423  // register.
2424  if (CSRCost.getFrequency() &&
2425  EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.empty()) {
2426  MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2427  CostPerUseLimit, NewVRegs);
2428  if (CSRReg || !NewVRegs.empty())
2429  // Return now if we decide to use a CSR or create new vregs due to
2430  // pre-splitting.
2431  return CSRReg;
2432  } else
2433  return PhysReg;
2434  }
2435 
2436  LiveRangeStage Stage = ExtraInfo->getStage(VirtReg);
2437  LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
2438  << ExtraInfo->getCascade(VirtReg.reg()) << '\n');
2439 
2440  // Try to evict a less worthy live range, but only for ranges from the primary
2441  // queue. The RS_Split ranges already failed to do this, and they should not
2442  // get a second chance until they have been split.
2443  if (Stage != RS_Split)
2444  if (Register PhysReg =
2445  tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
2446  FixedRegisters)) {
2447  Register Hint = MRI->getSimpleHint(VirtReg.reg());
2448  // If VirtReg has a hint and that hint is broken, record this
2449  // virtual register as a recoloring candidate for broken hints.
2450  // Indeed, since we evicted a variable in its neighborhood it is
2451  // likely we can at least partially recolor some of the
2452  // copy-related live-ranges.
2453  if (Hint && Hint != PhysReg)
2454  SetOfBrokenHints.insert(&VirtReg);
2455  // If VirtReg evicted someone, its eviction info as an evictee is
2456  // no longer relevant.
2457  LastEvicted.clearEvicteeInfo(VirtReg.reg());
2458  return PhysReg;
2459  }
2460 
2461  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");
2462 
2463  // The first time we see a live range, don't try to split or spill.
2464  // Wait until the second time, when all smaller ranges have been allocated.
2465  // This gives a better picture of the interference to split around.
2466  if (Stage < RS_Split) {
2467  ExtraInfo->setStage(VirtReg, RS_Split);
2468  LLVM_DEBUG(dbgs() << "wait for second round\n");
2469  NewVRegs.push_back(VirtReg.reg());
2470  return 0;
2471  }
2472 
2473  if (Stage < RS_Spill) {
2474  // Try splitting VirtReg or interferences.
2475  unsigned NewVRegSizeBefore = NewVRegs.size();
2476  Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
2477  if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
2478  // If VirtReg got split, the eviction info is no longer relevant.
2479  LastEvicted.clearEvicteeInfo(VirtReg.reg());
2480  return PhysReg;
2481  }
2482  }
2483 
2484  // If we couldn't allocate a register from spilling, there is probably some
2485  // invalid inline assembly. The base class will report it.
2486  if (Stage >= RS_Done || !VirtReg.isSpillable()) {
2487  return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2488  RecolorStack, Depth);
2489  }
2490 
2491  // Finally spill VirtReg itself.
2492  if ((EnableDeferredSpilling ||
2493  TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) &&
2494  ExtraInfo->getStage(VirtReg) < RS_Memory) {
2495  // TODO: This is experimental and in particular, we do not model
2496  // the live range splitting done by spilling correctly.
2497  // We would need a deep integration with the spiller to do the
2498  // right thing here. Anyway, that is still good for early testing.
2499  ExtraInfo->setStage(VirtReg, RS_Memory);
2500  LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
2501  NewVRegs.push_back(VirtReg.reg());
2502  } else {
2503  NamedRegionTimer T("spill", "Spiller", TimerGroupName,
2504  TimerGroupDescription, TimePassesIsEnabled);
2505  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2506  spiller().spill(LRE);
2507  ExtraInfo->setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
2508 
2509  // Tell LiveDebugVariables about the new ranges. Ranges not being covered by
2510  // the new regs are kept in LDV (still mapping to the old register), until
2511  // we rewrite spilled locations in LDV at a later stage.
2512  DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS);
2513 
2514  if (VerifyEnabled)
2515  MF->verify(this, "After spilling");
2516  }
2517 
2518  // The live virtual register requesting allocation was spilled, so tell
2519  // the caller not to allocate anything during this round.
2520  return 0;
2521 }
2522 
2523 void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
2524  using namespace ore;
2525  if (Spills) {
2526  R << NV("NumSpills", Spills) << " spills ";
2527  R << NV("TotalSpillsCost", SpillsCost) << " total spills cost ";
2528  }
2529  if (FoldedSpills) {
2530  R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
2531  R << NV("TotalFoldedSpillsCost", FoldedSpillsCost)
2532  << " total folded spills cost ";
2533  }
2534  if (Reloads) {
2535  R << NV("NumReloads", Reloads) << " reloads ";
2536  R << NV("TotalReloadsCost", ReloadsCost) << " total reloads cost ";
2537  }
2538  if (FoldedReloads) {
2539  R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
2540  R << NV("TotalFoldedReloadsCost", FoldedReloadsCost)
2541  << " total folded reloads cost ";
2542  }
2543  if (ZeroCostFoldedReloads)
2544  R << NV("NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
2545  << " zero cost folded reloads ";
2546  if (Copies) {
2547  R << NV("NumVRCopies", Copies) << " virtual registers copies ";
2548  R << NV("TotalCopiesCost", CopiesCost) << " total copies cost ";
2549  }
2550 }
2551 
2552 RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &MBB) {
2553  RAGreedyStats Stats;
2554  const MachineFrameInfo &MFI = MF->getFrameInfo();
2555  int FI;
2556 
2557  auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
2558  return MFI.isSpillSlotObjectIndex(cast<FixedStackPseudoSourceValue>(
2559  A->getPseudoValue())->getFrameIndex());
2560  };
2561  auto isPatchpointInstr = [](const MachineInstr &MI) {
2562  return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
2563  MI.getOpcode() == TargetOpcode::STACKMAP ||
2564  MI.getOpcode() == TargetOpcode::STATEPOINT;
2565  };
2566  for (MachineInstr &MI : MBB) {
2567  if (MI.isCopy()) {
2568  MachineOperand &Dest = MI.getOperand(0);
2569  MachineOperand &Src = MI.getOperand(1);
2570  if (Dest.isReg() && Src.isReg() && Dest.getReg().isVirtual() &&
2571  Src.getReg().isVirtual())
2572  ++Stats.Copies;
2573  continue;
2574  }
2575 
2577  if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
2578  ++Stats.Reloads;
2579  continue;
2580  }
2581  if (TII->isStoreToStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
2582  ++Stats.Spills;
2583  continue;
2584  }
2585  if (TII->hasLoadFromStackSlot(MI, Accesses) &&
2586  llvm::any_of(Accesses, isSpillSlotAccess)) {
2587  if (!isPatchpointInstr(MI)) {
2588  Stats.FoldedReloads += Accesses.size();
2589  continue;
2590  }
2591  // For statepoint there may be folded and zero cost folded stack reloads.
2592  std::pair<unsigned, unsigned> NonZeroCostRange =
2593  TII->getPatchpointUnfoldableRange(MI);
2594  SmallSet<unsigned, 16> FoldedReloads;
2595  SmallSet<unsigned, 16> ZeroCostFoldedReloads;
2596  for (unsigned Idx = 0, E = MI.getNumOperands(); Idx < E; ++Idx) {
2597  MachineOperand &MO = MI.getOperand(Idx);
2598  if (!MO.isFI() || !MFI.isSpillSlotObjectIndex(MO.getIndex()))
2599  continue;
2600  if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
2601  FoldedReloads.insert(MO.getIndex());
2602  else
2603  ZeroCostFoldedReloads.insert(MO.getIndex());
2604  }
2605  // A stack slot that is used by a folded reload is not zero cost.
2606  for (unsigned Slot : FoldedReloads)
2607  ZeroCostFoldedReloads.erase(Slot);
2608  Stats.FoldedReloads += FoldedReloads.size();
2609  Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.size();
2610  continue;
2611  }
2612  Accesses.clear();
2613  if (TII->hasStoreToStackSlot(MI, Accesses) &&
2614  llvm::any_of(Accesses, isSpillSlotAccess)) {
2615  Stats.FoldedSpills += Accesses.size();
2616  }
2617  }
2618  // Weight the collected statistics by the relative frequency of
2619  // this basic block.
2620  float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&MBB);
2621  Stats.ReloadsCost = RelFreq * Stats.Reloads;
2622  Stats.FoldedReloadsCost = RelFreq * Stats.FoldedReloads;
2623  Stats.SpillsCost = RelFreq * Stats.Spills;
2624  Stats.FoldedSpillsCost = RelFreq * Stats.FoldedSpills;
2625  Stats.CopiesCost = RelFreq * Stats.Copies;
2626  return Stats;
2627 }
2628 
2629 RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
2630  RAGreedyStats Stats;
2631 
2632  // Sum up the spills and reloads in subloops.
2633  for (MachineLoop *SubLoop : *L)
2634  Stats.add(reportStats(SubLoop));
2635 
2636  for (MachineBasicBlock *MBB : L->getBlocks())
2637  // Handle blocks that were not included in subloops.
2638  if (Loops->getLoopFor(MBB) == L)
2639  Stats.add(computeStats(*MBB));
2640 
2641  if (!Stats.isEmpty()) {
2642  using namespace ore;
2643 
2644  ORE->emit([&]() {
2645  MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReloadCopies",
2646  L->getStartLoc(), L->getHeader());
2647  Stats.report(R);
2648  R << "generated in loop";
2649  return R;
2650  });
2651  }
2652  return Stats;
2653 }
2654 
2655 void RAGreedy::reportStats() {
2656  if (!ORE->allowExtraAnalysis(DEBUG_TYPE))
2657  return;
2658  RAGreedyStats Stats;
2659  for (MachineLoop *L : *Loops)
2660  Stats.add(reportStats(L));
2661  // Process non-loop blocks.
2662  for (MachineBasicBlock &MBB : *MF)
2663  if (!Loops->getLoopFor(&MBB))
2664  Stats.add(computeStats(MBB));
2665  if (!Stats.isEmpty()) {
2666  using namespace ore;
2667 
2668  ORE->emit([&]() {
2669  DebugLoc Loc;
2670  if (auto *SP = MF->getFunction().getSubprogram())
2671  Loc = DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
2672  MachineOptimizationRemarkMissed R(DEBUG_TYPE, "SpillReloadCopies", Loc,
2673  &MF->front());
2674  Stats.report(R);
2675  R << "generated in function";
2676  return R;
2677  });
2678  }
2679 }
2680 
2682  LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
2683  << "********** Function: " << mf.getName() << '\n');
2684 
2685  MF = &mf;
2686  TII = MF->getSubtarget().getInstrInfo();
2687 
2688  if (VerifyEnabled)
2689  MF->verify(this, "Before greedy register allocator");
2690 
2691  RegAllocBase::init(getAnalysis<VirtRegMap>(),
2692  getAnalysis<LiveIntervals>(),
2693  getAnalysis<LiveRegMatrix>());
2694  Indexes = &getAnalysis<SlotIndexes>();
2695  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
2696  DomTree = &getAnalysis<MachineDominatorTree>();
2697  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
2698  Loops = &getAnalysis<MachineLoopInfo>();
2699  Bundles = &getAnalysis<EdgeBundles>();
2700  SpillPlacer = &getAnalysis<SpillPlacement>();
2701  DebugVars = &getAnalysis<LiveDebugVariables>();
2702  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2703 
2704  initializeCSRCost();
2705 
2706  RegCosts = TRI->getRegisterCosts(*MF);
2707  RegClassPriorityTrumpsGlobalness =
2708  GreedyRegClassPriorityTrumpsGlobalness.getNumOccurrences()
2709  ? GreedyRegClassPriorityTrumpsGlobalness
2710  : TRI->regClassPriorityTrumpsGlobalness(*MF);
2711 
2712  ExtraInfo.emplace();
2713  EvictAdvisor =
2714  getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(*MF, *this);
2715 
2716  VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *LIS, *VRM, *Loops, *MBFI);
2717  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI));
2718 
2719  VRAI->calculateSpillWeightsAndHints();
2720 
2721  LLVM_DEBUG(LIS->dump());
2722 
2723  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
2724  SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI));
2725 
2726  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
2727  GlobalCand.resize(32); // This will grow as needed.
2728  SetOfBrokenHints.clear();
2729  LastEvicted.clear();
2730 
2731  allocatePhysRegs();
2732  tryHintsRecoloring();
2733 
2734  if (VerifyEnabled)
2735  MF->verify(this, "Before post optimization");
2736  postOptimization();
2737  reportStats();
2738 
2739  releaseMemory();
2740  return true;
2741 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:76
llvm::LiveRange::find
iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
Definition: LiveInterval.cpp:350
LiveRegMatrix.h
llvm::TargetRegisterInfo::getLargestLegalSuperClass
virtual const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const
Returns the largest super class of RC that is legal to use in the current sub-target and has the same...
Definition: TargetRegisterInfo.h:788
llvm::RAGreedy::ExtraRegInfo::LRE_DidCloneVirtReg
void LRE_DidCloneVirtReg(Register New, Register Old)
Definition: RegAllocGreedy.cpp:263
llvm::normalizeSpillWeight
static float normalizeSpillWeight(float UseDefFreq, unsigned Size, unsigned NumInstr)
Normalize the spill weight of a live interval.
Definition: CalcSpillWeights.h:33
llvm::AllocationOrder::end
Iterator end() const
Definition: AllocationOrder.h:99
llvm::SpillPlacement
Definition: SpillPlacement.h:43
llvm::TargetRegisterInfo::shouldRegionSplitForVirtReg
virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF, const LiveInterval &VirtReg) const
Region split has a high compile time cost especially for large live range.
Definition: TargetRegisterInfo.cpp:69
llvm::RAGreedy::enqueueImpl
void enqueueImpl(const LiveInterval *LI) override
enqueue - Add VirtReg to the priority queue of unassigned registers.
Definition: RegAllocGreedy.cpp:282
IndexedMap.h
llvm::RAGreedy::ID
static char ID
Definition: RegAllocGreedy.h:356
llvm::MachineOptimizationRemarkEmitter::allowExtraAnalysis
bool allowExtraAnalysis(StringRef PassName) const
Whether we allow for extra compile-time budget to perform more analysis to be more informative.
Definition: MachineOptimizationRemarkEmitter.h:167
llvm::SplitAnalysis::BlockInfo::LiveIn
bool LiveIn
Current reg is live in.
Definition: SplitKit.h:127
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::AArch64CC::HI
@ HI
Definition: AArch64BaseInfo.h:263
MachineInstr.h
MathExtras.h
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::TargetRegisterInfo::shouldUseLastChanceRecoloringForVirtReg
virtual bool shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF, const LiveInterval &VirtReg) const
Last chance recoloring has a high compile time cost especially for targets with a lot of registers.
Definition: TargetRegisterInfo.h:1061
llvm::MachineInstr::isImplicitDef
bool isImplicitDef() const
Definition: MachineInstr.h:1262
llvm::LLVMContext::emitError
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Definition: LLVMContext.cpp:266
llvm::SplitAnalysis::BlockInfo::FirstInstr
SlotIndex FirstInstr
First instr accessing current reg.
Definition: SplitKit.h:124
llvm::createGreedyRegisterAllocator
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
Definition: RegAllocGreedy.cpp:179
llvm::LiveRange::empty
bool empty() const
Definition: LiveInterval.h:382
llvm::RegAllocEvictionAdvisorAnalysis
ImmutableAnalysis abstraction for fetching the Eviction Advisor.
Definition: RegAllocEvictionAdvisor.h:171
llvm::MachineLoopInfo::getLoopFor
MachineLoop * getLoopFor(const MachineBasicBlock *BB) const
Return the innermost loop that BB lives in.
Definition: MachineLoopInfo.h:126
llvm::RegisterClassInfo::getLastCostChange
unsigned getLastCostChange(const TargetRegisterClass *RC) const
Get the position of the last cost change in getOrder(RC).
Definition: RegisterClassInfo.h:132
GrowRegionComplexityBudget
static cl::opt< unsigned long > GrowRegionComplexityBudget("grow-region-complexity-budget", cl::desc("growRegion() does not scale with the number of BB edges, so " "limit its budget and bail out once we reach the limit."), cl::init(10000), cl::Hidden)
DebugInfoMetadata.h
llvm::SlotIndex::isValid
bool isValid() const
Returns true if this is a valid index.
Definition: SlotIndexes.h:150
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
AllocationOrder.h
T
llvm::VirtRegMap::hasPreferredPhys
bool hasPreferredPhys(Register VirtReg) const
returns true if VirtReg is assigned to its preferred physreg.
Definition: VirtRegMap.cpp:109
llvm::lower_bound
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1726
StringRef.h
Pass.h
llvm::Register::id
unsigned id() const
Definition: Register.h:111
llvm::RS_New
@ RS_New
Newly created live range that has never been queued.
Definition: RegAllocEvictionAdvisor.h:49
llvm::SlotIndex::getBoundaryIndex
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:247
llvm::RegAllocBase::enqueue
void enqueue(const LiveInterval *LI)
enqueue - Add VirtReg to the priority queue of unassigned registers.
Definition: RegAllocBase.cpp:176
Loops
Hexagon Hardware Loops
Definition: HexagonHardwareLoops.cpp:372
llvm::LiveInterval::isSpillable
bool isSpillable() const
isSpillable - Can this interval be spilled?
Definition: LiveInterval.h:819
llvm::LiveRegMatrix::unassign
void unassign(const LiveInterval &VirtReg)
Unassign VirtReg from its PhysReg.
Definition: LiveRegMatrix.cpp:121
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
Statistic.h
LiveDebugVariables.h
llvm::TargetRegisterInfo::getRegisterCosts
ArrayRef< uint8_t > getRegisterCosts(const MachineFunction &MF) const
Get a list of cost values for all registers that correspond to the index returned by RegisterCostTabl...
Definition: TargetRegisterInfo.h:353
llvm::VirtRegMap::hasKnownPreference
bool hasKnownPreference(Register VirtReg) const
returns true if VirtReg has a known preferred register.
Definition: VirtRegMap.cpp:118
llvm::LiveInterval::weight
float weight() const
Definition: LiveInterval.h:718
llvm::printMBBReference
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Definition: MachineBasicBlock.cpp:116
llvm::LiveIntervals::getRegMaskSlotsInBlock
ArrayRef< SlotIndex > getRegMaskSlotsInBlock(unsigned MBBNum) const
Returns a sorted array of slot indices of all instructions with register mask operands in the basic b...
Definition: LiveIntervals.h:355
llvm::MCRegister::isValid
bool isValid() const
Definition: MCRegister.h:76
llvm::LiveIntervals::getInstructionFromIndex
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
Definition: LiveIntervals.h:231
llvm::LiveRegMatrix::getLiveUnions
LiveIntervalUnion * getLiveUnions()
Directly access the live interval unions per regunit.
Definition: LiveRegMatrix.h:157
llvm::VirtRegMap
Definition: VirtRegMap.h:33
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::MachineBlockFrequencyInfo::getBlockFreqRelativeToEntryBlock
float getBlockFreqRelativeToEntryBlock(const MachineBasicBlock *MBB) const
Compute the frequency of the block, relative to the entry block.
Definition: MachineBlockFrequencyInfo.h:68
llvm::RAGreedy::releaseMemory
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
Definition: RegAllocGreedy.cpp:277
llvm::LiveIntervals::intervalIsInOneMBB
MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
Definition: LiveIntervals.cpp:827
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
llvm::huge_valf
const float huge_valf
Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
Definition: MathExtras.cpp:28
llvm::RAGreedy
Definition: RegAllocGreedy.h:62
llvm::RegAllocEvictionAdvisor::canAllocatePhysReg
bool canAllocatePhysReg(unsigned CostPerUseLimit, MCRegister PhysReg) const
Definition: RegAllocGreedy.cpp:612
llvm::RS_Split
@ RS_Split
Attempt live range splitting if assignment is impossible.
Definition: RegAllocEvictionAdvisor.h:55
OptimizationRemarkEmitter.h
RegisterClassInfo.h
MachineBasicBlock.h
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::MemOp
Definition: TargetLowering.h:111
llvm::SplitAnalysis::BlockInfo
Additional information about basic blocks where the current variable is live.
Definition: SplitKit.h:122
ExhaustiveSearch
static cl::opt< bool > ExhaustiveSearch("exhaustive-register-search", cl::NotHidden, cl::desc("Exhaustive Search for registers bypassing the depth " "and interference cutoffs of last chance recoloring"), cl::Hidden)
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:234
llvm::SpillPlacement::iterate
void iterate()
iterate - Update the network iteratively until convergence, or new bundles are found.
Definition: SpillPlacement.cpp:334
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:36
llvm::BlockFrequency::getMaxFrequency
static uint64_t getMaxFrequency()
Returns the maximum possible frequency, the saturation value.
Definition: BlockFrequency.h:30
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
BlockFrequency.h
Spiller.h
llvm::RAGreedy::selectOrSplit
MCRegister selectOrSplit(const LiveInterval &, SmallVectorImpl< Register > &) override
Definition: RegAllocGreedy.cpp:2143
llvm::TimePassesIsEnabled
bool TimePassesIsEnabled
If the user specifies the -time-passes argument on an LLVM tool command line then the value of this b...
Definition: PassTimingInfo.cpp:37
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:380
TargetInstrInfo.h
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:126
llvm::SmallSet
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:136
llvm::MCRegister::NoRegister
static constexpr unsigned NoRegister
Definition: MCRegister.h:43
llvm::RS_Spill
@ RS_Spill
Live range will be spilled. No more splitting will be attempted.
Definition: RegAllocEvictionAdvisor.h:63
llvm::Optional< unsigned >
llvm::TargetRegisterInfo::getCSRFirstUseCost
virtual unsigned getCSRFirstUseCost() const
Allow the target to override the cost of using a callee-saved register for the first time.
Definition: TargetRegisterInfo.h:890
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::AllocationOrder::begin
Iterator begin() const
Definition: AllocationOrder.h:95
llvm::SpillPlacement::addConstraints
void addConstraints(ArrayRef< BlockConstraint > LiveBlocks)
addConstraints - Add constraints and biases.
Definition: SpillPlacement.cpp:260
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:654
SpillPlacement.h
llvm::MachineOperand::isFI
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
Definition: MachineOperand.h:330
llvm::SplitEditor
SplitEditor - Edit machine code and LiveIntervals for live range splitting.
Definition: SplitKit.h:258
llvm::SplitAnalysis::BlockInfo::FirstDef
SlotIndex FirstDef
First non-phi valno->def, or SlotIndex().
Definition: SplitKit.h:126
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1620
llvm::RegAllocBase::DeadRemats
SmallPtrSet< MachineInstr *, 32 > DeadRemats
Inst which is a def of an original reg and whose defs are already all dead after remat is saved in De...
Definition: RegAllocBase.h:77
llvm::RegAllocBase::TimerGroupDescription
static const char TimerGroupDescription[]
Definition: RegAllocBase.h:116
llvm::RegisterRegAlloc
Definition: RegAllocRegistry.h:61
llvm::MachineBlockFrequencyInfo::getBlockFreq
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const
getblockFreq - Return block frequency.
Definition: MachineBlockFrequencyInfo.cpp:230
llvm::MachineFunctionPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Definition: MachineFunctionPass.cpp:103
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1300
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::LiveIntervals::getInstructionIndex
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
Definition: LiveIntervals.h:226
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:240
llvm::MachineLoopInfo
Definition: MachineLoopInfo.h:89
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(RAGreedy, "greedy", "Greedy Register Allocator", false, false) INITIALIZE_PASS_END(RAGreedy
MachineRegisterInfo.h
Uses
SmallPtrSet< MachineInstr *, 2 > Uses
Definition: ARMLowOverheadLoops.cpp:585
llvm::EdgeBundles
Definition: EdgeBundles.h:24
llvm::LiveRange::beginIndex
SlotIndex beginIndex() const
beginIndex - Return the lowest numbered slot covered.
Definition: LiveInterval.h:385
AliasAnalysis.h
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
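The dbgs() and LLVM_DEBUG entries here (together with the DEBUG_TYPE entry further down) describe the standard debug-output idiom used throughout the allocator. A minimal sketch of that idiom follows; the helper function and message are hypothetical, and the output appears only in assertion-enabled builds when -debug or -debug-only=regalloc is active:

  #include "llvm/Support/Debug.h"
  #define DEBUG_TYPE "regalloc"

  // Hypothetical helper: prints a note under -debug-only=regalloc.
  static void noteQueueSize(unsigned Size) {
    LLVM_DEBUG(llvm::dbgs() << "priority queue now holds " << Size
                            << " live ranges\n");
  }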
llvm::SplitEditor::SM_Size
@ SM_Size
SM_Size - Overlap intervals to minimize the number of inserted COPY instructions.
Definition: SplitKit.h:289
llvm::LiveDebugVariables::splitRegister
void splitRegister(Register OldReg, ArrayRef< Register > NewRegs, LiveIntervals &LIS)
splitRegister - Move any user variables in OldReg to the live ranges in NewRegs where they are live.
Definition: LiveDebugVariables.cpp:1501
llvm::LiveRangeEdit::regs
ArrayRef< Register > regs() const
Definition: LiveRangeEdit.h:169
CommandLine.h
llvm::MachineRegisterInfo::reg_nodbg_instructions
iterator_range< reg_instr_nodbg_iterator > reg_nodbg_instructions(Register Reg) const
Definition: MachineRegisterInfo.h:362
llvm::Register::isPhysical
bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:97
llvm::RAGreedy::runOnMachineFunction
bool runOnMachineFunction(MachineFunction &mf) override
Perform register allocation.
Definition: RegAllocGreedy.cpp:2681
llvm::createInlineSpiller
Spiller * createInlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM, VirtRegAuxInfo &VRAI)
Create and return a spiller that will insert spill code directly instead of deferring through VirtRegM...
Definition: InlineSpiller.cpp:245
llvm::TargetRegisterClass::contains
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
Definition: TargetRegisterInfo.h:94
llvm::RAGreedy::aboutToRemoveInterval
void aboutToRemoveInterval(const LiveInterval &) override
Method called when the allocator is about to remove a LiveInterval.
Definition: RegAllocGreedy.cpp:2209
llvm::TargetInstrInfo
TargetInstrInfo - Interface to description of machine instruction set.
Definition: TargetInstrInfo.h:97
MachineLoopInfo.h
llvm::LiveRangeEdit::get
Register get(unsigned idx) const
Definition: LiveRangeEdit.h:157
llvm::SpillPlacement::BlockConstraint::ChangesValue
bool ChangesValue
True when this block changes the value of the live range.
Definition: SpillPlacement.h:97
llvm::InterferenceCache::init
void init(MachineFunction *mf, LiveIntervalUnion *liuarray, SlotIndexes *indexes, LiveIntervals *lis, const TargetRegisterInfo *tri)
init - Prepare cache for a new function.
Definition: InterferenceCache.cpp:50
DEBUG_TYPE
#define DEBUG_TYPE
Definition: RegAllocGreedy.cpp:80
llvm::MachineBlockFrequencyInfo
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
Definition: MachineBlockFrequencyInfo.h:33
greedy
greedy
Definition: RegAllocGreedy.cpp:160
LastChanceRecoloringMaxDepth
static cl::opt< unsigned > LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, cl::desc("Last chance recoloring max depth"), cl::init(5))
f
Itanium Name Demangler, i.e. convert the string "_Z1fv" into "f()". You can also use the CRTP base ManglingParser to perform some simple analysis on the mangled name.
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:667
llvm::Spiller::spill
virtual void spill(LiveRangeEdit &LRE)=0
spill - Spill the LRE.getParent() live interval.
llvm::RS_Done
@ RS_Done
There is nothing more we can do to this live range.
Definition: RegAllocEvictionAdvisor.h:71
llvm::ARM_PROC::A
@ A
Definition: ARMBaseInfo.h:34
llvm::SlotIndex::isEarlierInstr
static bool isEarlierInstr(SlotIndex A, SlotIndex B)
isEarlierInstr - Return true if A refers to an instruction earlier than B.
Definition: SlotIndexes.h:202
llvm::SplitEditor::SM_Partition
@ SM_Partition
SM_Partition(Default) - Try to create the complement interval so it doesn't overlap any other interva...
Definition: SplitKit.h:282
llvm::LiveInterval::getSize
unsigned getSize() const
getSize - Returns the sum of sizes of all the LiveRange's.
Definition: LiveInterval.cpp:970
llvm::LiveRange::const_iterator
Segments::const_iterator const_iterator
Definition: LiveInterval.h:213
llvm::RegAllocBase::Matrix
LiveRegMatrix * Matrix
Definition: RegAllocBase.h:69
llvm::Register::isPhysicalRegister
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
llvm::cl::NotHidden
@ NotHidden
Definition: CommandLine.h:138
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:45
llvm::EvictionCost::MaxWeight
float MaxWeight
Maximum spill weight evicted.
Definition: RegAllocEvictionAdvisor.h:80
SplitKit.h
llvm::TargetRegisterClass::AllocationPriority
const uint8_t AllocationPriority
Classes with a higher priority value are assigned first by register allocators using a greedy heurist...
Definition: TargetRegisterInfo.h:60
false
Definition: StackSlotColoring.cpp:141
llvm::EvictionCost
Cost of evicting interference - used by default advisor, and the eviction chain heuristic in RegAlloc...
Definition: RegAllocEvictionAdvisor.h:78
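A minimal sketch of how an EvictionCost can be accumulated and compared, using only the members documented on this page (BrokenHints, MaxWeight, setMax). The candidate values and the lexicographic tie-break below are illustrative assumptions, not the advisor's actual policy:

  #include <tuple>

  llvm::EvictionCost BestCost;
  BestCost.setMax();                 // start "infinitely" expensive

  llvm::EvictionCost Cost;           // cost of evicting one candidate
  Cost.BrokenHints = 1;              // assume one register hint would be broken
  Cost.MaxWeight = 2.5f;             // assume heaviest interfering spill weight

  // Prefer fewer broken hints, then lower maximum weight.
  if (std::tie(Cost.BrokenHints, Cost.MaxWeight) <
      std::tie(BestCost.BrokenHints, BestCost.MaxWeight))
    BestCost = Cost;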
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:127
assignedRegPartiallyOverlaps
static bool assignedRegPartiallyOverlaps(const TargetRegisterInfo &TRI, const VirtRegMap &VRM, MCRegister PhysReg, const LiveInterval &Intf)
Return true if the existing assignment of Intf overlaps, but is not the same, as PhysReg.
Definition: RegAllocGreedy.cpp:1851
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::RegAllocBase::init
void init(VirtRegMap &vrm, LiveIntervals &lis, LiveRegMatrix &mat)
Definition: RegAllocBase.cpp:57
LiveIntervalUnion.h
llvm::LiveRangeEdit
Definition: LiveRangeEdit.h:44
llvm::SpillPlacement::finish
bool finish()
finish - Compute the optimal spill code placement given the constraints.
Definition: SpillPlacement.cpp:363
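Together with the addLinks, scanActiveBundles and getRecentPositive entries further down, these SpillPlacement methods form a small query protocol. A condensed sketch of that protocol, assuming a SpillPlacer pointer, a LiveBundles BitVector and pre-computed constraint arrays already exist; the region-growing step a real client performs around the positive bundles is deliberately elided:

  SpillPlacer->prepare(LiveBundles);              // reset solver, bind result bits
  SpillPlacer->addConstraints(SplitConstraints);  // per-block entry/exit preferences
  SpillPlacer->addLinks(TransparentBlocks);       // blocks the range merely lives through
  if (SpillPlacer->scanActiveBundles())
    for (unsigned Bundle : SpillPlacer->getRecentPositive())
      (void)Bundle;                               // a real client grows the region here
  bool Feasible = SpillPlacer->finish();          // solve; LiveBundles now holds the answer
  (void)Feasible;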
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
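For reference, a minimal sketch of the STATISTIC idiom: the counter requires a DEBUG_TYPE definition and is printed with -stats. The counter name and description below are invented for illustration:

  #include "llvm/ADT/Statistic.h"
  #define DEBUG_TYPE "regalloc"

  STATISTIC(NumExampleEvents, "Number of example events (illustrative)");

  static void recordExampleEvent() { ++NumExampleEvents; }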
llvm::LiveRegMatrix::isPhysRegUsed
bool isPhysRegUsed(MCRegister PhysReg) const
Returns true if the given PhysReg has any live intervals assigned.
Definition: LiveRegMatrix.cpp:138
llvm::SmallVectorImpl::resize
void resize(size_type N)
Definition: SmallVector.h:619
llvm::cl::Option::getNumOccurrences
int getNumOccurrences() const
Definition: CommandLine.h:395
llvm::InterferenceCache::Cursor::first
SlotIndex first()
first - Return the starting index of the first interfering range in the current block.
Definition: InterferenceCache.h:228
BitVector.h
llvm::SlotIndexes
SlotIndexes pass.
Definition: SlotIndexes.h:313
SmallPtrSet.h
Copies
SI Lower i1 Copies
Definition: SILowerI1Copies.cpp:406
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::BitVector
Definition: BitVector.h:75
llvm::MachineFunction::verify
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
Definition: MachineVerifier.cpp:337
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
llvm::SpillPlacement::getBlockFrequency
BlockFrequency getBlockFrequency(unsigned Number) const
getBlockFrequency - Return the estimated block execution frequency per function invocation.
Definition: SpillPlacement.h:155
llvm::EdgeBundles::getNumBundles
unsigned getNumBundles() const
getNumBundles - Return the total number of bundles in the CFG.
Definition: EdgeBundles.h:44
llvm::RegAllocBase::RegClassInfo
RegisterClassInfo RegClassInfo
Definition: RegAllocBase.h:70
EdgeBundles.h
llvm::SlotIndexes::getInstructionFromIndex
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
Definition: SlotIndexes.h:402
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
llvm::None
const NoneType None
Definition: None.h:24
llvm::LiveIntervalUnion::find
SegmentIter find(SlotIndex x)
Definition: LiveIntervalUnion.h:71
llvm::RegisterClassInfo::getMinCost
uint8_t getMinCost(const TargetRegisterClass *RC) const
Get the minimum register cost in RC's allocation order.
Definition: RegisterClassInfo.h:124
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
BranchProbability.h
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
llvm::RegisterClassInfo::isProperSubClass
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
Definition: RegisterClassInfo.h:108
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::RegisterClassInfo::getNumAllocatableRegs
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current fun...
Definition: RegisterClassInfo.h:91
llvm::SpillPlacement::BlockConstraint::Number
unsigned Number
Basic block number (from MBB::getNumber()).
Definition: SpillPlacement.h:90
llvm::MachineRegisterInfo::getRegClass
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
Definition: MachineRegisterInfo.h:642
MachineOptimizationRemarkEmitter.h
//===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-===//
llvm::VirtRegMap::hasPhys
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
Definition: VirtRegMap.h:99
llvm::TargetInstrInfo::isStoreToStackSlot
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
Definition: TargetInstrInfo.h:300
llvm::LiveRegMatrix::query
LiveIntervalUnion::Query & query(const LiveRange &LR, MCRegister RegUnit)
Query a line of the assigned virtual register matrix directly.
Definition: LiveRegMatrix.cpp:178
llvm::Register::isVirtual
bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
llvm::RegAllocBase::TimerGroupName
static const char TimerGroupName[]
Definition: RegAllocBase.h:115
llvm::SpillPlacement::BlockConstraint::Entry
BorderConstraint Entry
Constraint on block entry.
Definition: SpillPlacement.h:91
Timer.h
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::SlotIndex::getBaseIndex
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
Definition: SlotIndexes.h:240
llvm::AllocationOrder
Definition: AllocationOrder.h:30
llvm::SpillPlacement::getRecentPositive
ArrayRef< unsigned > getRecentPositive()
getRecentPositive - Return an array of bundles that became positive during the previous call to scanA...
Definition: SpillPlacement.h:142
llvm::MachineLoop
Definition: MachineLoopInfo.h:44
llvm::BlockFrequency
Definition: BlockFrequency.h:23
llvm::SmallSet::count
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
llvm::SpillPlacement::PrefReg
@ PrefReg
Block entry/exit prefers a register.
Definition: SpillPlacement.h:82
llvm::cl::values
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:685
llvm::SlotIndexes::getMBBStartIdx
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:465
llvm::TargetRegisterInfo::regsOverlap
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Definition: TargetRegisterInfo.h:419
llvm::SplitAnalysis::BlockInfo::LastInstr
SlotIndex LastInstr
Last instr accessing current reg.
Definition: SplitKit.h:125
llvm::RegAllocBase::TRI
const TargetRegisterInfo * TRI
Definition: RegAllocBase.h:65
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:76
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::RAGreedyID
char & RAGreedyID
Greedy register allocator.
Definition: RegAllocGreedy.cpp:142
uint64_t
LiveIntervals.h
llvm::SplitAnalysis::BlockInfo::LiveOut
bool LiveOut
Current reg is live out.
Definition: SplitKit.h:128
llvm::RAGreedy::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
RAGreedy analysis usage.
Definition: RegAllocGreedy.cpp:201
llvm::MachineOptimizationRemarkMissed
Diagnostic information for missed-optimization remarks.
Definition: MachineOptimizationRemarkEmitter.h:83
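A hedged sketch of emitting such a missed-optimization remark through an existing MachineOptimizationRemarkEmitter *ORE, using the ore::NV argument wrapper documented earlier on this page. The remark name, message text and NumSpills value are illustrative, and MF is an available MachineFunction:

  #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"

  // Sketch only: report a spill count as a missed-optimization remark.
  ORE->emit([&]() {
    return llvm::MachineOptimizationRemarkMissed(DEBUG_TYPE, "SpillsInFunction",
                                                 MF.getFunction().getSubprogram(),
                                                 &MF.front())
           << llvm::ore::NV("NumSpills", NumSpills) << " spills generated";
  });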
llvm::LiveRange
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VirtRegMap.h
llvm::TargetRegisterInfo::getRegClassName
const char * getRegClassName(const TargetRegisterClass *Class) const
Returns the name of the register class.
Definition: TargetRegisterInfo.h:756
GreedyRegClassPriorityTrumpsGlobalness
static cl::opt< bool > GreedyRegClassPriorityTrumpsGlobalness("greedy-regclass-priority-trumps-globalness", cl::desc("Change the greedy register allocator's live range priority " "calculation to make the AllocationPriority of the register class " "more important than whether the range is global"), cl::Hidden)
llvm::LiveRegMatrix::checkRegMaskInterference
bool checkRegMaskInterference(const LiveInterval &VirtReg, MCRegister PhysReg=MCRegister::NoRegister)
Check for regmask interference only.
Definition: LiveRegMatrix.cpp:146
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::RegClassFilterFunc
std::function< bool(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC)> RegClassFilterFunc
Definition: RegAllocCommon.h:17
llvm::RegAllocBase::LIS
LiveIntervals * LIS
Definition: RegAllocBase.h:68
llvm::RAGreedy::RAGreedy
RAGreedy(const RegClassFilterFunc F=allocateAllRegClasses)
Definition: RegAllocGreedy.cpp:196
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::MCPhysReg
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:21
llvm::VirtRegMap::getPhys
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:105
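A small sketch combining hasPhys/getPhys with the TargetRegisterInfo::regsOverlap query documented earlier on this page, in the spirit of the assignedRegPartiallyOverlaps helper. OtherReg, PhysReg, VRM and TRI are assumed to be in scope:

  // Sketch: is OtherReg already mapped, and does its assignment alias PhysReg?
  if (VRM->hasPhys(OtherReg)) {
    llvm::MCRegister Assigned = VRM->getPhys(OtherReg);
    if (TRI->regsOverlap(Assigned, PhysReg)) {
      // The existing assignment conflicts with PhysReg.
    }
  }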
llvm::SpillPlacement::addPrefSpill
void addPrefSpill(ArrayRef< unsigned > Blocks, bool Strong)
addPrefSpill - Add PrefSpill constraints to all blocks listed.
Definition: SpillPlacement.cpp:281
llvm::RegAllocBase::VRM
VirtRegMap * VRM
Definition: RegAllocBase.h:67
llvm::BlockFrequency::getFrequency
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Definition: BlockFrequency.h:34
llvm::TargetRegisterInfo::reverseLocalAssignment
virtual bool reverseLocalAssignment() const
Allow the target to reverse allocation order of local live ranges.
Definition: TargetRegisterInfo.h:885
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::InterferenceCache::Cursor::moveToBlock
void moveToBlock(unsigned MBBNum)
moveTo - Move cursor to basic block MBBNum.
Definition: InterferenceCache.h:217
MCRegisterInfo.h
llvm::TargetInstrInfo::hasStoreToStackSlot
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
Definition: TargetInstrInfo.cpp:373
llvm::RegAllocBase::MRI
MachineRegisterInfo * MRI
Definition: RegAllocBase.h:66
llvm::SlotIndex::isSameInstr
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:196
llvm::TargetRegisterInfo::regClassPriorityTrumpsGlobalness
virtual bool regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const
When prioritizing live ranges in register allocation, if this hook returns true then the AllocationPr...
Definition: TargetRegisterInfo.h:1085
ArrayRef.h
MachineFunctionPass.h
Allocator
Greedy Register Allocator
Definition: RegAllocGreedy.cpp:161
llvm::Register::isVirtualRegister
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:567
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::RAGreedy::dequeue
const LiveInterval * dequeue() override
dequeue - Return the next unassigned register, or NULL.
Definition: RegAllocGreedy.cpp:355
llvm::RegisterClassInfo
Definition: RegisterClassInfo.h:29
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:656
Hysteresis
const float Hysteresis
Definition: RegAllocGreedy.cpp:177
llvm::RegAllocBase::allocatePhysRegs
void allocatePhysRegs()
Definition: RegAllocBase.cpp:84
llvm::InterferenceCache::Cursor::last
SlotIndex last()
last - Return the ending index of the last interfering range in the current block.
Definition: InterferenceCache.h:234
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:82
llvm::AllocationOrder::getOrder
ArrayRef< MCPhysReg > getOrder() const
Get the allocation order without reordered hints.
Definition: AllocationOrder.h:111
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:359
CSRFirstTimeCost
static cl::opt< unsigned > CSRFirstTimeCost("regalloc-csr-first-time-cost", cl::desc("Cost for first time use of callee-saved register."), cl::init(0), cl::Hidden)
llvm::LiveDebugVariables
Definition: LiveDebugVariables.h:32
llvm::SpillPlacement::PrefSpill
@ PrefSpill
Block entry/exit prefers a stack slot.
Definition: SpillPlacement.h:83
llvm::Register::asMCReg
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition: Register.h:120
llvm::SpillPlacement::addLinks
void addLinks(ArrayRef< unsigned > Links)
addLinks - Add transparent blocks with the given numbers.
Definition: SpillPlacement.cpp:295
llvm::LiveIntervalUnion::SegmentIter
LiveSegments::iterator SegmentIter
Definition: LiveIntervalUnion.h:52
llvm::SpillPlacement::DontCare
@ DontCare
Block doesn't care / variable not live.
Definition: SpillPlacement.h:81
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:114
llvm::TargetInstrInfo::getPatchpointUnfoldableRange
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
Definition: TargetInstrInfo.cpp:476
llvm::SmallSet::erase
bool erase(const T &V)
Definition: SmallSet.h:209
llvm::MachineFunction
Definition: MachineFunction.h:241
llvm::LiveRange::FindSegmentContaining
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
Definition: LiveInterval.h:436
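A minimal sketch chaining the LiveIntervals and SlotIndex queries documented on this page (hasInterval, getInterval, getInstructionIndex, getBaseIndex, FindSegmentContaining, end) to test whether a virtual register is live across an instruction. LIS, Reg and MI are assumed to exist in a pass context:

  // Sketch: is virtual register Reg live at MachineInstr MI?
  if (LIS->hasInterval(Reg)) {
    llvm::LiveInterval &LI = LIS->getInterval(Reg);
    llvm::SlotIndex Idx = LIS->getInstructionIndex(MI).getBaseIndex();
    bool LiveHere = LI.FindSegmentContaining(Idx) != LI.end();
    (void)LiveHere;
  }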
llvm::LiveRegMatrix::IK_VirtReg
@ IK_VirtReg
Virtual register interference.
Definition: LiveRegMatrix.h:90
llvm::SlotIndex::InstrDist
@ InstrDist
The default distance between instructions as returned by distance().
Definition: SlotIndexes.h:136
llvm::TargetInstrInfo::hasLoadFromStackSlot
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
Definition: TargetInstrInfo.cpp:359
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
LastChanceRecoloringMaxInterference
static cl::opt< unsigned > LastChanceRecoloringMaxInterference("lcr-max-interf", cl::Hidden, cl::desc("Last chance recoloring maximum number of considered" " interference at a time"), cl::init(8))
llvm::MachineBasicBlock::getNumber
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
Definition: MachineBasicBlock.h:1078
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1614
llvm::SlotIndexes::getLastIndex
SlotIndex getLastIndex()
Returns the base index of the last slot in this analysis.
Definition: SlotIndexes.h:373
llvm::MachineBasicBlock::getFirstNonDebugInstr
iterator getFirstNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the first non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:257
llvm::AnalysisUsage::setPreservesCFG
void setPreservesCFG()
This function should be called by the pass iff it does not:
Definition: Pass.cpp:263
llvm::RegAllocBase::postOptimization
virtual void postOptimization()
Definition: RegAllocBase.cpp:167
Shrink
This would be a win on but not x86 or ppc64 Shrink
Definition: README.txt:41
llvm::MachineOptimizationRemarkEmitterPass
The analysis pass.
Definition: MachineOptimizationRemarkEmitter.h:219
llvm::SplitEditor::SM_Speed
@ SM_Speed
SM_Speed - Overlap intervals to minimize the expected execution frequency of the inserted copies.
Definition: SplitKit.h:294
llvm::AnalysisUsage::addPreserved
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Definition: PassAnalysisSupport.h:98
llvm::RS_Assign
@ RS_Assign
Only attempt assignment and eviction. Then requeue as RS_Split.
Definition: RegAllocEvictionAdvisor.h:52
Number
uint32_t Number
Definition: Profile.cpp:47
llvm::LiveRegMatrix::checkInterference
InterferenceKind checkInterference(const LiveInterval &VirtReg, MCRegister PhysReg)
Check for interference before assigning VirtReg to PhysReg.
Definition: LiveRegMatrix.cpp:186
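A condensed sketch of the assign-or-skip loop these matrix queries support, in the style of the simpler RegAllocBase clients rather than RAGreedy itself; eviction, splitting and cost limits are deliberately omitted, and IK_Free is the no-interference value of the InterferenceKind enum. Iterating getOrder() ignores hint reordering, which keeps the sketch short:

  // Sketch: assign VirtReg to the first interference-free register in Order.
  llvm::MCRegister assignFirstFree(const llvm::LiveInterval &VirtReg,
                                   const llvm::AllocationOrder &Order,
                                   llvm::LiveRegMatrix &Matrix) {
    for (llvm::MCPhysReg PhysReg : Order.getOrder()) {
      if (Matrix.checkInterference(VirtReg, PhysReg) ==
          llvm::LiveRegMatrix::IK_Free) {
        Matrix.assign(VirtReg, PhysReg);  // record the mapping
        return PhysReg;
      }
    }
    return llvm::MCRegister::NoRegister;  // no free register in this order
  }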
llvm::BranchProbability
Definition: BranchProbability.h:29
TargetSubtargetInfo.h
clEnumValN
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:660
llvm::MachineBlockFrequencyInfo::printBlockFreq
raw_ostream & printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const
Definition: MachineBlockFrequencyInfo.cpp:278
llvm::EvictionCost::BrokenHints
unsigned BrokenHints
Total number of broken hints.
Definition: RegAllocEvictionAdvisor.h:79
llvm::SplitAnalysis::BlockInfo::MBB
MachineBasicBlock * MBB
Definition: SplitKit.h:123
llvm::LiveRangeEdit::empty
bool empty() const
Definition: LiveRangeEdit.h:156
llvm::LiveIntervalUnion::Query::interferingVRegs
const SmallVectorImpl< const LiveInterval * > & interferingVRegs(unsigned MaxInterferingRegs=std::numeric_limits< unsigned >::max())
Definition: LiveIntervalUnion.h:162
llvm::SmallSet::insert
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:182
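A tiny sketch of the SmallSet operations documented on this page (insert, count, erase); the element type and values are arbitrary:

  #include "llvm/ADT/SmallSet.h"

  // SmallSet keeps a handful of elements inline before falling back to a set.
  llvm::SmallSet<unsigned, 4> Seen;
  if (Seen.insert(42).second) {
    // .second is true only the first time 42 is inserted.
  }
  bool Present = Seen.count(42) != 0;  // membership test
  Seen.erase(42);
  (void)Present;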
llvm::Pass::dump
void dump() const
Definition: Pass.cpp:135
llvm::SpillPlacement::prepare
void prepare(BitVector &RegBundles)
prepare - Reset state and prepare for a new spill placement computation.
Definition: SpillPlacement.cpp:353
llvm::InterferenceCache::Cursor
Cursor - The primary query interface for the block interference cache.
Definition: InterferenceCache.h:176
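A short sketch of the per-block query pattern this Cursor supports, using only the moveToBlock, hasInterference, first and last members documented on this page. Intf is assumed to be a Cursor already bound to a physical register, and MBB an available MachineBasicBlock pointer:

  // Sketch: inspect interference inside one basic block.
  Intf.moveToBlock(MBB->getNumber());
  if (Intf.hasInterference()) {
    llvm::SlotIndex Start = Intf.first();  // first interfering index in the block
    llvm::SlotIndex Stop = Intf.last();    // last interfering index in the block
    (void)Start;
    (void)Stop;
  }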
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::LiveRangeEdit::begin
iterator begin() const
Definition: LiveRangeEdit.h:153
LiveRangeEdit.h
llvm::LiveRange::endIndex
SlotIndex endIndex() const
endIndex - Return the maximum point of the range as a whole, exclusive.
Definition: LiveInterval.h:392
llvm::SlotIndexes::getZeroIndex
SlotIndex getZeroIndex()
Returns the zero index for this analysis.
Definition: SlotIndexes.h:367
llvm::SpillPlacement::MustSpill
@ MustSpill
A register is impossible, variable must be spilled.
Definition: SpillPlacement.h:85
RegAllocBase.h
llvm::RegAllocEvictionAdvisor::getOrderLimit
Optional< unsigned > getOrderLimit(const LiveInterval &VirtReg, const AllocationOrder &Order, unsigned CostPerUseLimit) const
Definition: RegAllocGreedy.cpp:586
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::MachineRegisterInfo::def_operands
iterator_range< def_iterator > def_operands(Register Reg) const
Definition: MachineRegisterInfo.h:397
llvm::MachineFunction::getBlockNumbered
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
Definition: MachineFunction.h:757
llvm::SpillPlacement::scanActiveBundles
bool scanActiveBundles()
scanActiveBundles - Perform an initial scan of all bundles activated by addConstraints and addLinks,...
Definition: SpillPlacement.cpp:311
llvm::BitVector::test
bool test(unsigned Idx) const
Definition: BitVector.h:454
llvm::MachineRegisterInfo::getSimpleHint
Register getSimpleHint(Register VReg) const
getSimpleHint - same as getRegAllocationHint except it will only return a target independent hint.
Definition: MachineRegisterInfo.h:813
llvm::TargetRegisterInfo::shouldUseDeferredSpillingForVirtReg
virtual bool shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF, const LiveInterval &VirtReg) const
Deferred spilling delays the spill insertion of a virtual register after every other allocation.
Definition: TargetRegisterInfo.h:1076
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:606
Stats
block placement Basic Block Placement Stats
Definition: MachineBlockPlacement.cpp:3655
llvm::RegAllocBase
RegAllocBase provides the register allocation driver and interface that can be extended to add intere...
Definition: RegAllocBase.h:61
llvm::RS_Memory
@ RS_Memory
Live range is in memory.
Definition: RegAllocEvictionAdvisor.h:67
llvm::LiveIntervalUnion::Query::checkInterference
bool checkInterference()
Definition: LiveIntervalUnion.h:159
llvm::SmallVectorImpl::assign
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:688
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:152
MachineFrameInfo.h
llvm::NamedRegionTimer
This class is basically a combination of TimeRegion and Timer.
Definition: Timer.h:165
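A hedged sketch of how such a timer is scoped around one allocation phase inside a RegAllocBase-derived pass, combining the TimerGroupName, TimerGroupDescription and TimePassesIsEnabled entries found elsewhere on this page. The phase name and description are illustrative:

  #include "llvm/Support/Timer.h"

  // Sketch: time one phase; the timer only records when -time-passes is given.
  {
    llvm::NamedRegionTimer T("seed", "Seed Live Regs", TimerGroupName,
                             TimerGroupDescription, llvm::TimePassesIsEnabled);
    // ... work being timed ...
  }  // timer stops when T goes out of scope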
llvm::MachineOperand::getIndex
int getIndex() const
Definition: MachineOperand.h:566
Function.h
llvm::EvictionCost::setMax
void setMax()
Definition: RegAllocEvictionAdvisor.h:86
RegAllocGreedy.h
llvm::LiveRangeStage
LiveRangeStage
Definition: RegAllocEvictionAdvisor.h:47
getNumAllocatableRegsForConstraints
static unsigned getNumAllocatableRegsForConstraints(const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, const RegisterClassInfo &RCI)
Get the number of allocatable registers that match the constraints of Reg on MI and that are also in ...
Definition: RegAllocGreedy.cpp:1405
InterferenceCache.h
llvm::LiveIntervals
Definition: LiveIntervals.h:54
llvm::AllocationOrder::isHint
bool isHint(Register Reg) const
Return true if Reg is a preferred physical register.
Definition: AllocationOrder.h:114
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:591
llvm::MachineFrameInfo::isSpillSlotObjectIndex
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
Definition: MachineFrameInfo.h:710
llvm::SlotIndex::getInstrDistance
int getInstrDistance(SlotIndex other) const
Return the scaled distance from this index to the given one, where all slots on the same instruction ...
Definition: SlotIndexes.h:219
llvm::BitVector::reset
BitVector & reset()
Definition: BitVector.h:385
RegAllocEvictionAdvisor.h
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
AA
llvm::RegisterClassInfo::getLastCalleeSavedAlias
MCRegister getLastCalleeSavedAlias(MCRegister PhysReg) const
getLastCalleeSavedAlias - Returns the last callee saved register that overlaps PhysReg,...
Definition: RegisterClassInfo.h:115
LiveInterval.h
llvm::MCRegUnitIterator
Definition: MCRegisterInfo.h:680
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:105
llvm::InterferenceCache::getMaxCursors
unsigned getMaxCursors() const
getMaxCursors - Return the maximum number of concurrent cursors that can be supported.
Definition: InterferenceCache.h:173
SmallVector.h
llvm::LiveIntervalUnion::Query
Query interferences between a single live virtual register and a live interval union.
Definition: LiveIntervalUnion.h:112
llvm::RegAllocEvictionAdvisor::canReassign
Register canReassign(const LiveInterval &VirtReg, Register PrevReg) const
Definition: RegAllocGreedy.cpp:423
LiveStacks.h
llvm::InterferenceCache::Cursor::hasInterference
bool hasInterference()
hasInterference - Return true if the current block has any interference.
Definition: InterferenceCache.h:222
llvm::SmallSet::size
size_type size() const
Definition: SmallSet.h:161
llvm::RS_Split2
@ RS_Split2
Attempt more aggressive live range splitting that is guaranteed to make progress.
Definition: RegAllocEvictionAdvisor.h:60
llvm::LiveRangeEdit::size
unsigned size() const
Definition: LiveRangeEdit.h:155
llvm::MCRegisterInfo::DiffListIterator::isValid
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
Definition: MCRegisterInfo.h:224
llvm::AAResultsWrapperPass
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Definition: AliasAnalysis.h:1347
llvm::SpillPlacement::BlockConstraint::Exit
BorderConstraint Exit
Constraint on block exit.
Definition: SpillPlacement.h:92
llvm::LiveInterval::reg
Register reg() const
Definition: LiveInterval.h:717
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::LiveRange::clear
void clear()
Definition: LiveInterval.h:300
llvm::EdgeBundles::getBlocks
ArrayRef< unsigned > getBlocks(unsigned Bundle) const
getBlocks - Return an array of blocks that are connected to Bundle.
Definition: EdgeBundles.h:47
llvm::MipsISD::Ins
@ Ins
Definition: MipsISelLowering.h:160
greedyRegAlloc
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", createGreedyRegisterAllocator)
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::RegAllocEvictionAdvisor::isUnusedCalleeSavedReg
bool isUnusedCalleeSavedReg(MCRegister PhysReg) const
Returns true if the given PhysReg is a callee saved register and has not been used for allocation yet...
Definition: RegAllocGreedy.cpp:577
MachineOperand.h
SplitSpillMode
static cl::opt< SplitEditor::ComplementSpillMode > SplitSpillMode("split-spill-mode", cl::Hidden, cl::desc("Spill mode for splitting live ranges"), cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")), cl::init(SplitEditor::SM_Speed))
llvm::RegAllocBase::VerifyEnabled
static bool VerifyEnabled
VerifyEnabled - True when -verify-regalloc is given.
Definition: RegAllocBase.h:123
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
llvm::LiveRegMatrix::assign
void assign(const LiveInterval &VirtReg, MCRegister PhysReg)
Assign VirtReg to PhysReg.
Definition: LiveRegMatrix.cpp:104
hasTiedDef
static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg)
Return true if reg has any tied def operand.
Definition: RegAllocGreedy.cpp:1841
llvm::AllocationOrder::create
static AllocationOrder create(unsigned VirtReg, const VirtRegMap &VRM, const RegisterClassInfo &RegClassInfo, const LiveRegMatrix *Matrix)
Create a new AllocationOrder for VirtReg.
Definition: AllocationOrder.cpp:29
llvm::LiveRangeEdit::end
iterator end() const
Definition: LiveRangeEdit.h:154
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVMContext.h
llvm::LiveIntervals::hasInterval
bool hasInterval(Register Reg) const
Definition: LiveIntervals.h:125
CalcSpillWeights.h
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
SlotIndexes.h
llvm::cl::desc
Definition: CommandLine.h:405
raw_ostream.h
llvm::MachineDominatorTree
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Definition: MachineDominators.h:51
MachineFunction.h
llvm::printReg
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Definition: TargetRegisterInfo.cpp:111
llvm::MachineBlockFrequencyInfo::getEntryFreq
uint64_t getEntryFreq() const
Divide a block's BlockFrequency::getFrequency() value by this value to obtain the entry block - relat...
Definition: MachineBlockFrequencyInfo.cpp:289
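A small sketch of the getBlockFreq/getEntryFreq contract described above: dividing a block's fixed-point frequency by the entry frequency yields an entry-relative ratio. MBFI and MBB are assumed to be available in a pass context:

  // Sketch: express MBB's frequency relative to the function entry block.
  uint64_t Freq = MBFI->getBlockFreq(MBB).getFrequency();
  uint64_t EntryFreq = MBFI->getEntryFreq();
  double EntryRelative = EntryFreq ? double(Freq) / double(EntryFreq) : 0.0;
  (void)EntryRelative;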
InitializePasses.h
llvm::LiveStacks
Definition: LiveStacks.h:35
MachineBlockFrequencyInfo.h
TargetRegisterInfo.h
Debug.h
llvm::SplitAnalysis
SplitAnalysis - Analyze a LiveInterval, looking for live range splitting opportunities.
Definition: SplitKit.h:97
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:279
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:153
llvm::SpillPlacement::BlockConstraint
BlockConstraint - Entry and exit constraints for a basic block.
Definition: SpillPlacement.h:89
RegAllocRegistry.h
llvm::LiveRange::end
iterator end()
Definition: LiveInterval.h:216
llvm::RegAllocBase::ShouldAllocateClass
const RegClassFilterFunc ShouldAllocateClass
Definition: RegAllocBase.h:71
SetVector.h
llvm::TargetInstrInfo::isLoadFromStackSlot
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
Definition: TargetInstrInfo.h:262
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::LiveIntervals::getRegUnit
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
Definition: LiveIntervals.h:393
MachineDominators.h
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::EdgeBundles::getBundle
unsigned getBundle(unsigned N, bool Out) const
getBundle - Return the ingoing (Out = false) or outgoing (Out = true) bundle number for basic block N
Definition: EdgeBundles.h:41
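A brief sketch of the EdgeBundles queries documented on this page (getBundle, getBlocks): each basic block has an ingoing and an outgoing bundle, and each bundle knows its connected blocks. Bundles and MBB are assumed to exist in a pass context:

  // Sketch: map a block to its edge bundles and walk one bundle's blocks.
  unsigned InB = Bundles->getBundle(MBB->getNumber(), /*Out=*/false);
  unsigned OutB = Bundles->getBundle(MBB->getNumber(), /*Out=*/true);
  for (unsigned Number : Bundles->getBlocks(InB)) {
    // Number is the MBB number of a block connected to the ingoing bundle.
    (void)Number;
  }
  (void)OutB;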
SmallSet.h
EnableDeferredSpilling
static cl::opt< bool > EnableDeferredSpilling("enable-deferred-spilling", cl::Hidden, cl::desc("Instead of spilling a variable right away, defer the actual " "code insertion to the end of the allocation. That way the " "allocator might still find a suitable coloring for this " "variable because of other evicted variables."), cl::init(false))
llvm::RAGreedy::spiller
Spiller & spiller() override
Definition: RegAllocGreedy.h:336
llvm::LiveRegMatrix
Definition: LiveRegMatrix.h:40