//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "LiveRangeCalc.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));

namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stack slot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find out a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, unsigned &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
};

class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object that records spill information and performs the hoisting.
  HoistSpillHelper HSpiller;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(pass, mf, vrm) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(unsigned Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
  if (!MI.isFullCopy())
    return 0;
  if (MI.getOperand(0).getReg() == Reg)
    return MI.getOperand(1).getReg();
  if (MI.getOperand(1).getReg() == Reg)
    return MI.getOperand(0).getReg();
  return 0;
}
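
// For example, given "%a = COPY %b", isFullCopyOf(MI, %b) returns %a and
// isFullCopyOf(MI, %a) returns %b. Partial (subregister) copies are rejected
// by isFullCopy() and yield 0.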

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_instr_iterator
       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr &MI = *RI++;
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}

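/// isSibling - Check if Reg is a sibling of the register being spilled, i.e.
/// a virtual register that was split from the same original register and
/// therefore shares its stack slot.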
bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

/// It is beneficial to spill to an earlier place in the same BB when there
/// is an alternative def earlier in the same MBB. Hoisting the spill as far
/// as possible in SpillMBB can ease register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  unsigned SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(*MII);
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr &MI = *UI++;
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

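/// Check whether a rematerialized def of VReg for use by MI can still be
/// guaranteed a physical register assignment; see the STATEPOINT discussion
/// in the function body.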
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle here:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  return (MI.getOpcode() != TargetOpcode::STATEPOINT);
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg, MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t        " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::reg_bundle_iterator
         RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
         RegI != E; ) {
      MachineInstr &MI = *RegI++;

      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of unused regs, we need to check whether they have non-dbg
  // references instead of whether they have non-empty intervals.
  unsigned ResultPos = 0;
  for (unsigned Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               unsigned VReg = 0) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

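  // Update the statistics. Folding a COPY whose destination operand
  // (index 0) was replaced turns it into a store to the stack slot, i.e. a
  // spill; folding the source of a COPY produces a reload.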
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}

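/// insertReload - Insert a reload of NewVReg from StackSlot before MI.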
void InlineSpiller::insertReload(unsigned NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isFullUndefDef(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return false;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return !Def.getOperand(0).getSubReg();
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  bool IsRealSpill = true;
  if (isFullUndefDef(*MI)) {
    // Don't spill an undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));
    IsRealSpill = false;
  } else
    TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                                "spill"));
  ++NumSpills;
  if (IsRealSpill)
    HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
}

/// spillAroundUses - Insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI->getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
      buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
      MBB->erase(MI);
      continue;
    }

    assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
        MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(*MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, *MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
  }

  // Finally delete the SnippetCopies.
  for (unsigned Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr &MI = *(RI++);
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

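/// spill - Spill the live range in \p edit together with all of its snippet
/// siblings to a stack slot shared by all descendants of the original
/// register, rematerializing values where possible.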
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
    auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, unsigned &LiveReg) {
  SlotIndex Idx;
  unsigned OrigReg = OrigLI.reg;
  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
  if (MI != BB.end())
    Idx = LIS.getInstructionIndex(*MI);
  else
    Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
  SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (auto const SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with smaller SlotIndex.
  for (const auto CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (const auto SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the
  // spills to be hoisted. We choose the BB where the def instruction is
  // located as the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with a spill, walk up the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the previous spill seen
  // will be redundant and the node containing it will be removed. All the
  // nodes on the path starting from the first node with a non-redundant spill
  // to the Root node will be added to the WorkSet, which will contain all the
  // possible locations where spills may be hoisted to after the loop below is
  // done.
  for (const auto Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
      /// If we see the Node already in WorkSet, the path from the Node to
      /// the Root node must already be traversed by another spill.
      /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting starts. The value of BBs
      // containing original spills is set to 0, in order to discriminate
      // them from BBs containing hoisted spills which will be inserted to
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders and WorkSet have different sizes");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // a nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of the current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    unsigned LiveReg = 0;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (const auto SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, insert the
        // spill to SpillsToRm.
        if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
            !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
  // save them to SpillsToIns.
  for (const auto Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}

/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when the spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by the instruction.
/// Before hoisting all the equal spills, redundant spills inside the same BB
/// are first marked to be deleted. Then, starting from the spills left, walk
/// up the dominator tree towards the Root node where the defining instruction
/// is located, mark the dominated spills to be deleted along the way and
/// collect the BB nodes on the path from non-dominated spills to the defining
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we are considering to hoist the spills. We iterate the WorkSet in
/// bottom-up order, and for each node, we will decide whether to hoist spills
/// inside its subtree to that node. In this way, we can get benefit locally
/// even if hoisting all the equal spills to one cold place is impossible.
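///
/// Illustrative cost example (not taken from the source): suppose two
/// mergeable spills sit in blocks with frequencies 10 and 10, and their
/// common dominator Block has frequency 15. With more than one spill in the
/// subtree, MarginProb is 9/10, so runHoistSpills compares the subtree cost
/// 10 + 10 = 20 against 15 * 9/10 (about 13). Since 20 is larger, both
/// spills are removed and replaced by a single hoisted spill in Block.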
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<unsigned, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

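  // Group every virtual register that still has a def under the original
  // register it was split from; these sibling vregs are the candidate
  // sources for hoisted spills in isSpillCandBB.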
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    unsigned Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      unsigned LiveReg = Insert.second;
      MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
      TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI);
      LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->RemoveOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, None, AA);
  }
}

/// For a VirtReg clone, the \p New register should have the same physreg or
/// stack slot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
}
Load the specified register of the given register class from the specified stack frame index...
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:413
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
Result of a LiveRange query.
Definition: LiveInterval.h:89
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:410
PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI)
analyzePhysReg - Analyze how the current instruction or bundle uses a physical register.
MachineBasicBlock::iterator end()
defusechain_iterator - This class provides iterator support for machine operands in the function that...
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isFullCopy() const
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def...
Definition: SlotIndexes.h:254
Base class for the actual dominator tree node.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
const std::vector< DomTreeNodeBase * > & getChildren() const
unsigned getReg() const
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:111
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
unsigned getKillRegState(bool B)
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:528
TargetInstrInfo - Interface to description of machine instruction set.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:408
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:821
#define P(N)
static LLVM_DUMP_METHOD void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, LiveIntervals const &LIS, const char *const header, unsigned VReg=0)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:140
bool isBundled() const
Return true if this instruction part of a bundle.
Definition: MachineInstr.h:357
MachineInstrBuilder & UseMI
DomTreeNodeBase * getIDom() const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
iterator_range< pred_iterator > predecessors()
bool isCopy() const
bool isImplicitDef() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:52
bool isDebugInstr() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setIsKill(bool Val=true)
virtual ~Spiller()=0
static bool isStackSlot(unsigned Reg)
isStackSlot - Sometimes it is useful the be able to store a non-negative frame index in a variable th...
size_type size() const
Definition: SmallPtrSet.h:92
unsigned id
The ID number of this value.
Definition: LiveInterval.h:57
Basic Register Allocator
LiveInterval & getParent() const
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:297
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
static bool isFullUndefDef(const MachineInstr &Def)
Check if Def fully defines a VReg with an undefined value.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:197
bool isDebugValue() const
MachineOperand class - Representation of each machine instruction operand.
MachineInstrBuilder MachineInstrBuilder & DefMI
static cl::opt< bool > DisableHoisting("disable-spill-hoist", cl::Hidden, cl::desc("Disable inline spill hoisting"))
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:374
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx...
Definition: LiveInterval.h:416
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:255
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Spiller * createInlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
unsigned getNumValNums() const
Definition: LiveInterval.h:300
Representation of each machine instruction.
Definition: MachineInstr.h:63
iterator begin() const
Definition: SmallPtrSet.h:396
void assign(const LiveRange &Other, BumpPtrAllocator &Allocator)
Copies values numbers and live segments from Other into this range.
Definition: LiveInterval.h:239
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:289
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:754
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg)
isFullCopyOf - If MI is a COPY to or from Reg, return the other register, otherwise return 0...
iterator end() const
Definition: SmallPtrSet.h:401
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Remat - Information needed to rematerialize at a specific location.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isSpillable() const
isSpillable - Can this interval be spilled?
Definition: LiveInterval.h:804
iterator SkipPHIsLabelsAndDebug(iterator I)
Return the first instruction in MBB after I that is not a PHI, label or debug.
IRTranslator LLVM IR MI
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:415
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:83
static cl::opt< bool > RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false), cl::Hidden, cl::desc("Restrict remat for statepoint operands"))
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
vni_iterator vni_end()
Definition: LiveInterval.h:220
bool isImplicit() const
MachineBasicBlock::iterator begin()
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1251