LLVM  4.0.0
InlineSpiller.cpp
Go to the documentation of this file.
1 //===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // The inline spiller modifies the machine function directly instead of
11 // inserting spills and restores in VirtRegMap.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "Spiller.h"
16 #include "SplitKit.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/TinyPtrVector.h"
35 #include "llvm/IR/DebugInfo.h"
37 #include "llvm/Support/Debug.h"
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "regalloc"
44 
45 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
46 STATISTIC(NumSnippets, "Number of spilled snippets");
47 STATISTIC(NumSpills, "Number of spills inserted");
48 STATISTIC(NumSpillsRemoved, "Number of spills removed");
49 STATISTIC(NumReloads, "Number of reloads inserted");
50 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
51 STATISTIC(NumFolded, "Number of folded stack accesses");
52 STATISTIC(NumFoldedLoads, "Number of folded loads");
53 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
54 
55 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
56  cl::desc("Disable inline spill hoisting"));
57 
58 namespace {
59 class HoistSpillHelper : private LiveRangeEdit::Delegate {
60  MachineFunction &MF;
61  LiveIntervals &LIS;
62  LiveStacks &LSS;
63  AliasAnalysis *AA;
66  VirtRegMap &VRM;
67  MachineFrameInfo &MFI;
69  const TargetInstrInfo &TII;
70  const TargetRegisterInfo &TRI;
71  const MachineBlockFrequencyInfo &MBFI;
72 
74 
75  // Map from StackSlot to its original register.
76  DenseMap<int, unsigned> StackSlotToReg;
77  // Map from pair of (StackSlot and Original VNI) to a set of spills which
78  // have the same stackslot and have equal values defined by Original VNI.
79 // These spills are mergeable and are hoist candidates.
81  MergeableSpillsMap;
82  MergeableSpillsMap MergeableSpills;
83 
84  /// This is the map from original register to a set containing all its
85  /// siblings. To hoist a spill to another BB, we need to find out a live
86  /// sibling there and use it as the source of the new spill.
88 
89  bool isSpillCandBB(unsigned OrigReg, VNInfo &OrigVNI, MachineBasicBlock &BB,
90  unsigned &LiveReg);
91 
92  void rmRedundantSpills(
96 
97  void getVisitOrders(
103 
104  void runHoistSpills(unsigned OrigReg, VNInfo &OrigVNI,
108 
109 public:
110  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
111  VirtRegMap &vrm)
112  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
113  LSS(pass.getAnalysis<LiveStacks>()),
114  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
115  MDT(pass.getAnalysis<MachineDominatorTree>()),
116  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
117  MFI(mf.getFrameInfo()), MRI(mf.getRegInfo()),
118  TII(*mf.getSubtarget().getInstrInfo()),
119  TRI(*mf.getSubtarget().getRegisterInfo()),
120  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
121  IPA(LIS, mf.getNumBlockIDs()) {}
122 
123  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
124  unsigned Original);
125  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
126  void hoistAllSpills();
127  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
128 };
129 
130 class InlineSpiller : public Spiller {
131  MachineFunction &MF;
132  LiveIntervals &LIS;
133  LiveStacks &LSS;
134  AliasAnalysis *AA;
137  VirtRegMap &VRM;
138  MachineFrameInfo &MFI;
140  const TargetInstrInfo &TII;
141  const TargetRegisterInfo &TRI;
142  const MachineBlockFrequencyInfo &MBFI;
143 
144  // Variables that are valid during spill(), but used by multiple methods.
145  LiveRangeEdit *Edit;
146  LiveInterval *StackInt;
147  int StackSlot;
148  unsigned Original;
149 
150  // All registers to spill to StackSlot, including the main register.
151  SmallVector<unsigned, 8> RegsToSpill;
152 
153  // All COPY instructions to/from snippets.
154  // They are ignored since both operands refer to the same stack slot.
155  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
156 
157  // Values that failed to remat at some point.
158  SmallPtrSet<VNInfo*, 8> UsedValues;
159 
160  // Dead defs generated during spilling.
162 
163  // Object records spills information and does the hoisting.
164  HoistSpillHelper HSpiller;
165 
166  ~InlineSpiller() override {}
167 
168 public:
169  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
170  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
171  LSS(pass.getAnalysis<LiveStacks>()),
172  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
173  MDT(pass.getAnalysis<MachineDominatorTree>()),
174  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
175  MFI(mf.getFrameInfo()), MRI(mf.getRegInfo()),
176  TII(*mf.getSubtarget().getInstrInfo()),
177  TRI(*mf.getSubtarget().getRegisterInfo()),
178  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
179  HSpiller(pass, mf, vrm) {}
180 
181  void spill(LiveRangeEdit &) override;
182  void postOptimization() override;
183 
184 private:
185  bool isSnippet(const LiveInterval &SnipLI);
186  void collectRegsToSpill();
187 
188  bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
189 
190  bool isSibling(unsigned Reg);
191  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
192  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
193 
194  void markValueUsed(LiveInterval*, VNInfo*);
195  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
196  void reMaterializeAll();
197 
198  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
199  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
200  MachineInstr *LoadMI = nullptr);
201  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
202  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
203 
204  void spillAroundUses(unsigned Reg);
205  void spillAll();
206 };
207 }
208 
209 namespace llvm {
210 
212 void Spiller::anchor() { }
213 
215  MachineFunction &mf,
216  VirtRegMap &vrm) {
217  return new InlineSpiller(pass, mf, vrm);
218 }
219 
220 }
221 
222 //===----------------------------------------------------------------------===//
223 // Snippets
224 //===----------------------------------------------------------------------===//
225 
226 // When spilling a virtual register, we also spill any snippets it is connected
227 // to. The snippets are small live ranges that only have a single real use,
228 // leftovers from live range splitting. Spilling them enables memory operand
229 // folding or tightens the live range around the single use.
230 //
231 // This minimizes register pressure and maximizes the store-to-load distance for
232 // spill slots which can be important in tight loops.
233 
234 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
235 /// otherwise return 0.
236 static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
237  if (!MI.isFullCopy())
238  return 0;
239  if (MI.getOperand(0).getReg() == Reg)
240  return MI.getOperand(1).getReg();
241  if (MI.getOperand(1).getReg() == Reg)
242  return MI.getOperand(0).getReg();
243  return 0;
244 }
245 
246 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
247 /// It is assumed that SnipLI is a virtual register with the same original as
248 /// Edit->getReg().
249 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
250  unsigned Reg = Edit->getReg();
251 
252  // A snippet is a tiny live range with only a single instruction using it
253  // besides copies to/from Reg or spills/fills. We accept:
254  //
255  // %snip = COPY %Reg / FILL fi#
256  // %snip = USE %snip
257  // %Reg = COPY %snip / SPILL %snip, fi#
258  //
259  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
260  return false;
261 
262  MachineInstr *UseMI = nullptr;
263 
264  // Check that all uses satisfy our criteria.
266  RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
267  E = MRI.reg_instr_nodbg_end(); RI != E; ) {
268  MachineInstr &MI = *RI++;
269 
270  // Allow copies to/from Reg.
271  if (isFullCopyOf(MI, Reg))
272  continue;
273 
274  // Allow stack slot loads.
275  int FI;
276  if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
277  continue;
278 
279  // Allow stack slot stores.
280  if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
281  continue;
282 
283  // Allow a single additional instruction.
284  if (UseMI && &MI != UseMI)
285  return false;
286  UseMI = &MI;
287  }
288  return true;
289 }
290 
291 /// collectRegsToSpill - Collect live range snippets that only have a single
292 /// real use.
293 void InlineSpiller::collectRegsToSpill() {
294  unsigned Reg = Edit->getReg();
295 
296  // Main register always spills.
297  RegsToSpill.assign(1, Reg);
298  SnippetCopies.clear();
299 
300  // Snippets all have the same original, so there can't be any for an original
301  // register.
302  if (Original == Reg)
303  return;
304 
306  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
307  MachineInstr &MI = *RI++;
308  unsigned SnipReg = isFullCopyOf(MI, Reg);
309  if (!isSibling(SnipReg))
310  continue;
311  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
312  if (!isSnippet(SnipLI))
313  continue;
314  SnippetCopies.insert(&MI);
315  if (isRegToSpill(SnipReg))
316  continue;
317  RegsToSpill.push_back(SnipReg);
318  DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
319  ++NumSnippets;
320  }
321 }
322 
323 bool InlineSpiller::isSibling(unsigned Reg) {
324  return TargetRegisterInfo::isVirtualRegister(Reg) &&
325  VRM.getOriginal(Reg) == Original;
326 }
327 
328 /// It is beneficial to spill to earlier place in the same BB in case
329 /// as follows:
330 /// There is an alternative def earlier in the same MBB.
331 /// Hoist the spill as far as possible in SpillMBB. This can ease
332 /// register pressure:
333 ///
334 /// x = def
335 /// y = use x
336 /// s = copy x
337 ///
338 /// Hoisting the spill of s to immediately after the def removes the
339 /// interference between x and y:
340 ///
341 /// x = def
342 /// spill x
343 /// y = use x<kill>
344 ///
345 /// This hoist only helps when the copy kills its source.
346 ///
347 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
348  MachineInstr &CopyMI) {
349  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
350 #ifndef NDEBUG
351  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
352  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
353 #endif
354 
355  unsigned SrcReg = CopyMI.getOperand(1).getReg();
356  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
357  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
358  LiveQueryResult SrcQ = SrcLI.Query(Idx);
359  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
360  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
361  return false;
362 
363  // Conservatively extend the stack slot range to the range of the original
364  // value. We may be able to do better with stack slot coloring by being more
365  // careful here.
366  assert(StackInt && "No stack slot assigned yet.");
367  LiveInterval &OrigLI = LIS.getInterval(Original);
368  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
369  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
370  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
371  << *StackInt << '\n');
372 
373  // We are going to spill SrcVNI immediately after its def, so clear out
374  // any later spills of the same value.
375  eliminateRedundantSpills(SrcLI, SrcVNI);
376 
377  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
379  if (SrcVNI->isPHIDef())
380  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
381  else {
382  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
383  assert(DefMI && "Defining instruction disappeared");
384  MII = DefMI;
385  ++MII;
386  }
387  // Insert spill without kill flag immediately after def.
388  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
389  MRI.getRegClass(SrcReg), &TRI);
390  --MII; // Point to store instruction.
391  LIS.InsertMachineInstrInMaps(*MII);
392  DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
393 
394  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
395  ++NumSpills;
396  return true;
397 }
398 
399 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
400 /// redundant spills of this value in SLI.reg and sibling copies.
401 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
402  assert(VNI && "Missing value");
404  WorkList.push_back(std::make_pair(&SLI, VNI));
405  assert(StackInt && "No stack slot assigned yet.");
406 
407  do {
408  LiveInterval *LI;
409  std::tie(LI, VNI) = WorkList.pop_back_val();
410  unsigned Reg = LI->reg;
411  DEBUG(dbgs() << "Checking redundant spills for "
412  << VNI->id << '@' << VNI->def << " in " << *LI << '\n');
413 
414  // Regs to spill are taken care of.
415  if (isRegToSpill(Reg))
416  continue;
417 
418  // Add all of VNI's live range to StackInt.
419  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
420  DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
421 
422  // Find all spills and copies of VNI.
424  UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
425  UI != E; ) {
426  MachineInstr &MI = *UI++;
427  if (!MI.isCopy() && !MI.mayStore())
428  continue;
429  SlotIndex Idx = LIS.getInstructionIndex(MI);
430  if (LI->getVNInfoAt(Idx) != VNI)
431  continue;
432 
433  // Follow sibling copies down the dominator tree.
434  if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
435  if (isSibling(DstReg)) {
436  LiveInterval &DstLI = LIS.getInterval(DstReg);
437  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
438  assert(DstVNI && "Missing defined value");
439  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
440  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
441  }
442  continue;
443  }
444 
445  // Erase spills.
446  int FI;
447  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
448  DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
449  // eliminateDeadDefs won't normally remove stores, so switch opcode.
450  MI.setDesc(TII.get(TargetOpcode::KILL));
451  DeadDefs.push_back(&MI);
452  ++NumSpillsRemoved;
453  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
454  --NumSpills;
455  }
456  }
457  } while (!WorkList.empty());
458 }
459 
460 
461 //===----------------------------------------------------------------------===//
462 // Rematerialization
463 //===----------------------------------------------------------------------===//
464 
465 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
466 /// instruction cannot be eliminated. See through snippet copies
467 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
469  WorkList.push_back(std::make_pair(LI, VNI));
470  do {
471  std::tie(LI, VNI) = WorkList.pop_back_val();
472  if (!UsedValues.insert(VNI).second)
473  continue;
474 
475  if (VNI->isPHIDef()) {
476  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
477  for (MachineBasicBlock *P : MBB->predecessors()) {
478  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
479  if (PVNI)
480  WorkList.push_back(std::make_pair(LI, PVNI));
481  }
482  continue;
483  }
484 
485  // Follow snippet copies.
486  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
487  if (!SnippetCopies.count(MI))
488  continue;
489  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
490  assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
491  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
492  assert(SnipVNI && "Snippet undefined before copy");
493  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
494  } while (!WorkList.empty());
495 }
496 
497 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
498 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
499 
500  // Analyze instruction
502  MIBundleOperands::VirtRegInfo RI =
503  MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
504 
505  if (!RI.Reads)
506  return false;
507 
508  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
509  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
510 
511  if (!ParentVNI) {
512  DEBUG(dbgs() << "\tadding <undef> flags: ");
513  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
514  MachineOperand &MO = MI.getOperand(i);
515  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
516  MO.setIsUndef();
517  }
518  DEBUG(dbgs() << UseIdx << '\t' << MI);
519  return true;
520  }
521 
522  if (SnippetCopies.count(&MI))
523  return false;
524 
525  LiveInterval &OrigLI = LIS.getInterval(Original);
526  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
527  LiveRangeEdit::Remat RM(ParentVNI);
528  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
529 
530  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
531  markValueUsed(&VirtReg, ParentVNI);
532  DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
533  return false;
534  }
535 
536  // If the instruction also writes VirtReg.reg, it had better not require the
537  // same register for uses and defs.
538  if (RI.Tied) {
539  markValueUsed(&VirtReg, ParentVNI);
540  DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
541  return false;
542  }
543 
544  // Before rematerializing into a register for a single instruction, try to
545  // fold a load into the instruction. That avoids allocating a new register.
546  if (RM.OrigMI->canFoldAsLoad() &&
547  foldMemoryOperand(Ops, RM.OrigMI)) {
548  Edit->markRematerialized(RM.ParentVNI);
549  ++NumFoldedLoads;
550  return true;
551  }
552 
553  // Allocate a new register for the remat.
554  unsigned NewVReg = Edit->createFrom(Original);
555 
556  // Finally we can rematerialize OrigMI before MI.
557  SlotIndex DefIdx =
558  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
559 
560  // We take the DebugLoc from MI, since OrigMI may be attributed to a
561  // different source location.
562  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
563  NewMI->setDebugLoc(MI.getDebugLoc());
564 
565  (void)DefIdx;
566  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
567  << *LIS.getInstructionFromIndex(DefIdx));
568 
569  // Replace operands
570  for (const auto &OpPair : Ops) {
571  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
572  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
573  MO.setReg(NewVReg);
574  MO.setIsKill();
575  }
576  }
577  DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
578 
579  ++NumRemats;
580  return true;
581 }
582 
583 /// reMaterializeAll - Try to rematerialize as many uses as possible,
584 /// and trim the live ranges after.
585 void InlineSpiller::reMaterializeAll() {
586  if (!Edit->anyRematerializable(AA))
587  return;
588 
589  UsedValues.clear();
590 
591  // Try to remat before all uses of snippets.
592  bool anyRemat = false;
593  for (unsigned Reg : RegsToSpill) {
594  LiveInterval &LI = LIS.getInterval(Reg);
596  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
597  RegI != E; ) {
598  MachineInstr &MI = *RegI++;
599 
600  // Debug values are not allowed to affect codegen.
601  if (MI.isDebugValue())
602  continue;
603 
604  anyRemat |= reMaterializeFor(LI, MI);
605  }
606  }
607  if (!anyRemat)
608  return;
609 
610  // Remove any values that were completely rematted.
611  for (unsigned Reg : RegsToSpill) {
612  LiveInterval &LI = LIS.getInterval(Reg);
613  for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
614  I != E; ++I) {
615  VNInfo *VNI = *I;
616  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
617  continue;
618  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
619  MI->addRegisterDead(Reg, &TRI);
620  if (!MI->allDefsAreDead())
621  continue;
622  DEBUG(dbgs() << "All defs dead: " << *MI);
623  DeadDefs.push_back(MI);
624  }
625  }
626 
627  // Eliminate dead code after remat. Note that some snippet copies may be
628  // deleted here.
629  if (DeadDefs.empty())
630  return;
631  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
632  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
633 
634  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
635  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
636  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
637  // removed, PHI VNI are still left in the LiveInterval.
638  // So to get rid of unused reg, we need to check whether it has non-dbg
639  // reference instead of whether it has non-empty interval.
640  unsigned ResultPos = 0;
641  for (unsigned Reg : RegsToSpill) {
642  if (MRI.reg_nodbg_empty(Reg)) {
643  Edit->eraseVirtReg(Reg);
644  continue;
645  }
646  assert((LIS.hasInterval(Reg) && !LIS.getInterval(Reg).empty()) &&
647  "Reg with empty interval has reference");
648  RegsToSpill[ResultPos++] = Reg;
649  }
650  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
651  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
652 }
653 
654 
655 //===----------------------------------------------------------------------===//
656 // Spilling
657 //===----------------------------------------------------------------------===//
658 
659 /// If MI is a load or store of StackSlot, it can be removed.
660 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
661  int FI = 0;
662  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
663  bool IsLoad = InstrReg;
664  if (!IsLoad)
665  InstrReg = TII.isStoreToStackSlot(*MI, FI);
666 
667  // We have a stack access. Is it the right register and slot?
668  if (InstrReg != Reg || FI != StackSlot)
669  return false;
670 
671  if (!IsLoad)
672  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
673 
674  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
675  LIS.RemoveMachineInstrFromMaps(*MI);
676  MI->eraseFromParent();
677 
678  if (IsLoad) {
679  ++NumReloadsRemoved;
680  --NumReloads;
681  } else {
682  ++NumSpillsRemoved;
683  --NumSpills;
684  }
685 
686  return true;
687 }
688 
689 #if !defined(NDEBUG)
690 // Dump the range of instructions from B to E with their slot indexes.
693  LiveIntervals const &LIS,
694  const char *const header,
695  unsigned VReg =0) {
696  char NextLine = '\n';
697  char SlotIndent = '\t';
698 
699  if (std::next(B) == E) {
700  NextLine = ' ';
701  SlotIndent = ' ';
702  }
703 
704  dbgs() << '\t' << header << ": " << NextLine;
705 
706  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
707  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
708 
709  // If a register was passed in and this instruction has it as a
710  // destination that is marked as an early clobber, print the
711  // early-clobber slot index.
712  if (VReg) {
713  MachineOperand *MO = I->findRegisterDefOperand(VReg);
714  if (MO && MO->isEarlyClobber())
715  Idx = Idx.getRegSlot(true);
716  }
717 
718  dbgs() << SlotIndent << Idx << '\t' << *I;
719  }
720 }
721 #endif
722 
723 /// foldMemoryOperand - Try folding stack slot references in Ops into their
724 /// instructions.
725 ///
726 /// @param Ops Operand indices from analyzeVirtReg().
727 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
728 /// @return True on success.
729 bool InlineSpiller::
730 foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
731  MachineInstr *LoadMI) {
732  if (Ops.empty())
733  return false;
734  // Don't attempt folding in bundles.
735  MachineInstr *MI = Ops.front().first;
736  if (Ops.back().first != MI || MI->isBundled())
737  return false;
738 
739  bool WasCopy = MI->isCopy();
740  unsigned ImpReg = 0;
741 
742  // Spill subregs if the target allows it.
743  // We always want to spill subregs for stackmap/patchpoint pseudos.
744  bool SpillSubRegs = TII.isSubregFoldable() ||
745  MI->getOpcode() == TargetOpcode::STATEPOINT ||
746  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
747  MI->getOpcode() == TargetOpcode::STACKMAP;
748 
749  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
750  // operands.
751  SmallVector<unsigned, 8> FoldOps;
752  for (const auto &OpPair : Ops) {
753  unsigned Idx = OpPair.second;
754  assert(MI == OpPair.first && "Instruction conflict during operand folding");
755  MachineOperand &MO = MI->getOperand(Idx);
756  if (MO.isImplicit()) {
757  ImpReg = MO.getReg();
758  continue;
759  }
760 
761  if (!SpillSubRegs && MO.getSubReg())
762  return false;
763  // We cannot fold a load instruction into a def.
764  if (LoadMI && MO.isDef())
765  return false;
766  // Tied use operands should not be passed to foldMemoryOperand.
767  if (!MI->isRegTiedToDefOperand(Idx))
768  FoldOps.push_back(Idx);
769  }
770 
771  // If we only have implicit uses, we won't be able to fold that.
772  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
773  if (FoldOps.empty())
774  return false;
775 
776  MachineInstrSpan MIS(MI);
777 
778  MachineInstr *FoldMI =
779  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
780  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
781  if (!FoldMI)
782  return false;
783 
784  // Remove LIS for any dead defs in the original MI not in FoldMI.
785  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
786  if (!MO->isReg())
787  continue;
788  unsigned Reg = MO->getReg();
789  if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
790  MRI.isReserved(Reg)) {
791  continue;
792  }
793  // Skip non-Defs, including undef uses and internal reads.
794  if (MO->isUse())
795  continue;
796  MIBundleOperands::PhysRegInfo RI =
797  MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
798  if (RI.FullyDefined)
799  continue;
800  // FoldMI does not define this physreg. Remove the LI segment.
801  assert(MO->isDead() && "Cannot fold physreg def");
802  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
803  LIS.removePhysRegDefAt(Reg, Idx);
804  }
805 
806  int FI;
807  if (TII.isStoreToStackSlot(*MI, FI) &&
808  HSpiller.rmFromMergeableSpills(*MI, FI))
809  --NumSpills;
810  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
811  MI->eraseFromParent();
812 
813  // Insert any new instructions other than FoldMI into the LIS maps.
814  assert(!MIS.empty() && "Unexpected empty span of instructions!");
815  for (MachineInstr &MI : MIS)
816  if (&MI != FoldMI)
817  LIS.InsertMachineInstrInMaps(MI);
818 
819  // TII.foldMemoryOperand may have left some implicit operands on the
820  // instruction. Strip them.
821  if (ImpReg)
822  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
823  MachineOperand &MO = FoldMI->getOperand(i - 1);
824  if (!MO.isReg() || !MO.isImplicit())
825  break;
826  if (MO.getReg() == ImpReg)
827  FoldMI->RemoveOperand(i - 1);
828  }
829 
830  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
831  "folded"));
832 
833  if (!WasCopy)
834  ++NumFolded;
835  else if (Ops.front().second == 0) {
836  ++NumSpills;
837  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
838  } else
839  ++NumReloads;
840  return true;
841 }
842 
843 void InlineSpiller::insertReload(unsigned NewVReg,
844  SlotIndex Idx,
846  MachineBasicBlock &MBB = *MI->getParent();
847 
848  MachineInstrSpan MIS(MI);
849  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
850  MRI.getRegClass(NewVReg), &TRI);
851 
852  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
853 
854  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
855  NewVReg));
856  ++NumReloads;
857 }
858 
859 /// insertSpill - Insert a spill of NewVReg after MI.
860 void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
862  MachineBasicBlock &MBB = *MI->getParent();
863 
864  MachineInstrSpan MIS(MI);
865  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
866  MRI.getRegClass(NewVReg), &TRI);
867 
868  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
869 
870  DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
871  "spill"));
872  ++NumSpills;
873  HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
874 }
875 
876 /// spillAroundUses - insert spill code around each use of Reg.
877 void InlineSpiller::spillAroundUses(unsigned Reg) {
878  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
879  LiveInterval &OldLI = LIS.getInterval(Reg);
880 
881  // Iterate over instructions using Reg.
883  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
884  RegI != E; ) {
885  MachineInstr *MI = &*(RegI++);
886 
887  // Debug values are not allowed to affect codegen.
888  if (MI->isDebugValue()) {
889  // Modify DBG_VALUE now that the value is in a spill slot.
890  bool IsIndirect = MI->isIndirectDebugValue();
891  uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
892  const MDNode *Var = MI->getDebugVariable();
893  const MDNode *Expr = MI->getDebugExpression();
894  DebugLoc DL = MI->getDebugLoc();
895  DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
896  MachineBasicBlock *MBB = MI->getParent();
897  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
898  "Expected inlined-at fields to agree");
899  BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
900  .addFrameIndex(StackSlot)
901  .addImm(Offset)
902  .addMetadata(Var)
903  .addMetadata(Expr);
904  continue;
905  }
906 
907  // Ignore copies to/from snippets. We'll delete them.
908  if (SnippetCopies.count(MI))
909  continue;
910 
911  // Stack slot accesses may coalesce away.
912  if (coalesceStackAccess(MI, Reg))
913  continue;
914 
915  // Analyze instruction.
917  MIBundleOperands::VirtRegInfo RI =
918  MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
919 
920  // Find the slot index where this instruction reads and writes OldLI.
921  // This is usually the def slot, except for tied early clobbers.
922  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
923  if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
924  if (SlotIndex::isSameInstr(Idx, VNI->def))
925  Idx = VNI->def;
926 
927  // Check for a sibling copy.
928  unsigned SibReg = isFullCopyOf(*MI, Reg);
929  if (SibReg && isSibling(SibReg)) {
930  // This may actually be a copy between snippets.
931  if (isRegToSpill(SibReg)) {
932  DEBUG(dbgs() << "Found new snippet copy: " << *MI);
933  SnippetCopies.insert(MI);
934  continue;
935  }
936  if (RI.Writes) {
937  if (hoistSpillInsideBB(OldLI, *MI)) {
938  // This COPY is now dead, the value is already in the stack slot.
939  MI->getOperand(0).setIsDead();
940  DeadDefs.push_back(MI);
941  continue;
942  }
943  } else {
944  // This is a reload for a sib-reg copy. Drop spills downstream.
945  LiveInterval &SibLI = LIS.getInterval(SibReg);
946  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
947  // The COPY will fold to a reload below.
948  }
949  }
950 
951  // Attempt to fold memory ops.
952  if (foldMemoryOperand(Ops))
953  continue;
954 
955  // Create a new virtual register for spill/fill.
956  // FIXME: Infer regclass from instruction alone.
957  unsigned NewVReg = Edit->createFrom(Reg);
958 
959  if (RI.Reads)
960  insertReload(NewVReg, Idx, MI);
961 
962  // Rewrite instruction operands.
963  bool hasLiveDef = false;
964  for (const auto &OpPair : Ops) {
965  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
966  MO.setReg(NewVReg);
967  if (MO.isUse()) {
968  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
969  MO.setIsKill();
970  } else {
971  if (!MO.isDead())
972  hasLiveDef = true;
973  }
974  }
975  DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
976 
977  // FIXME: Use a second vreg if instruction has no tied ops.
978  if (RI.Writes)
979  if (hasLiveDef)
980  insertSpill(NewVReg, true, MI);
981  }
982 }
983 
984 /// spillAll - Spill all registers remaining after rematerialization.
/// Assigns (or reuses) the shared stack slot, merges the live ranges of all
/// RegsToSpill into the single stack-interval value, inserts spill/reload
/// code around every remaining use, erases the snippet copies, and finally
/// deletes the spilled virtual registers themselves.
985 void InlineSpiller::spillAll() {
986  // Update LiveStacks now that we are committed to spilling.
987  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
 // First spill of this value: allocate a fresh slot for the original
 // register and give the stack interval one value number to merge into.
988  StackSlot = VRM.assignVirt2StackSlot(Original);
989  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
990  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
991  } else
992  StackInt = &LSS.getInterval(StackSlot);
993 
 // Make sure the register being edited also maps to the shared slot.
994  if (Original != Edit->getReg())
995  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
996 
997  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
 // Fold every spilled register's liveness into the single stack value.
998  for (unsigned Reg : RegsToSpill)
999  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1000  StackInt->getValNumInfo(0));
1001  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1002 
1003  // Spill around uses of all RegsToSpill.
1004  for (unsigned Reg : RegsToSpill)
1005  spillAroundUses(Reg);
1006 
1007  // Hoisted spills may cause dead code.
1008  if (!DeadDefs.empty()) {
1009  DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1010  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1011  }
1012 
1013  // Finally delete the SnippetCopies.
1014  for (unsigned Reg : RegsToSpill) {
 // NOTE(review): the iterator-declaration line (original line 1015,
 // presumably "for (MachineRegisterInfo::reg_instr_iterator") was elided
 // by the extraction; the next two lines are the rest of that for-header.
1016  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1017  RI != E; ) {
 // Advance the iterator before erasing MI so it stays valid.
1018  MachineInstr &MI = *(RI++);
1019  assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1020  // FIXME: Do this with a LiveRangeEdit callback.
1021  LIS.RemoveMachineInstrFromMaps(MI);
1022  MI.eraseFromParent();
1023  }
1024  }
1025 
1026  // Delete all spilled registers.
1027  for (unsigned Reg : RegsToSpill)
1028  Edit->eraseVirtReg(Reg);
1029 }
1030 
1031 void InlineSpiller::spill(LiveRangeEdit &edit) {
1032  ++NumSpilledRanges;
1033  Edit = &edit;
1034  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
1035  && "Trying to spill a stack slot.");
1036  // Share a stack slot among all descendants of Original.
1037  Original = VRM.getOriginal(edit.getReg());
1038  StackSlot = VRM.getStackSlot(Original);
1039  StackInt = nullptr;
1040 
1041  DEBUG(dbgs() << "Inline spilling "
1042  << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1043  << ':' << edit.getParent()
1044  << "\nFrom original " << PrintReg(Original) << '\n');
1045  assert(edit.getParent().isSpillable() &&
1046  "Attempting to spill already spilled value.");
1047  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1048 
1049  collectRegsToSpill();
1050  reMaterializeAll();
1051 
1052  // Remat may handle everything.
1053  if (!RegsToSpill.empty())
1054  spillAll();
1055 
1056  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1057 }
1058 
1059 /// Optimizations after all the reg selections and spills are done.
1060 ///
1061 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1062 
1063 /// When a spill is inserted, add the spill to MergeableSpills map.
1064 ///
1065 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1066  unsigned Original) {
1067  StackSlotToReg[StackSlot] = Original;
1068  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1069  VNInfo *OrigVNI = LIS.getInterval(Original).getVNInfoAt(Idx.getRegSlot());
1070  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1071  MergeableSpills[MIdx].insert(&Spill);
1072 }
1073 
1074 /// When a spill is removed, remove the spill from MergeableSpills map.
1075 /// Return true if the spill is removed successfully.
1076 ///
1077 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1078  int StackSlot) {
1079  int Original = StackSlotToReg[StackSlot];
1080  if (!Original)
1081  return false;
1082  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1083  VNInfo *OrigVNI = LIS.getInterval(Original).getVNInfoAt(Idx.getRegSlot());
1084  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1085  return MergeableSpills[MIdx].erase(&Spill);
1086 }
1087 
1088 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1089 /// i.e., there should be a living sibling of OrigReg at the insert point.
1090 ///
1091 bool HoistSpillHelper::isSpillCandBB(unsigned OrigReg, VNInfo &OrigVNI,
1092  MachineBasicBlock &BB, unsigned &LiveReg) {
1093  SlotIndex Idx;
1094  LiveInterval &OrigLI = LIS.getInterval(OrigReg);
1095  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1096  if (MI != BB.end())
1097  Idx = LIS.getInstructionIndex(*MI);
1098  else
1099  Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1100  SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1101  assert((LIS.getInterval(OrigReg)).getVNInfoAt(Idx) == &OrigVNI &&
1102  "Unexpected VNI");
1103 
1104  for (auto const SibReg : Siblings) {
1105  LiveInterval &LI = LIS.getInterval(SibReg);
1106  VNInfo *VNI = LI.getVNInfoAt(Idx);
1107  if (VNI) {
1108  LiveReg = SibReg;
1109  return true;
1110  }
1111  }
1112  return false;
1113 }
1114 
1115 /// Remove redundant spills in the same BB. Save those redundant spills in
1116 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1117 ///
1118 void HoistSpillHelper::rmRedundantSpills(
 // NOTE(review): two parameter lines (original lines 1119 and 1121) were
 // elided by the extraction -- presumably the Spills set parameter and the
 // SpillBBToSpill map parameter used below; confirm against the real source.
1120  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1122  // For each spill saw, check SpillBBToSpill[] and see if its BB already has
1123  // another spill inside. If a BB contains more than one spill, only keep the
1124  // earlier spill with smaller SlotIndex.
1125  for (const auto CurrentSpill : Spills) {
1126  MachineBasicBlock *Block = CurrentSpill->getParent();
1127  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1128  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1129  if (PrevSpill) {
 // Two spills in the same block: keep the one with the smaller SlotIndex
 // and mark the later one for removal.
1130  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1131  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1132  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1133  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1134  SpillsToRm.push_back(SpillToRm);
1135  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1136  } else {
 // First spill seen in this block; record it.
1137  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1138  }
1139  }
 // Drop the removed spills from the working set.
1140  for (const auto SpillToRm : SpillsToRm)
1141  Spills.erase(SpillToRm);
1142 }
1143 
1144 /// Starting from \p Root find a top-down traversal order of the dominator
1145 /// tree to visit all basic blocks containing the elements of \p Spills.
1146 /// Redundant spills will be found and put into \p SpillsToRm at the same
1147 /// time. \p SpillBBToSpill will be populated as part of the process and
1148 /// maps a basic block to the first store occurring in the basic block.
1149 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1150 ///
1151 void HoistSpillHelper::getVisitOrders(
 // NOTE(review): several parameter lines (original 1152-1153, 1155-1156)
 // and the WorkSet/NodesOnPath local declarations (original 1159, 1162)
 // were elided by the extraction; confirm against the real source.
1154  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1157  // The set contains all the possible BB nodes to which we may hoist
1158  // original spills.
1160  // Save the BB nodes on the path from the first BB node containing
1161  // non-redundant spill to the Root node.
1163  // All the spills to be hoisted must originate from a single def instruction
1164  // to the OrigReg. It means the def instruction should dominate all the spills
1165  // to be hoisted. We choose the BB where the def instruction is located as
1166  // the Root.
1167  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1168  // For every node on the dominator tree with spill, walk up on the dominator
1169  // tree towards the Root node until it is reached. If there is other node
1170  // containing spill in the middle of the path, the previous spill saw will
1171  // be redundant and the node containing it will be removed. All the nodes on
1172  // the path starting from the first node with non-redundant spill to the Root
1173  // node will be added to the WorkSet, which will contain all the possible
1174  // locations where spills may be hoisted to after the loop below is done.
1175  for (const auto Spill : Spills) {
1176  MachineBasicBlock *Block = Spill->getParent();
1177  MachineDomTreeNode *Node = MDT[Block];
1178  MachineInstr *SpillToRm = nullptr;
1179  while (Node != RootIDomNode) {
1180  // If Node dominates Block, and it already contains a spill, the spill in
1181  // Block will be redundant.
1182  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1183  SpillToRm = SpillBBToSpill[MDT[Block]];
1184  break;
1185  /// If we see the Node already in WorkSet, the path from the Node to
1186  /// the Root node must already be traversed by another spill.
1187  /// Then no need to repeat.
1188  } else if (WorkSet.count(Node)) {
1189  break;
1190  } else {
1191  NodesOnPath.insert(Node);
1192  }
1193  Node = Node->getIDom();
1194  }
1195  if (SpillToRm) {
1196  SpillsToRm.push_back(SpillToRm);
1197  } else {
1198  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1199  // set the initial status before hoisting start. The value of BBs
1200  // containing original spills is set to 0, in order to discriminate
1201  // with BBs containing hoisted spills which will be inserted to
1202  // SpillsToKeep later during hoisting.
1203  SpillsToKeep[MDT[Block]] = 0;
1204  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1205  }
 // NodesOnPath is per-spill scratch; reset for the next iteration.
1206  NodesOnPath.clear();
1207  }
1208 
1209  // Sort the nodes in WorkSet in top-down order and save the nodes
1210  // in Orders. Orders will be used for hoisting in runHoistSpills.
 // Simple BFS-style worklist over the dominator tree starting at Root,
 // keeping only children that are in WorkSet.
1211  unsigned idx = 0;
1212  Orders.push_back(MDT.getBase().getNode(Root));
1213  do {
1214  MachineDomTreeNode *Node = Orders[idx++];
1215  const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
1216  unsigned NumChildren = Children.size();
1217  for (unsigned i = 0; i != NumChildren; ++i) {
1218  MachineDomTreeNode *Child = Children[i];
1219  if (WorkSet.count(Child))
1220  Orders.push_back(Child);
1221  }
1222  } while (idx != Orders.size());
1223  assert(Orders.size() == WorkSet.size() &&
1224  "Orders have different size with WorkSet");
1225 
1226 #ifndef NDEBUG
 // Dump the bottom-up visit order (reverse of Orders).
 // NOTE(review): the declaration of RIt (original line 1228, a reverse
 // iterator over Orders) was elided by the extraction.
1227  DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1229  for (; RIt != Orders.rend(); RIt++)
1230  DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1231  DEBUG(dbgs() << "\n");
1232 #endif
1233 }
1234 
1235 /// Try to hoist spills according to BB hotness. The spills to removed will
1236 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1237 /// \p SpillsToIns.
1238 ///
1239 void HoistSpillHelper::runHoistSpills(
1240  unsigned OrigReg, VNInfo &OrigVNI, SmallPtrSet<MachineInstr *, 16> &Spills,
1241  SmallVectorImpl<MachineInstr *> &SpillsToRm,
 // NOTE(review): the SpillsToIns parameter line (original 1242) and the
 // local declarations of Orders, SpillsToKeep and SpillBBToSpill (original
 // 1244, 1250, 1252) were elided by the extraction; confirm against the
 // real source.
1243  // Visit order of dominator tree nodes.
1245  // SpillsToKeep contains all the nodes where spills are to be inserted
1246  // during hoisting. If the spill to be inserted is an original spill
1247  // (not a hoisted one), the value of the map entry is 0. If the spill
1248  // is a hoisted spill, the value of the map entry is the VReg to be used
1249  // as the source of the spill.
1251  // Map from BB to the first spill inside of it.
1253 
 // First drop spills made redundant within a single BB.
1254  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1255 
 // Root is the block holding the defining instruction of the value.
1256  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1257  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1258  SpillBBToSpill);
1259 
1260  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1261  // nodes set and the cost of all the spills inside those nodes.
1262  // The nodes set are the locations where spills are to be inserted
1263  // in the subtree of current node.
1264  typedef std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>
1265  NodesCostPair;
 // NOTE(review): the SpillsInSubTreeMap declaration (original line 1266)
 // and the RIt reverse-iterator declaration (original line 1271) were
 // elided by the extraction.
1267  // Iterate Orders set in reverse order, which will be a bottom-up order
1268  // in the dominator tree. Once we visit a dom tree node, we know its
1269  // children have already been visited and the spill locations in the
1270  // subtrees of all the children have been determined.
1272  for (; RIt != Orders.rend(); RIt++) {
1273  MachineBasicBlock *Block = (*RIt)->getBlock();
1274 
1275  // If Block contains an original spill, simply continue.
1276  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1277  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1278  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1279  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1280  continue;
1281  }
1282 
1283  // Collect spills in subtree of current node (*RIt) to
1284  // SpillsInSubTreeMap[*RIt].first.
1285  const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
1286  unsigned NumChildren = Children.size();
1287  for (unsigned i = 0; i != NumChildren; ++i) {
1288  MachineDomTreeNode *Child = Children[i];
1289  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1290  continue;
1291  // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1292  // should be placed before getting the begin and end iterators of
1293  // SpillsInSubTreeMap[Child].first, or else the iterators may be
1294  // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
1295  // and the map grows and then the original buckets in the map are moved.
1296  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1297  SpillsInSubTreeMap[*RIt].first;
1298  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
 // Merge the child's spill set and cost into the current node, then
 // discard the child's entry -- it has been fully accounted for.
1299  SubTreeCost += SpillsInSubTreeMap[Child].second;
1300  auto BI = SpillsInSubTreeMap[Child].first.begin();
1301  auto EI = SpillsInSubTreeMap[Child].first.end();
1302  SpillsInSubTree.insert(BI, EI);
1303  SpillsInSubTreeMap.erase(Child);
1304  }
1305 
1306  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1307  SpillsInSubTreeMap[*RIt].first;
1308  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1309  // No spills in subtree, simply continue.
1310  if (SpillsInSubTree.empty())
1311  continue;
1312 
1313  // Check whether Block is a possible candidate to insert spill.
1314  unsigned LiveReg = 0;
1315  if (!isSpillCandBB(OrigReg, OrigVNI, *Block, LiveReg))
1316  continue;
1317 
1318  // If there are multiple spills that could be merged, bias a little
1319  // to hoist the spill.
1320  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1321  ? BranchProbability(9, 10)
1322  : BranchProbability(1, 1);
 // Hoist only if the subtree's accumulated spill cost exceeds the
 // (margin-scaled) frequency of spilling once in this block.
1323  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1324  // Hoist: Move spills to current Block.
1325  for (const auto SpillBB : SpillsInSubTree) {
1326  // When SpillBB is a BB contains original spill, insert the spill
1327  // to SpillsToRm.
1328  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1329  !SpillsToKeep[SpillBB]) {
1330  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1331  SpillsToRm.push_back(SpillToRm);
1332  }
1333  // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1334  SpillsToKeep.erase(SpillBB);
1335  }
1336  // Current Block is the BB containing the new hoisted spill. Add it to
1337  // SpillsToKeep. LiveReg is the source of the new spill.
1338  SpillsToKeep[*RIt] = LiveReg;
1339  DEBUG({
1340  dbgs() << "spills in BB: ";
1341  for (const auto Rspill : SpillsInSubTree)
1342  dbgs() << Rspill->getBlock()->getNumber() << " ";
1343  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1344  << "\n";
1345  });
 // The whole subtree is now represented by one spill in this block.
1346  SpillsInSubTree.clear();
1347  SpillsInSubTree.insert(*RIt);
1348  SubTreeCost = MBFI.getBlockFreq(Block);
1349  }
1350  }
1351  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1352  // save them to SpillsToIns.
1353  for (const auto Ent : SpillsToKeep) {
1354  if (Ent.second)
1355  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1356  }
1357 }
1358 
1359 /// For spills with equal values, remove redundant spills and hoist those left
1360 /// to less hot spots.
1361 ///
1362 /// Spills with equal values will be collected into the same set in
1363 /// MergeableSpills when spill is inserted. These equal spills are originated
1364 /// from the same defining instruction and are dominated by the instruction.
1365 /// Before hoisting all the equal spills, redundant spills inside in the same
1366 /// BB are first marked to be deleted. Then starting from the spills left, walk
1367 /// up on the dominator tree towards the Root node where the define instruction
1368 /// is located, mark the dominated spills to be deleted along the way and
1369 /// collect the BB nodes on the path from non-dominated spills to the define
1370 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1371 /// where we are considering to hoist the spills. We iterate the WorkSet in
1372 /// bottom-up order, and for each node, we will decide whether to hoist spills
1373 /// inside its subtree to that node. In this way, we can get benefit locally
1374 /// even if hoisting all the equal spills to one cold place is impossible.
1375 ///
1376 void HoistSpillHelper::hoistAllSpills() {
1377  SmallVector<unsigned, 4> NewVRegs;
1378  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1379 
1380  // Save the mapping between stackslot and its original reg.
1381  DenseMap<int, unsigned> SlotToOrigReg;
1382  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1383  unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
1384  int Slot = VRM.getStackSlot(Reg);
1385  if (Slot != VirtRegMap::NO_STACK_SLOT)
1386  SlotToOrigReg[Slot] = VRM.getOriginal(Reg);
 // Registers with at least one def are grouped by their pre-split
 // original so siblings can later feed hoisted spills.
1387  unsigned Original = VRM.getPreSplitReg(Reg);
1388  if (!MRI.def_empty(Reg))
1389  Virt2SiblingsMap[Original].insert(Reg);
1390  }
1391 
1392  // Each entry in MergeableSpills contains a spill set with equal values.
1393  for (auto &Ent : MergeableSpills) {
1394  int Slot = Ent.first.first;
1395  unsigned OrigReg = SlotToOrigReg[Slot];
1396  LiveInterval &OrigLI = LIS.getInterval(OrigReg);
1397  VNInfo *OrigVNI = Ent.first.second;
1398  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1399  if (Ent.second.empty())
1400  continue;
1401 
1402  DEBUG({
1403  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1404  << "Equal spills in BB: ";
1405  for (const auto spill : EqValSpills)
1406  dbgs() << spill->getParent()->getNumber() << " ";
1407  dbgs() << "\n";
1408  });
1409 
 // NOTE(review): the declarations of SpillsToRm and SpillsToIns
 // (original lines 1411 and 1413) were elided by the extraction;
 // confirm their types against the real source.
1410  // SpillsToRm is the spill set to be removed from EqValSpills.
1412  // SpillsToIns is the spill set to be newly inserted after hoisting.
1414 
1415  runHoistSpills(OrigReg, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1416 
1417  DEBUG({
1418  dbgs() << "Finally inserted spills in BB: ";
1419  for (const auto Ispill : SpillsToIns)
1420  dbgs() << Ispill.first->getNumber() << " ";
1421  dbgs() << "\nFinally removed spills in BB: ";
1422  for (const auto Rspill : SpillsToRm)
1423  dbgs() << Rspill->getParent()->getNumber() << " ";
1424  dbgs() << "\n";
1425  });
1426 
1427  // Stack live range update.
1428  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1429  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1430  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1431  StackIntvl.getValNumInfo(0));
1432 
1433  // Insert hoisted spills.
1434  for (auto const Insert : SpillsToIns) {
1435  MachineBasicBlock *BB = Insert.first;
1436  unsigned LiveReg = Insert.second;
1437  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
1438  TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
1439  MRI.getRegClass(LiveReg), &TRI);
 // The new store is the instruction just before the insert point.
1440  LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
1441  ++NumSpills;
1442  }
1443 
1444  // Remove redundant spills or change them to dead instructions.
1445  NumSpills -= SpillsToRm.size();
1446  for (auto const RMEnt : SpillsToRm) {
 // Turn the spill into a KILL and strip live implicit defs so
 // eliminateDeadDefs below can delete it.
1447  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1448  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1449  MachineOperand &MO = RMEnt->getOperand(i - 1);
1450  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1451  RMEnt->RemoveOperand(i - 1);
1452  }
1453  }
1454  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1455  }
1456 }
1457 
1458 /// For VirtReg clone, the \p New register should have the same physreg or
1459 /// stackslot as the \p old register.
1460 void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1461  if (VRM.hasPhys(Old))
1462  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1463  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1464  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1465  else
1466  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1467 }
bool isFullCopy() const
Definition: MachineInstr.h:810
const NoneType None
Definition: None.h:23
bool isImplicit() const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
const unsigned reg
Definition: LiveInterval.h:656
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:53
const std::vector< DomTreeNodeBase< NodeT > * > & getChildren() const
STATISTIC(NumFunctions,"Total number of functions")
size_t i
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
Definition: SlotIndexes.h:234
void MergeValueInAsValue(const LiveRange &RHS, const VNInfo *RHSValNo, VNInfo *LHSValNo)
MergeValueInAsValue - Merge all of the segments of a specific val# in RHS into this live range as the...
bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
vni_iterator vni_begin()
Definition: LiveInterval.h:213
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:625
MIBundleOperands - Iterate over all operands in a bundle of machine instructions. ...
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:605
bool isSpillable() const
isSpillable - Can this interval be spilled?
Definition: LiveInterval.h:754
void setIsUndef(bool Val=true)
bool isDead() const
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:380
Spiller interface.
Definition: Spiller.h:25
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:108
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:32
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:830
void setIsDead(bool Val=true)
VNInfo - Value Number Information.
Definition: LiveInterval.h:45
Determines the latest safe point in a block in which we can insert a split, spill or other instructio...
Definition: SplitKit.h:43
unsigned getNumValNums() const
Definition: LiveInterval.h:288
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:396
Callback methods for LiveRangeEdit owners.
Definition: LiveRangeEdit.h:40
VirtRegInfo analyzeVirtReg(unsigned Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
analyzeVirtReg - Analyze how the current instruction or bundle uses a virtual register.
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
MachineInstrSpan provides an interface to get an iteration range containing the instruction it was in...
DomTreeNodeBase< NodeT > * getIDom() const
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:172
safe Safe Stack instrumentation pass
Definition: SafeStack.cpp:796
Hexagon Hardware Loops
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, LiveIntervals const &LIS, const char *const header, unsigned VReg=0)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Result of a LiveRange query.
Definition: LiveInterval.h:86
Reg
All possible values of the reg field in the ModR/M byte.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI)
analyzePhysReg - Analyze how the current instruction or bundle uses a physical register.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:60
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:277
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:77
defusechain_iterator - This class provides iterator support for machine operands in the function that...
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
MachineBasicBlock * MBB
bool isBundled() const
Return true if this instruction part of a bundle.
Definition: MachineInstr.h:223
Base class for the actual dominator tree node.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
int64_t getImm() const
Printable PrintReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubRegIdx=0)
Prints virtual and physical registers with or without a TRI instance.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:273
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:282
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:131
TargetInstrInfo - Interface to description of machine instruction set.
bool isDebugValue() const
Definition: MachineInstr.h:777
bool isEarlyClobber() const
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
#define P(N)
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:516
unsigned const MachineRegisterInfo * MRI
size_type size() const
Definition: SmallPtrSet.h:99
MachineInstrBuilder & UseMI
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the first operand is a register and the second operand is an immediate...
Definition: MachineInstr.h:780
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:279
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
bool isCopy() const
Definition: MachineInstr.h:807
uint32_t Offset
iterator begin() const
Definition: SmallPtrSet.h:398
LiveInterval & getParent() const
iterator_range< pred_iterator > predecessors()
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:98
unsigned getSubReg() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
virtual ~Spiller()=0
void setIsKill(bool Val=true)
unsigned id
The ID number of this value.
Definition: LiveInterval.h:50
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:292
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:425
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
MachineOperand class - Representation of each machine instruction operand.
static cl::opt< bool > DisableHoisting("disable-spill-hoist", cl::Hidden, cl::desc("Disable inline spill hoisting"))
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:382
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:250
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Spiller * createInlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
iterator end() const
Definition: SmallPtrSet.h:405
Representation of each machine instruction.
Definition: MachineInstr.h:52
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:54
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg)
isFullCopyOf - If MI is a COPY to or from Reg, return the other register, otherwise return 0...
const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
Remat - Information needed to rematerialize at a specific location.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def...
Definition: SlotIndexes.h:247
unsigned getReg() const
getReg - Returns the register number.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
#define DEBUG(X)
Definition: Debug.h:100
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
iterator SkipPHIsLabelsAndDebug(iterator I)
Return the first instruction in MBB after I that is not a PHI, label or debug.
IRTranslator LLVM IR MI
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:76
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
vni_iterator vni_end()
Definition: LiveInterval.h:214
unsigned getReg() const
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx...
Definition: LiveInterval.h:404
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:783