InlineSpiller.cpp
1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
41 #include "llvm/CodeGen/Spiller.h"
42 #include "llvm/CodeGen/StackMaps.h"
48 #include "llvm/Config/llvm-config.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
56 #include <cassert>
57 #include <iterator>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 #define DEBUG_TYPE "regalloc"
65 
66 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67 STATISTIC(NumSnippets, "Number of spilled snippets");
68 STATISTIC(NumSpills, "Number of spills inserted");
69 STATISTIC(NumSpillsRemoved, "Number of spills removed");
70 STATISTIC(NumReloads, "Number of reloads inserted");
71 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72 STATISTIC(NumFolded, "Number of folded stack accesses");
73 STATISTIC(NumFoldedLoads, "Number of folded loads");
74 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75 
76 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77  cl::desc("Disable inline spill hoisting"));
78 static cl::opt<bool>
79 RestrictStatepointRemat("restrict-statepoint-remat",
80  cl::init(false), cl::Hidden,
81  cl::desc("Restrict remat for statepoint operands"));
82 
83 namespace {
84 
85 class HoistSpillHelper : private LiveRangeEdit::Delegate {
86  MachineFunction &MF;
87  LiveIntervals &LIS;
88  LiveStacks &LSS;
89  AliasAnalysis *AA;
90  MachineDominatorTree &MDT;
91  MachineLoopInfo &Loops;
92  VirtRegMap &VRM;
93  MachineRegisterInfo &MRI;
94  const TargetInstrInfo &TII;
95  const TargetRegisterInfo &TRI;
96  const MachineBlockFrequencyInfo &MBFI;
97 
98  InsertPointAnalysis IPA;
99 
100  // Map from StackSlot to the LiveInterval of the original register.
101  // Note the LiveInterval of the original register may have been deleted
102  // after it is spilled. We keep a copy here to track the range where
103  // spills can be moved.
104  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
105 
106  // Map from pair of (StackSlot and Original VNI) to a set of spills which
107  // have the same stackslot and have equal values defined by Original VNI.
108  // These spills are mergeable and are hoist candidates.
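  // For example (illustrative, not from a real test): two spills of the same
  // original value into stack slot fi#3 in different blocks share the key
  // (fi#3, VNI) here and may later be hoisted to a common dominator.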
109  using MergeableSpillsMap =
110      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
111  MergeableSpillsMap MergeableSpills;
112 
113  /// This is the map from original register to a set containing all its
114  /// siblings. To hoist a spill to another BB, we need to find out a live
115  /// sibling there and use it as the source of the new spill.
116  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
117 
118  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
119  MachineBasicBlock &BB, Register &LiveReg);
120 
121  void rmRedundantSpills(SmallPtrSet<MachineInstr *, 16> &Spills,
122                         SmallVectorImpl<MachineInstr *> &SpillsToRm,
123                         DenseMap<MachineDomTreeNode *, MachineInstr *>
124                             &SpillBBToSpill);
125 
126  void getVisitOrders(MachineBasicBlock *Root,
127                      SmallPtrSet<MachineInstr *, 16> &Spills,
128                      SmallVectorImpl<MachineDomTreeNode *> &Orders,
129                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
130                      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
131                      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
132 
133  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
134                      SmallPtrSet<MachineInstr *, 16> &Spills,
135                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
136                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
137 
138 public:
139  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
140  VirtRegMap &vrm)
141  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
142  LSS(pass.getAnalysis<LiveStacks>()),
143  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
144  MDT(pass.getAnalysis<MachineDominatorTree>()),
145  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
146  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
147  TRI(*mf.getSubtarget().getRegisterInfo()),
148  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
149  IPA(LIS, mf.getNumBlockIDs()) {}
150 
151  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
152  unsigned Original);
153  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
154  void hoistAllSpills();
155  void LRE_DidCloneVirtReg(Register, Register) override;
156 };
157 
158 class InlineSpiller : public Spiller {
159  MachineFunction &MF;
160  LiveIntervals &LIS;
161  LiveStacks &LSS;
162  AliasAnalysis *AA;
163  MachineDominatorTree &MDT;
164  MachineLoopInfo &Loops;
165  VirtRegMap &VRM;
166  MachineRegisterInfo &MRI;
167  const TargetInstrInfo &TII;
168  const TargetRegisterInfo &TRI;
169  const MachineBlockFrequencyInfo &MBFI;
170 
171  // Variables that are valid during spill(), but used by multiple methods.
172  LiveRangeEdit *Edit;
173  LiveInterval *StackInt;
174  int StackSlot;
175  Register Original;
176 
177  // All registers to spill to StackSlot, including the main register.
178  SmallVector<Register, 8> RegsToSpill;
179 
180  // All COPY instructions to/from snippets.
181  // They are ignored since both operands refer to the same stack slot.
182  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
183 
184  // Values that failed to remat at some point.
185  SmallPtrSet<VNInfo*, 8> UsedValues;
186 
187  // Dead defs generated during spilling.
188  SmallVector<MachineInstr*, 8> DeadDefs;
189 
190  // Object records spills information and does the hoisting.
191  HoistSpillHelper HSpiller;
192 
193  // Live range weight calculator.
194  VirtRegAuxInfo &VRAI;
195 
196  ~InlineSpiller() override = default;
197 
198 public:
199  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
200  VirtRegAuxInfo &VRAI)
201  : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
202  LSS(Pass.getAnalysis<LiveStacks>()),
203  AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
204  MDT(Pass.getAnalysis<MachineDominatorTree>()),
205  Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
206  MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
207  TRI(*MF.getSubtarget().getRegisterInfo()),
208  MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
209  HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
210 
211  void spill(LiveRangeEdit &) override;
212  void postOptimization() override;
213 
214 private:
215  bool isSnippet(const LiveInterval &SnipLI);
216  void collectRegsToSpill();
217 
218  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
219 
220  bool isSibling(Register Reg);
221  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
222  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
223 
224  void markValueUsed(LiveInterval*, VNInfo*);
225  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
226  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
227  void reMaterializeAll();
228 
229  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
230  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
231  MachineInstr *LoadMI = nullptr);
232  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
233  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
234 
235  void spillAroundUses(Register Reg);
236  void spillAll();
237 };
238 
239 } // end anonymous namespace
240 
241 Spiller::~Spiller() = default;
242 
243 void Spiller::anchor() {}
244 
245 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
246                                    MachineFunction &MF, VirtRegMap &VRM,
247  VirtRegAuxInfo &VRAI) {
248  return new InlineSpiller(Pass, MF, VRM, VRAI);
249 }
250 
251 //===----------------------------------------------------------------------===//
252 // Snippets
253 //===----------------------------------------------------------------------===//
254 
255 // When spilling a virtual register, we also spill any snippets it is connected
256 // to. The snippets are small live ranges that only have a single real use,
257 // leftovers from live range splitting. Spilling them enables memory operand
258 // folding or tightens the live range around the single use.
259 //
260 // This minimizes register pressure and maximizes the store-to-load distance for
261 // spill slots which can be important in tight loops.
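//
// Illustrative sketch (simplified pseudo-MIR, not from a real test): live range
// splitting may leave behind a remainder interval such as
//
//   %snip = COPY %Reg
//   ...   = USE %snip
//
// Spilling %snip together with %Reg lets the COPY become a stack access (or
// fold into the user) instead of keeping a second register live.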
262 
263 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
264 /// otherwise return an invalid Register.
265 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
266  if (!MI.isFullCopy())
267  return Register();
268  if (MI.getOperand(0).getReg() == Reg)
269  return MI.getOperand(1).getReg();
270  if (MI.getOperand(1).getReg() == Reg)
271  return MI.getOperand(0).getReg();
272  return Register();
273 }
274 
275 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
276  for (const MachineOperand &MO : MI.operands())
277  if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
278  LIS.getInterval(MO.getReg());
279 }
280 
281 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
282 /// It is assumed that SnipLI is a virtual register with the same original as
283 /// Edit->getReg().
284 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
285  Register Reg = Edit->getReg();
286 
287  // A snippet is a tiny live range with only a single instruction using it
288  // besides copies to/from Reg or spills/fills. We accept:
289  //
290  // %snip = COPY %Reg / FILL fi#
291  // %snip = USE %snip
292  // %Reg = COPY %snip / SPILL %snip, fi#
293  //
294  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
295  return false;
296 
297  MachineInstr *UseMI = nullptr;
298 
299  // Check that all uses satisfy our criteria.
300  for (MachineRegisterInfo::reg_instr_nodbg_iterator
301           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
302           E = MRI.reg_instr_nodbg_end();
303       RI != E;) {
304  MachineInstr &MI = *RI++;
305 
306  // Allow copies to/from Reg.
307  if (isFullCopyOf(MI, Reg))
308  continue;
309 
310  // Allow stack slot loads.
311  int FI;
312  if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
313  continue;
314 
315  // Allow stack slot stores.
316  if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
317  continue;
318 
319  // Allow a single additional instruction.
320  if (UseMI && &MI != UseMI)
321  return false;
322  UseMI = &MI;
323  }
324  return true;
325 }
326 
327 /// collectRegsToSpill - Collect live range snippets that only have a single
328 /// real use.
329 void InlineSpiller::collectRegsToSpill() {
330  Register Reg = Edit->getReg();
331 
332  // Main register always spills.
333  RegsToSpill.assign(1, Reg);
334  SnippetCopies.clear();
335 
336  // Snippets all have the same original, so there can't be any for an original
337  // register.
338  if (Original == Reg)
339  return;
340 
341  for (MachineInstr &MI :
342       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
343  Register SnipReg = isFullCopyOf(MI, Reg);
344  if (!isSibling(SnipReg))
345  continue;
346  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
347  if (!isSnippet(SnipLI))
348  continue;
349  SnippetCopies.insert(&MI);
350  if (isRegToSpill(SnipReg))
351  continue;
352  RegsToSpill.push_back(SnipReg);
353  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
354  ++NumSnippets;
355  }
356 }
357 
358 bool InlineSpiller::isSibling(Register Reg) {
359  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
360 }
361 
362 /// It is beneficial to spill to an earlier place in the same BB in the
363 /// following case:
364 /// There is an alternative def earlier in the same MBB.
365 /// Hoist the spill as far as possible in SpillMBB. This can ease
366 /// register pressure:
367 ///
368 /// x = def
369 /// y = use x
370 /// s = copy x
371 ///
372 /// Hoisting the spill of s to immediately after the def removes the
373 /// interference between x and y:
374 ///
375 /// x = def
376 /// spill x
377 /// y = use killed x
378 ///
379 /// This hoist only helps when the copy kills its source.
380 ///
381 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
382  MachineInstr &CopyMI) {
383  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
384 #ifndef NDEBUG
385  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
386  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
387 #endif
388 
389  Register SrcReg = CopyMI.getOperand(1).getReg();
390  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
391  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
392  LiveQueryResult SrcQ = SrcLI.Query(Idx);
393  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
394  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
395  return false;
396 
397  // Conservatively extend the stack slot range to the range of the original
398  // value. We may be able to do better with stack slot coloring by being more
399  // careful here.
400  assert(StackInt && "No stack slot assigned yet.");
401  LiveInterval &OrigLI = LIS.getInterval(Original);
402  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
403  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
404  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
405  << *StackInt << '\n');
406 
407  // We are going to spill SrcVNI immediately after its def, so clear out
408  // any later spills of the same value.
409  eliminateRedundantSpills(SrcLI, SrcVNI);
410 
411  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
412  MachineBasicBlock::iterator MII;
413  if (SrcVNI->isPHIDef())
414  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
415  else {
416  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
417  assert(DefMI && "Defining instruction disappeared");
418  MII = DefMI;
419  ++MII;
420  }
421  MachineInstrSpan MIS(MII, MBB);
422  // Insert spill without kill flag immediately after def.
423  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
424  MRI.getRegClass(SrcReg), &TRI);
425  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
426  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
427  getVDefInterval(MI, LIS);
428  --MII; // Point to store instruction.
429  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
430 
431  // If only one store instruction is required for the spill, add it to the
432  // mergeable list. On X86 AMX, two instructions are required to store, so
433  // we disable merging for that case.
434  if (MIS.begin() == MII)
435  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
436  ++NumSpills;
437  return true;
438 }
439 
440 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
441 /// redundant spills of this value in SLI.reg and sibling copies.
442 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
443  assert(VNI && "Missing value");
444  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
445  WorkList.push_back(std::make_pair(&SLI, VNI));
446  assert(StackInt && "No stack slot assigned yet.");
447 
448  do {
449  LiveInterval *LI;
450  std::tie(LI, VNI) = WorkList.pop_back_val();
451  Register Reg = LI->reg();
452  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
453  << VNI->def << " in " << *LI << '\n');
454 
455  // Regs to spill are taken care of.
456  if (isRegToSpill(Reg))
457  continue;
458 
459  // Add all of VNI's live range to StackInt.
460  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
461  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
462 
463  // Find all spills and copies of VNI.
464  for (MachineInstr &MI :
465       llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
466  if (!MI.isCopy() && !MI.mayStore())
467  continue;
468  SlotIndex Idx = LIS.getInstructionIndex(MI);
469  if (LI->getVNInfoAt(Idx) != VNI)
470  continue;
471 
472  // Follow sibling copies down the dominator tree.
473  if (Register DstReg = isFullCopyOf(MI, Reg)) {
474  if (isSibling(DstReg)) {
475  LiveInterval &DstLI = LIS.getInterval(DstReg);
476  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
477  assert(DstVNI && "Missing defined value");
478  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
479  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
480  }
481  continue;
482  }
483 
484  // Erase spills.
485  int FI;
486  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
487  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
488  // eliminateDeadDefs won't normally remove stores, so switch opcode.
489  MI.setDesc(TII.get(TargetOpcode::KILL));
490  DeadDefs.push_back(&MI);
491  ++NumSpillsRemoved;
492  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
493  --NumSpills;
494  }
495  }
496  } while (!WorkList.empty());
497 }
498 
499 //===----------------------------------------------------------------------===//
500 // Rematerialization
501 //===----------------------------------------------------------------------===//
502 
503 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
504 /// instruction cannot be eliminated. Looks through snippet copies.
505 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
506  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
507  WorkList.push_back(std::make_pair(LI, VNI));
508  do {
509  std::tie(LI, VNI) = WorkList.pop_back_val();
510  if (!UsedValues.insert(VNI).second)
511  continue;
512 
513  if (VNI->isPHIDef()) {
514  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
515  for (MachineBasicBlock *P : MBB->predecessors()) {
516  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
517  if (PVNI)
518  WorkList.push_back(std::make_pair(LI, PVNI));
519  }
520  continue;
521  }
522 
523  // Follow snippet copies.
524  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
525  if (!SnippetCopies.count(MI))
526  continue;
527  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
528  assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
529  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
530  assert(SnipVNI && "Snippet undefined before copy");
531  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
532  } while (!WorkList.empty());
533 }
534 
535 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
536  MachineInstr &MI) {
537  if (!RestrictStatepointRemat)
538  return true;
539  // Here's a quick explanation of the problem we're trying to handle here:
540  // * There are some pseudo instructions with more vreg uses than there are
541  // physical registers on the machine.
542  // * This is normally handled by spilling the vreg, and folding the reload
543  // into the user instruction. (Thus decreasing the number of used vregs
544  // until the remainder can be assigned to physregs.)
545  // * However, since we may try to spill vregs in any order, we can end up
546  // trying to spill each operand to the instruction, and then rematting it
547  // instead. When that happens, the new live intervals (for the remats) are
548  // expected to be trivially assignable (i.e. RS_Done). However, since we
549  // may have more remats than physregs, we're guaranteed to fail to assign
550  // one.
551  // At the moment, we only handle this for STATEPOINTs since they're the only
552  // pseudo op where we've seen this. If we start seeing other instructions
553  // with the same problem, we need to revisit this.
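  // Illustrative sketch (hypothetical operands, not from a real test): a
  // STATEPOINT may use many vregs at once, e.g.
  //
  //   STATEPOINT ..., %a, %b, %c, ..., %z
  //
  // If each of those vregs is spilled and then rematerialized right before
  // this instruction, all the remat intervals are live here simultaneously
  // and may outnumber the physical registers, so assignment can fail.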
554  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
555  return true;
556  // For STATEPOINTs we allow rematerialization for fixed arguments only,
557  // hoping that the number of physical registers is enough to cover all fixed
558  // arguments. If that is not true we need to revisit it.
559  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
560  EndIdx = MI.getNumOperands();
561  Idx < EndIdx; ++Idx) {
562  MachineOperand &MO = MI.getOperand(Idx);
563  if (MO.isReg() && MO.getReg() == VReg)
564  return false;
565  }
566  return true;
567 }
568 
569 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
570 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
571  // Analyze instruction
572  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
573  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
574 
575  if (!RI.Reads)
576  return false;
577 
578  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
579  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
580 
581  if (!ParentVNI) {
582  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
583  for (MachineOperand &MO : MI.operands())
584  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
585  MO.setIsUndef();
586  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
587  return true;
588  }
589 
590  if (SnippetCopies.count(&MI))
591  return false;
592 
593  LiveInterval &OrigLI = LIS.getInterval(Original);
594  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
595  LiveRangeEdit::Remat RM(ParentVNI);
596  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
597 
598  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
599  markValueUsed(&VirtReg, ParentVNI);
600  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
601  return false;
602  }
603 
604  // If the instruction also writes VirtReg.reg, it had better not require the
605  // same register for uses and defs.
606  if (RI.Tied) {
607  markValueUsed(&VirtReg, ParentVNI);
608  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
609  return false;
610  }
611 
612  // Before rematerializing into a register for a single instruction, try to
613  // fold a load into the instruction. That avoids allocating a new register.
614  if (RM.OrigMI->canFoldAsLoad() &&
615  foldMemoryOperand(Ops, RM.OrigMI)) {
616  Edit->markRematerialized(RM.ParentVNI);
617  ++NumFoldedLoads;
618  return true;
619  }
620 
621  // If we can't guarantee that we'll be able to actually assign the new vreg,
622  // we can't remat.
623  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
624  markValueUsed(&VirtReg, ParentVNI);
625  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
626  return false;
627  }
628 
629  // Allocate a new register for the remat.
630  Register NewVReg = Edit->createFrom(Original);
631 
632  // Finally we can rematerialize OrigMI before MI.
633  SlotIndex DefIdx =
634  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
635 
636  // We take the DebugLoc from MI, since OrigMI may be attributed to a
637  // different source location.
638  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
639  NewMI->setDebugLoc(MI.getDebugLoc());
640 
641  (void)DefIdx;
642  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
643  << *LIS.getInstructionFromIndex(DefIdx));
644 
645  // Replace operands
646  for (const auto &OpPair : Ops) {
647  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
648  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
649  MO.setReg(NewVReg);
650  MO.setIsKill();
651  }
652  }
653  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
654 
655  ++NumRemats;
656  return true;
657 }
658 
659 /// reMaterializeAll - Try to rematerialize as many uses as possible,
660 /// and trim the live ranges after.
661 void InlineSpiller::reMaterializeAll() {
662  if (!Edit->anyRematerializable(AA))
663  return;
664 
665  UsedValues.clear();
666 
667  // Try to remat before all uses of snippets.
668  bool anyRemat = false;
669  for (Register Reg : RegsToSpill) {
670  LiveInterval &LI = LIS.getInterval(Reg);
671  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
672  // Debug values are not allowed to affect codegen.
673  if (MI.isDebugValue())
674  continue;
675 
676  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
677  "instruction that isn't a DBG_VALUE");
678 
679  anyRemat |= reMaterializeFor(LI, MI);
680  }
681  }
682  if (!anyRemat)
683  return;
684 
685  // Remove any values that were completely rematted.
686  for (Register Reg : RegsToSpill) {
687  LiveInterval &LI = LIS.getInterval(Reg);
688  for (VNInfo *VNI : LI.vnis()) {
689  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
690  continue;
691  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
692  MI->addRegisterDead(Reg, &TRI);
693  if (!MI->allDefsAreDead())
694  continue;
695  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
696  DeadDefs.push_back(MI);
697  }
698  }
699 
700  // Eliminate dead code after remat. Note that some snippet copies may be
701  // deleted here.
702  if (DeadDefs.empty())
703  return;
704  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
705  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
706 
707  // LiveRangeEdit::eliminateDeadDef is used to remove dead defining
708  // instructions after rematerialization. To remove a VNI for a vreg from its
709  // LiveInterval, LiveIntervals::removeVRegDefAt is used. However, after all
710  // non-PHI VNIs are removed, PHI VNIs are still left in the LiveInterval.
711  // So to get rid of an unused register, we check whether it has any non-debug
712  // references instead of whether it has a non-empty interval.
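  // For example (illustrative): a vreg whose remaining live range consists
  // only of PHI value numbers but which has no non-debug uses left is erased
  // here even though its interval is not empty.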
713  unsigned ResultPos = 0;
714  for (Register Reg : RegsToSpill) {
715  if (MRI.reg_nodbg_empty(Reg)) {
716  Edit->eraseVirtReg(Reg);
717  continue;
718  }
719 
720  assert(LIS.hasInterval(Reg) &&
721  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
722  "Empty and not used live-range?!");
723 
724  RegsToSpill[ResultPos++] = Reg;
725  }
726  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
727  LLVM_DEBUG(dbgs() << RegsToSpill.size()
728  << " registers to spill after remat.\n");
729 }
730 
731 //===----------------------------------------------------------------------===//
732 // Spilling
733 //===----------------------------------------------------------------------===//
734 
735 /// If MI is a load or store of StackSlot, it can be removed.
736 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
737  int FI = 0;
738  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
739  bool IsLoad = InstrReg;
740  if (!IsLoad)
741  InstrReg = TII.isStoreToStackSlot(*MI, FI);
742 
743  // We have a stack access. Is it the right register and slot?
744  if (InstrReg != Reg || FI != StackSlot)
745  return false;
746 
747  if (!IsLoad)
748  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
749 
750  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
751  LIS.RemoveMachineInstrFromMaps(*MI);
752  MI->eraseFromParent();
753 
754  if (IsLoad) {
755  ++NumReloadsRemoved;
756  --NumReloads;
757  } else {
758  ++NumSpillsRemoved;
759  --NumSpills;
760  }
761 
762  return true;
763 }
764 
765 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
767 // Dump the range of instructions from B to E with their slot indexes.
768 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
769                                                MachineBasicBlock::iterator E,
770                                                LiveIntervals const &LIS,
771  const char *const header,
772  Register VReg = Register()) {
773  char NextLine = '\n';
774  char SlotIndent = '\t';
775 
776  if (std::next(B) == E) {
777  NextLine = ' ';
778  SlotIndent = ' ';
779  }
780 
781  dbgs() << '\t' << header << ": " << NextLine;
782 
783  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
784  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
785 
786  // If a register was passed in and this instruction has it as a
787  // destination that is marked as an early clobber, print the
788  // early-clobber slot index.
789  if (VReg) {
790  MachineOperand *MO = I->findRegisterDefOperand(VReg);
791  if (MO && MO->isEarlyClobber())
792  Idx = Idx.getRegSlot(true);
793  }
794 
795  dbgs() << SlotIndent << Idx << '\t' << *I;
796  }
797 }
798 #endif
799 
800 /// foldMemoryOperand - Try folding stack slot references in Ops into their
801 /// instructions.
802 ///
803 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
804 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
805 /// @return True on success.
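///
/// Conceptual example (x86-flavoured pseudo-MIR, illustrative only): a reload
/// feeding a single use
///
///   %r = MOV32rm %stack.0   ; reload from the spill slot
///   %d = ADD32rr %x, %r
///
/// may fold into a single memory-operand instruction:
///
///   %d = ADD32rm %x, %stack.0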
806 bool InlineSpiller::
807 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
808  MachineInstr *LoadMI) {
809  if (Ops.empty())
810  return false;
811  // Don't attempt folding in bundles.
812  MachineInstr *MI = Ops.front().first;
813  if (Ops.back().first != MI || MI->isBundled())
814  return false;
815 
816  bool WasCopy = MI->isCopy();
817  Register ImpReg;
818 
819  // TII::foldMemoryOperand will do what we need here for statepoint
820  // (fold load into use and remove corresponding def). We will replace
821  // uses of removed def with loads (spillAroundUses).
822  // For that to work we need to untie def and use to pass it through
823  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
824  // fold them.
825  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
826 
827  // Spill subregs if the target allows it.
828  // We always want to spill subregs for stackmap/patchpoint pseudos.
829  bool SpillSubRegs = TII.isSubregFoldable() ||
830  MI->getOpcode() == TargetOpcode::STATEPOINT ||
831  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
832  MI->getOpcode() == TargetOpcode::STACKMAP;
833 
834  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
835  // operands.
836  SmallVector<unsigned, 8> FoldOps;
837  for (const auto &OpPair : Ops) {
838  unsigned Idx = OpPair.second;
839  assert(MI == OpPair.first && "Instruction conflict during operand folding");
840  MachineOperand &MO = MI->getOperand(Idx);
841 
842  // No point restoring an undef read, and we'll produce an invalid live
843  // interval.
844  // TODO: Is this really the correct way to handle undef tied uses?
845  if (MO.isUse() && !MO.readsReg() && !MO.isTied())
846  continue;
847 
848  if (MO.isImplicit()) {
849  ImpReg = MO.getReg();
850  continue;
851  }
852 
853  if (!SpillSubRegs && MO.getSubReg())
854  return false;
855  // We cannot fold a load instruction into a def.
856  if (LoadMI && MO.isDef())
857  return false;
858  // Tied use operands should not be passed to foldMemoryOperand.
859  if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
860  FoldOps.push_back(Idx);
861  }
862 
863  // If we only have implicit uses, we won't be able to fold that.
864  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
865  if (FoldOps.empty())
866  return false;
867 
868  MachineInstrSpan MIS(MI, MI->getParent());
869 
870  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
871  if (UntieRegs)
872  for (unsigned Idx : FoldOps) {
873  MachineOperand &MO = MI->getOperand(Idx);
874  if (!MO.isTied())
875  continue;
876  unsigned Tied = MI->findTiedOperandIdx(Idx);
877  if (MO.isUse())
878  TiedOps.emplace_back(Tied, Idx);
879  else {
880  assert(MO.isDef() && "Tied to not use and def?");
881  TiedOps.emplace_back(Idx, Tied);
882  }
883  MI->untieRegOperand(Idx);
884  }
885 
886  MachineInstr *FoldMI =
887  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
888  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
889  if (!FoldMI) {
890  // Re-tie operands.
891  for (auto Tied : TiedOps)
892  MI->tieOperands(Tied.first, Tied.second);
893  return false;
894  }
895 
896  // Remove LIS for any dead defs in the original MI not in FoldMI.
897  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
898  if (!MO->isReg())
899  continue;
900  Register Reg = MO->getReg();
901  if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
902  continue;
903  }
904  // Skip non-Defs, including undef uses and internal reads.
905  if (MO->isUse())
906  continue;
907  PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
908  if (RI.FullyDefined)
909  continue;
910  // FoldMI does not define this physreg. Remove the LI segment.
911  assert(MO->isDead() && "Cannot fold physreg def");
912  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
913  LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
914  }
915 
916  int FI;
917  if (TII.isStoreToStackSlot(*MI, FI) &&
918  HSpiller.rmFromMergeableSpills(*MI, FI))
919  --NumSpills;
920  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
921  // Update the call site info.
922  if (MI->isCandidateForCallSiteEntry())
923  MI->getMF()->moveCallSiteInfo(MI, FoldMI);
924 
925  // If we've folded a store into an instruction labelled with debug-info,
926  // record a substitution from the old operand to the memory operand. Handle
927  // the simple common case where operand 0 is the one being folded, plus when
928  // the destination operand is also a tied def. More values could be
929  // substituted / preserved with more analysis.
930  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
931  // Helper lambda.
932  auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
933  // Substitute old operand zero to the new instructions memory operand.
934  unsigned OldOperandNum = Ops[0].second;
935  unsigned NewNum = FoldMI->getDebugInstrNum();
936  unsigned OldNum = MI->getDebugInstrNum();
937  MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
938                                {NewNum, MachineFunction::DebugOperandMemNumber});
939  };
940 
941  const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
942  if (Ops.size() == 1 && Op0.isDef()) {
943  MakeSubstitution();
944  } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
945  Op0.getReg() == MI->getOperand(1).getReg()) {
946  MakeSubstitution();
947  }
948  } else if (MI->peekDebugInstrNum()) {
949  // This is a debug-labelled instruction, but the operand being folded isn't
950  // at operand zero. Most likely this means it's a load being folded in.
951  // Substitute any register defs from operand zero up to the one being
952  // folded -- past that point, we don't know what the new operand indexes
953  // will be.
954  MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
955  }
956 
957  MI->eraseFromParent();
958 
959  // Insert any new instructions other than FoldMI into the LIS maps.
960  assert(!MIS.empty() && "Unexpected empty span of instructions!");
961  for (MachineInstr &MI : MIS)
962  if (&MI != FoldMI)
963  LIS.InsertMachineInstrInMaps(MI);
964 
965  // TII.foldMemoryOperand may have left some implicit operands on the
966  // instruction. Strip them.
967  if (ImpReg)
968  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
969  MachineOperand &MO = FoldMI->getOperand(i - 1);
970  if (!MO.isReg() || !MO.isImplicit())
971  break;
972  if (MO.getReg() == ImpReg)
973  FoldMI->removeOperand(i - 1);
974  }
975 
976  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
977  "folded"));
978 
979  if (!WasCopy)
980  ++NumFolded;
981  else if (Ops.front().second == 0) {
982  ++NumSpills;
983  // If only one store instruction is required for the spill, add it to the
984  // mergeable list. On X86 AMX, two instructions are required to store, so
985  // we disable merging for that case.
986  if (std::distance(MIS.begin(), MIS.end()) <= 1)
987  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
988  } else
989  ++NumReloads;
990  return true;
991 }
992 
993 void InlineSpiller::insertReload(Register NewVReg,
994                                  SlotIndex Idx,
995                                  MachineBasicBlock::iterator MI) {
996  MachineBasicBlock &MBB = *MI->getParent();
997 
998  MachineInstrSpan MIS(MI, &MBB);
999  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
1000  MRI.getRegClass(NewVReg), &TRI);
1001 
1002  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
1003 
1004  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
1005  NewVReg));
1006  ++NumReloads;
1007 }
1008 
1009 /// Check if \p Def fully defines a VReg with an undefined value.
1010 /// If that's the case, that means the value of VReg is actually
1011 /// not relevant.
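/// For instance (illustrative): a full "%x = IMPLICIT_DEF" makes the spilled
/// value undef, so insertSpill below emits a KILL marker instead of a real
/// store.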
1012 static bool isRealSpill(const MachineInstr &Def) {
1013  if (!Def.isImplicitDef())
1014  return true;
1015  assert(Def.getNumOperands() == 1 &&
1016  "Implicit def with more than one definition");
1017  // We can say that the VReg defined by Def is undef, only if it is
1018  // fully defined by Def. Otherwise, some of the lanes may not be
1019  // undef and the value of the VReg matters.
1020  return Def.getOperand(0).getSubReg();
1021 }
1022 
1023 /// insertSpill - Insert a spill of NewVReg after MI.
1024 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1025                                MachineBasicBlock::iterator MI) {
1026  // Spills are not terminators, so inserting spills after terminators will
1027  // violate invariants in MachineVerifier.
1028  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1029  MachineBasicBlock &MBB = *MI->getParent();
1030 
1031  MachineInstrSpan MIS(MI, &MBB);
1032  MachineBasicBlock::iterator SpillBefore = std::next(MI);
1033  bool IsRealSpill = isRealSpill(*MI);
1034 
1035  if (IsRealSpill)
1036  TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1037  MRI.getRegClass(NewVReg), &TRI);
1038  else
1039  // Don't spill undef value.
1040  // Anything works for undef, in particular keeping the memory
1041  // uninitialized is a viable option and it saves code size and
1042  // run time.
1043  BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1044  .addReg(NewVReg, getKillRegState(isKill));
1045 
1046  MachineBasicBlock::iterator Spill = std::next(MI);
1047  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1048  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1049  getVDefInterval(MI, LIS);
1050 
1051  LLVM_DEBUG(
1052  dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1053  ++NumSpills;
1054  // If only one store instruction is required for the spill, add it to the
1055  // mergeable list. On X86 AMX, two instructions are required to store, so
1056  // we disable merging for that case.
1057  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1058  HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1059 }
1060 
1061 /// spillAroundUses - insert spill code around each use of Reg.
1062 void InlineSpiller::spillAroundUses(Register Reg) {
1063  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1064  LiveInterval &OldLI = LIS.getInterval(Reg);
1065 
1066  // Iterate over instructions using Reg.
1067  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
1068  // Debug values are not allowed to affect codegen.
1069  if (MI.isDebugValue()) {
1070  // Modify DBG_VALUE now that the value is in a spill slot.
1071  MachineBasicBlock *MBB = MI.getParent();
1072  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
1073  buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
1074  MBB->erase(MI);
1075  continue;
1076  }
1077 
1078  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
1079  "instruction that isn't a DBG_VALUE");
1080 
1081  // Ignore copies to/from snippets. We'll delete them.
1082  if (SnippetCopies.count(&MI))
1083  continue;
1084 
1085  // Stack slot accesses may coalesce away.
1086  if (coalesceStackAccess(&MI, Reg))
1087  continue;
1088 
1089  // Analyze instruction.
1090  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1091  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
1092 
1093  // Find the slot index where this instruction reads and writes OldLI.
1094  // This is usually the def slot, except for tied early clobbers.
1095  SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
1096  if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1097  if (SlotIndex::isSameInstr(Idx, VNI->def))
1098  Idx = VNI->def;
1099 
1100  // Check for a sibling copy.
1101  Register SibReg = isFullCopyOf(MI, Reg);
1102  if (SibReg && isSibling(SibReg)) {
1103  // This may actually be a copy between snippets.
1104  if (isRegToSpill(SibReg)) {
1105  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
1106  SnippetCopies.insert(&MI);
1107  continue;
1108  }
1109  if (RI.Writes) {
1110  if (hoistSpillInsideBB(OldLI, MI)) {
1111  // This COPY is now dead, the value is already in the stack slot.
1112  MI.getOperand(0).setIsDead();
1113  DeadDefs.push_back(&MI);
1114  continue;
1115  }
1116  } else {
1117  // This is a reload for a sib-reg copy. Drop spills downstream.
1118  LiveInterval &SibLI = LIS.getInterval(SibReg);
1119  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1120  // The COPY will fold to a reload below.
1121  }
1122  }
1123 
1124  // Attempt to fold memory ops.
1125  if (foldMemoryOperand(Ops))
1126  continue;
1127 
1128  // Create a new virtual register for spill/fill.
1129  // FIXME: Infer regclass from instruction alone.
1130  Register NewVReg = Edit->createFrom(Reg);
1131 
1132  if (RI.Reads)
1133  insertReload(NewVReg, Idx, &MI);
1134 
1135  // Rewrite instruction operands.
1136  bool hasLiveDef = false;
1137  for (const auto &OpPair : Ops) {
1138  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1139  MO.setReg(NewVReg);
1140  if (MO.isUse()) {
1141  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1142  MO.setIsKill();
1143  } else {
1144  if (!MO.isDead())
1145  hasLiveDef = true;
1146  }
1147  }
1148  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');
1149 
1150  // FIXME: Use a second vreg if instruction has no tied ops.
1151  if (RI.Writes)
1152  if (hasLiveDef)
1153  insertSpill(NewVReg, true, &MI);
1154  }
1155 }
1156 
1157 /// spillAll - Spill all registers remaining after rematerialization.
1158 void InlineSpiller::spillAll() {
1159  // Update LiveStacks now that we are committed to spilling.
1160  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1161  StackSlot = VRM.assignVirt2StackSlot(Original);
1162  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1163  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1164  } else
1165  StackInt = &LSS.getInterval(StackSlot);
1166 
1167  if (Original != Edit->getReg())
1168  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1169 
1170  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1171  for (Register Reg : RegsToSpill)
1172  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1173  StackInt->getValNumInfo(0));
1174  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1175 
1176  // Spill around uses of all RegsToSpill.
1177  for (Register Reg : RegsToSpill)
1178  spillAroundUses(Reg);
1179 
1180  // Hoisted spills may cause dead code.
1181  if (!DeadDefs.empty()) {
1182  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1183  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1184  }
1185 
1186  // Finally delete the SnippetCopies.
1187  for (Register Reg : RegsToSpill) {
1188  for (MachineInstr &MI :
1189       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
1190  assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1191  // FIXME: Do this with a LiveRangeEdit callback.
1192  LIS.RemoveMachineInstrFromMaps(MI);
1193  MI.eraseFromParent();
1194  }
1195  }
1196 
1197  // Delete all spilled registers.
1198  for (Register Reg : RegsToSpill)
1199  Edit->eraseVirtReg(Reg);
1200 }
1201 
1202 void InlineSpiller::spill(LiveRangeEdit &edit) {
1203  ++NumSpilledRanges;
1204  Edit = &edit;
1205  assert(!Register::isStackSlot(edit.getReg()) &&
1206  "Trying to spill a stack slot.");
1207  // Share a stack slot among all descendants of Original.
1208  Original = VRM.getOriginal(edit.getReg());
1209  StackSlot = VRM.getStackSlot(Original);
1210  StackInt = nullptr;
1211 
1212  LLVM_DEBUG(dbgs() << "Inline spilling "
1213                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1214                    << ':' << edit.getParent() << "\nFrom original "
1215  << printReg(Original) << '\n');
1216  assert(edit.getParent().isSpillable() &&
1217  "Attempting to spill already spilled value.");
1218  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1219 
1220  collectRegsToSpill();
1221  reMaterializeAll();
1222 
1223  // Remat may handle everything.
1224  if (!RegsToSpill.empty())
1225  spillAll();
1226 
1227  Edit->calculateRegClassAndHint(MF, VRAI);
1228 }
1229 
1230 /// Optimizations after all the reg selections and spills are done.
1231 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1232 
1233 /// When a spill is inserted, add the spill to the MergeableSpills map.
1234 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1235  unsigned Original) {
1236  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1237  LiveInterval &OrigLI = LIS.getInterval(Original);
1238  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1239  // LiveInterval may be cleared after all its references are spilled.
1240  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1241  auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1242  LI->assign(OrigLI, Allocator);
1243  StackSlotToOrigLI[StackSlot] = std::move(LI);
1244  }
1245  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1246  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1247  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1248  MergeableSpills[MIdx].insert(&Spill);
1249 }
1250 
1251 /// When a spill is removed, remove the spill from MergeableSpills map.
1252 /// Return true if the spill is removed successfully.
1253 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1254  int StackSlot) {
1255  auto It = StackSlotToOrigLI.find(StackSlot);
1256  if (It == StackSlotToOrigLI.end())
1257  return false;
1258  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1259  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1260  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1261  return MergeableSpills[MIdx].erase(&Spill);
1262 }
1263 
1264 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1265 /// i.e., there should be a live sibling of OrigReg at the insert point.
1266 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1267  MachineBasicBlock &BB, Register &LiveReg) {
1268  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1269  // The original def could be after the last insert point in the root block;
1270  // in that case we can't hoist to here.
1271  if (Idx < OrigVNI.def) {
1272  // TODO: We could be better here. If LI is not alive in landing pad
1273  // we could hoist spill after LIP.
1274  LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1275  return false;
1276  }
1277  Register OrigReg = OrigLI.reg();
1278  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1279  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1280 
1281  for (const Register &SibReg : Siblings) {
1282  LiveInterval &LI = LIS.getInterval(SibReg);
1283  VNInfo *VNI = LI.getVNInfoAt(Idx);
1284  if (VNI) {
1285  LiveReg = SibReg;
1286  return true;
1287  }
1288  }
1289  return false;
1290 }
1291 
1292 /// Remove redundant spills in the same BB. Save those redundant spills in
1293 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
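/// Example (illustrative): if one BB holds two spills of the same value at
/// SlotIndexes 16B and 48B, the later spill at 48B goes into SpillsToRm and
/// the earlier one at 16B is recorded in SpillBBToSpill.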
1294 void HoistSpillHelper::rmRedundantSpills(
1295     SmallPtrSet<MachineInstr *, 16> &Spills,
1296     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1297     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1298  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1299  // another spill inside. If a BB contains more than one spill, only keep the
1300  // earlier spill with the smaller SlotIndex.
1301  for (const auto CurrentSpill : Spills) {
1302  MachineBasicBlock *Block = CurrentSpill->getParent();
1303  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1304  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1305  if (PrevSpill) {
1306  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1307  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1308  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1309  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1310  SpillsToRm.push_back(SpillToRm);
1311  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1312  } else {
1313  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1314  }
1315  }
1316  for (const auto SpillToRm : SpillsToRm)
1317  Spills.erase(SpillToRm);
1318 }
1319 
1320 /// Starting from \p Root find a top-down traversal order of the dominator
1321 /// tree to visit all basic blocks containing the elements of \p Spills.
1322 /// Redundant spills will be found and put into \p SpillsToRm at the same
1323 /// time. \p SpillBBToSpill will be populated as part of the process and
1324 /// maps a basic block to the first store occurring in the basic block.
1325 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
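///
/// Sketch (hypothetical dominator chain, illustrative only): with
/// Root -> A -> B and original spills in both A and B, walking up from B
/// reaches A, which already holds a spill, so B's spill becomes redundant and
/// is added to SpillsToRm; separately, the walk from A up to Root adds the
/// nodes on that path to WorkSet as hoisting candidates.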
1326 void HoistSpillHelper::getVisitOrders(
1327     MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1328     SmallVectorImpl<MachineDomTreeNode *> &Orders,
1329     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1330     DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1331     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1332  // The set contains all the possible BB nodes to which we may hoist
1333  // original spills.
1334  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1335  // Save the BB nodes on the path from the first BB node containing
1336  // non-redundant spill to the Root node.
1337  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1338  // All the spills to be hoisted must originate from a single def instruction
1339  // to the OrigReg. It means the def instruction should dominate all the spills
1340  // to be hoisted. We choose the BB where the def instruction is located as
1341  // the Root.
1342  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1343  // For every node on the dominator tree with a spill, walk up the dominator
1344  // tree towards the Root node until it is reached. If there is another node
1345  // containing a spill in the middle of the path, the previously seen spill
1346  // will be redundant and the node containing it will be removed. All the nodes on
1347  // the path starting from the first node with non-redundant spill to the Root
1348  // node will be added to the WorkSet, which will contain all the possible
1349  // locations where spills may be hoisted to after the loop below is done.
1350  for (const auto Spill : Spills) {
1351  MachineBasicBlock *Block = Spill->getParent();
1352  MachineDomTreeNode *Node = MDT[Block];
1353  MachineInstr *SpillToRm = nullptr;
1354  while (Node != RootIDomNode) {
1355  // If Node dominates Block, and it already contains a spill, the spill in
1356  // Block will be redundant.
1357  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1358  SpillToRm = SpillBBToSpill[MDT[Block]];
1359  break;
1360  /// If we see the Node already in WorkSet, the path from the Node to
1361  /// the Root node must already be traversed by another spill.
1362  /// Then no need to repeat.
1363  } else if (WorkSet.count(Node)) {
1364  break;
1365  } else {
1366  NodesOnPath.insert(Node);
1367  }
1368  Node = Node->getIDom();
1369  }
1370  if (SpillToRm) {
1371  SpillsToRm.push_back(SpillToRm);
1372  } else {
1373  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1374  // set the initial status before hoisting starts. The value of BBs
1375  // containing original spills is set to 0, in order to discriminate them
1376  // from BBs containing hoisted spills, which will be inserted into
1377  // SpillsToKeep later during hoisting.
1378  SpillsToKeep[MDT[Block]] = 0;
1379  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1380  }
1381  NodesOnPath.clear();
1382  }
1383 
1384  // Sort the nodes in WorkSet in top-down order and save the nodes
1385  // in Orders. Orders will be used for hoisting in runHoistSpills.
1386  unsigned idx = 0;
1387  Orders.push_back(MDT.getBase().getNode(Root));
1388  do {
1389  MachineDomTreeNode *Node = Orders[idx++];
1390  for (MachineDomTreeNode *Child : Node->children()) {
1391  if (WorkSet.count(Child))
1392  Orders.push_back(Child);
1393  }
1394  } while (idx != Orders.size());
1395  assert(Orders.size() == WorkSet.size() &&
1396  "Orders have different size with WorkSet");
1397 
1398 #ifndef NDEBUG
1399  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1400  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1401  for (; RIt != Orders.rend(); RIt++)
1402  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1403  LLVM_DEBUG(dbgs() << "\n");
1404 #endif
1405 }
1406 
1407 /// Try to hoist spills according to BB hotness. The spills to be removed will
1408 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1409 /// \p SpillsToIns.
1410 void HoistSpillHelper::runHoistSpills(
1411     LiveInterval &OrigLI, VNInfo &OrigVNI,
1412     SmallPtrSet<MachineInstr *, 16> &Spills,
1413     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1414     DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1415  // Visit order of dominator tree nodes.
1416  SmallVector<MachineDomTreeNode *, 32> Orders;
1417  // SpillsToKeep contains all the nodes where spills are to be inserted
1418  // during hoisting. If the spill to be inserted is an original spill
1419  // (not a hoisted one), the value of the map entry is 0. If the spill
1420  // is a hoisted spill, the value of the map entry is the VReg to be used
1421  // as the source of the spill.
1422  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1423  // Map from BB to the first spill inside of it.
1424  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1425 
1426  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1427 
1428  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1429  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1430  SpillBBToSpill);
1431 
1432  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1433  // nodes set and the cost of all the spills inside those nodes.
1434  // The nodes set are the locations where spills are to be inserted
1435  // in the subtree of current node.
1436  using NodesCostPair =
1437  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1438  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1439 
1440  // Iterate Orders set in reverse order, which will be a bottom-up order
1441  // in the dominator tree. Once we visit a dom tree node, we know its
1442  // children have already been visited and the spill locations in the
1443  // subtrees of all the children have been determined.
1444  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1445  for (; RIt != Orders.rend(); RIt++) {
1446  MachineBasicBlock *Block = (*RIt)->getBlock();
1447 
1448  // If Block contains an original spill, simply continue.
1449  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1450  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1451  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1452  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1453  continue;
1454  }
1455 
1456  // Collect spills in subtree of current node (*RIt) to
1457  // SpillsInSubTreeMap[*RIt].first.
1458  for (MachineDomTreeNode *Child : (*RIt)->children()) {
1459  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1460  continue;
1461  // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1462  // must be placed before getting the begin and end iterators of
1463  // SpillsInSubTreeMap[Child].first; otherwise those iterators may be
1464  // invalidated the first time SpillsInSubTreeMap[*RIt] is accessed, because
1465  // the map can grow and move its original buckets.
1466  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1467  SpillsInSubTreeMap[*RIt].first;
1468  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1469  SubTreeCost += SpillsInSubTreeMap[Child].second;
1470  auto BI = SpillsInSubTreeMap[Child].first.begin();
1471  auto EI = SpillsInSubTreeMap[Child].first.end();
1472  SpillsInSubTree.insert(BI, EI);
1473  SpillsInSubTreeMap.erase(Child);
1474  }
1475 
1476  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1477  SpillsInSubTreeMap[*RIt].first;
1478  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1479  // No spills in subtree, simply continue.
1480  if (SpillsInSubTree.empty())
1481  continue;
1482 
1483  // Check whether Block is a possible candidate to insert spill.
1484  Register LiveReg;
1485  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1486  continue;
1487 
1488  // If there are multiple spills that could be merged, bias a little
1489  // to hoist the spill.
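    // Roughly: with more than one mergeable spill in the subtree, hoisting
    // happens once the combined subtree cost exceeds 90% of this block's
    // frequency; with a single spill, only when it exceeds the block frequency
    // outright (this is how the comparison below reads).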
1490  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1491  ? BranchProbability(9, 10)
1492  : BranchProbability(1, 1);
1493  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1494  // Hoist: Move spills to current Block.
1495  for (const auto SpillBB : SpillsInSubTree) {
1496  // When SpillBB is a BB that contains an original spill, add that spill
1497  // to SpillsToRm.
1498  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1499  !SpillsToKeep[SpillBB]) {
1500  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1501  SpillsToRm.push_back(SpillToRm);
1502  }
1503  // SpillBB will not contain a spill anymore, so remove it from SpillsToKeep.
1504  SpillsToKeep.erase(SpillBB);
1505  }
1506  // Current Block is the BB containing the new hoisted spill. Add it to
1507  // SpillsToKeep. LiveReg is the source of the new spill.
1508  SpillsToKeep[*RIt] = LiveReg;
1509  LLVM_DEBUG({
1510  dbgs() << "spills in BB: ";
1511  for (const auto Rspill : SpillsInSubTree)
1512  dbgs() << Rspill->getBlock()->getNumber() << " ";
1513  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1514  << "\n";
1515  });
1516  SpillsInSubTree.clear();
1517  SpillsInSubTree.insert(*RIt);
1518  SubTreeCost = MBFI.getBlockFreq(Block);
1519  }
1520  }
1521  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1522  // save them to SpillsToIns.
1523  for (const auto &Ent : SpillsToKeep) {
1524  if (Ent.second)
1525  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1526  }
1527 }
1528 
1529 /// For spills with equal values, remove redundant spills and hoist those left
1530 /// to less hot spots.
1531 ///
1532 /// Spills with equal values will be collected into the same set in
1533 /// MergeableSpills when spill is inserted. These equal spills are originated
1534 /// from the same defining instruction and are dominated by the instruction.
1535 /// Before hoisting all the equal spills, redundant spills inside in the same
1536 /// BB are first marked to be deleted. Then starting from the spills left, walk
1537 /// up on the dominator tree towards the Root node where the define instruction
1538 /// is located, mark the dominated spills to be deleted along the way and
1539 /// collect the BB nodes on the path from non-dominated spills to the define
1540 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1541 /// where we are considering to hoist the spills. We iterate the WorkSet in
1542 /// bottom-up order, and for each node, we will decide whether to hoist spills
1543 /// inside its subtree to that node. In this way, we can get benefit locally
1544 /// even if hoisting all the equal spills to one cold place is impossible.
1545 void HoistSpillHelper::hoistAllSpills() {
1546  SmallVector<Register, 4> NewVRegs;
1547  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1548 
1549  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1550  Register Reg = Register::index2VirtReg(i);
1551  Register Original = VRM.getPreSplitReg(Reg);
1552  if (!MRI.def_empty(Reg))
1553  Virt2SiblingsMap[Original].insert(Reg);
1554  }
1555 
1556  // Each entry in MergeableSpills contains a spill set with equal values.
1557  for (auto &Ent : MergeableSpills) {
1558  int Slot = Ent.first.first;
1559  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1560  VNInfo *OrigVNI = Ent.first.second;
1561  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1562  if (Ent.second.empty())
1563  continue;
1564 
1565  LLVM_DEBUG({
1566  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1567  << "Equal spills in BB: ";
1568  for (const auto spill : EqValSpills)
1569  dbgs() << spill->getParent()->getNumber() << " ";
1570  dbgs() << "\n";
1571  });
1572 
1573  // SpillsToRm is the spill set to be removed from EqValSpills.
1574  SmallVector<MachineInstr *, 16> SpillsToRm;
1575  // SpillsToIns is the spill set to be newly inserted after hoisting.
1576  DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1577 
1578  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1579 
1580  LLVM_DEBUG({
1581  dbgs() << "Finally inserted spills in BB: ";
1582  for (const auto &Ispill : SpillsToIns)
1583  dbgs() << Ispill.first->getNumber() << " ";
1584  dbgs() << "\nFinally removed spills in BB: ";
1585  for (const auto Rspill : SpillsToRm)
1586  dbgs() << Rspill->getParent()->getNumber() << " ";
1587  dbgs() << "\n";
1588  });
1589 
1590  // Stack live range update.
1591  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1592  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1593  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1594  StackIntvl.getValNumInfo(0));
1595 
1596  // Insert hoisted spills.
1597  for (auto const &Insert : SpillsToIns) {
1598  MachineBasicBlock *BB = Insert.first;
1599  Register LiveReg = Insert.second;
1600  MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1601  MachineInstrSpan MIS(MII, BB);
1602  TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1603  MRI.getRegClass(LiveReg), &TRI);
1604  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1605  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1606  getVDefInterval(MI, LIS);
1607  ++NumSpills;
1608  }
1609 
1610  // Remove redundant spills or change them to dead instructions.
1611  NumSpills -= SpillsToRm.size();
1612  for (auto const RMEnt : SpillsToRm) {
1613  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1614  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1615  MachineOperand &MO = RMEnt->getOperand(i - 1);
1616  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1617  RMEnt->removeOperand(i - 1);
1618  }
1619  }
1620  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1621  }
1622 }
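// The cleanup loop at the end of hoistAllSpills walks each spill's operand
// list from the back so that removeOperand() never invalidates the indices
// still to be visited. The same back-to-front erase pattern is sketched
// below on a plain std::vector; the function name and predicate are
// illustrative, not part of this file.
#include <vector>

// Erase all elements matching Pred by scanning indices from the back:
// erasing element i-1 leaves the not-yet-visited elements at smaller
// indices in place, just like the reverse operand walk above.
template <typename T, typename PredT>
static void eraseMatchingBackToFront(std::vector<T> &V, PredT Pred) {
  for (unsigned i = V.size(); i; --i)
    if (Pred(V[i - 1]))
      V.erase(V.begin() + (i - 1));
}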
1623 
1624 /// For a VirtReg clone, the \p New register should have the same physreg or
1625 /// stackslot as the \p Old register.
1626 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1627  if (VRM.hasPhys(Old))
1628  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1629  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1630  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1631  else
1632  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1633  if (VRM.hasShape(Old))
1634  VRM.assignVirt2Shape(New, VRM.getShape(Old));
1635 }
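// LRE_DidCloneVirtReg above copies whatever assignment the old virtual
// register already carries (a physical register, a stack slot, or, on
// targets with tile registers, a shape) onto the freshly cloned register.
// The sketch below shows the same dispatch on a hypothetical assignment
// record; VRegAssignment and copyAssignment are illustrative stand-ins, not
// the VirtRegMap API.
#include <cassert>
#include <optional>

// Hypothetical per-vreg state: exactly one of PhysReg or StackSlot is
// expected to be set, matching the llvm_unreachable guard above.
struct VRegAssignment {
  std::optional<unsigned> PhysReg; // assigned physical register, if any
  std::optional<int> StackSlot;    // assigned spill slot, if any
};

static void copyAssignment(const VRegAssignment &Old, VRegAssignment &New) {
  if (Old.PhysReg)
    New.PhysReg = Old.PhysReg;
  else if (Old.StackSlot)
    New.StackSlot = Old.StackSlot;
  else
    assert(false && "VReg should be assigned either physreg or stackslot");
}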