1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
42 #include "llvm/CodeGen/Spiller.h"
43 #include "llvm/CodeGen/StackMaps.h"
49 #include "llvm/Config/llvm-config.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
57 #include <cassert>
58 #include <iterator>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "regalloc"
66 
67 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68 STATISTIC(NumSnippets, "Number of spilled snippets");
69 STATISTIC(NumSpills, "Number of spills inserted");
70 STATISTIC(NumSpillsRemoved, "Number of spills removed");
71 STATISTIC(NumReloads, "Number of reloads inserted");
72 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73 STATISTIC(NumFolded, "Number of folded stack accesses");
74 STATISTIC(NumFoldedLoads, "Number of folded loads");
75 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76 
77 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78  cl::desc("Disable inline spill hoisting"));
79 static cl::opt<bool>
80 RestrictStatepointRemat("restrict-statepoint-remat",
81  cl::init(false), cl::Hidden,
82  cl::desc("Restrict remat for statepoint operands"));
83 
84 namespace {
85 
86 class HoistSpillHelper : private LiveRangeEdit::Delegate {
87  MachineFunction &MF;
88  LiveIntervals &LIS;
89  LiveStacks &LSS;
90  AliasAnalysis *AA;
91  MachineDominatorTree &MDT;
92  MachineLoopInfo &Loops;
93  VirtRegMap &VRM;
94  MachineRegisterInfo &MRI;
95  const TargetInstrInfo &TII;
96  const TargetRegisterInfo &TRI;
97  const MachineBlockFrequencyInfo &MBFI;
98 
99  InsertPointAnalysis IPA;
100 
101  // Map from StackSlot to the LiveInterval of the original register.
102  // Note the LiveInterval of the original register may have been deleted
103  // after it is spilled. We keep a copy here to track the range where
104  // spills can be moved.
105  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
106 
107  // Map from pair of (StackSlot and Original VNI) to a set of spills which
108  // have the same stackslot and have equal values defined by Original VNI.
109  // These spills are mergeable and are hoist candidates.
110  using MergeableSpillsMap =
111  MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
112  MergeableSpillsMap MergeableSpills;
113 
114  /// This is the map from original register to a set containing all its
115  /// siblings. To hoist a spill to another BB, we need to find out a live
116  /// sibling there and use it as the source of the new spill.
117  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
118 
119  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
120  MachineBasicBlock &BB, Register &LiveReg);
121 
122  void rmRedundantSpills(
126 
127  void getVisitOrders(
133 
134  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
138 
139 public:
140  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
141  VirtRegMap &vrm)
142  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
143  LSS(pass.getAnalysis<LiveStacks>()),
144  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
145  MDT(pass.getAnalysis<MachineDominatorTree>()),
146  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
147  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
148  TRI(*mf.getSubtarget().getRegisterInfo()),
149  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
150  IPA(LIS, mf.getNumBlockIDs()) {}
151 
152  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
153  unsigned Original);
154  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
155  void hoistAllSpills();
156  void LRE_DidCloneVirtReg(Register, Register) override;
157 };
158 
159 class InlineSpiller : public Spiller {
160  MachineFunction &MF;
161  LiveIntervals &LIS;
162  LiveStacks &LSS;
163  AliasAnalysis *AA;
164  MachineDominatorTree &MDT;
165  MachineLoopInfo &Loops;
166  VirtRegMap &VRM;
167  MachineRegisterInfo &MRI;
168  const TargetInstrInfo &TII;
169  const TargetRegisterInfo &TRI;
170  const MachineBlockFrequencyInfo &MBFI;
171 
172  // Variables that are valid during spill(), but used by multiple methods.
173  LiveRangeEdit *Edit;
174  LiveInterval *StackInt;
175  int StackSlot;
176  Register Original;
177 
178  // All registers to spill to StackSlot, including the main register.
179  SmallVector<Register, 8> RegsToSpill;
180 
181  // All COPY instructions to/from snippets.
182  // They are ignored since both operands refer to the same stack slot.
183  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
184 
185  // Values that failed to remat at some point.
186  SmallPtrSet<VNInfo*, 8> UsedValues;
187 
188  // Dead defs generated during spilling.
189  SmallVector<MachineInstr*, 8> DeadDefs;
190 
191  // Object that records spill information and does the hoisting.
192  HoistSpillHelper HSpiller;
193 
194  // Live range weight calculator.
195  VirtRegAuxInfo &VRAI;
196 
197  ~InlineSpiller() override = default;
198 
199 public:
200  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
201  VirtRegAuxInfo &VRAI)
202  : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
203  LSS(Pass.getAnalysis<LiveStacks>()),
204  AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
205  MDT(Pass.getAnalysis<MachineDominatorTree>()),
206  Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
207  MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
208  TRI(*MF.getSubtarget().getRegisterInfo()),
209  MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
210  HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
211 
212  void spill(LiveRangeEdit &) override;
213  void postOptimization() override;
214 
215 private:
216  bool isSnippet(const LiveInterval &SnipLI);
217  void collectRegsToSpill();
218 
219  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
220 
221  bool isSibling(Register Reg);
222  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
223  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
224 
225  void markValueUsed(LiveInterval*, VNInfo*);
226  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
227  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
228  void reMaterializeAll();
229 
230  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
231  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
232  MachineInstr *LoadMI = nullptr);
233  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
234  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
235 
236  void spillAroundUses(Register Reg);
237  void spillAll();
238 };
239 
240 } // end anonymous namespace
241 
242 Spiller::~Spiller() = default;
243 
244 void Spiller::anchor() {}
245 
246 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
247  MachineFunction &MF, VirtRegMap &VRM,
248  VirtRegAuxInfo &VRAI) {
249  return new InlineSpiller(Pass, MF, VRM, VRAI);
250 }
251 
252 //===----------------------------------------------------------------------===//
253 // Snippets
254 //===----------------------------------------------------------------------===//
255 
256 // When spilling a virtual register, we also spill any snippets it is connected
257 // to. The snippets are small live ranges that only have a single real use,
258 // leftovers from live range splitting. Spilling them enables memory operand
259 // folding or tightens the live range around the single use.
260 //
261 // This minimizes register pressure and maximizes the store-to-load distance for
262 // spill slots which can be important in tight loops.
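//
// As a rough illustration (hypothetical MIR, register names and opcodes are
// made up and not taken from any test case), spilling %x together with its
// snippet %snip turns
//
//   %snip = COPY %x
//   %y = ADD %snip, 1        ; the snippet's single real use
//
// into stack-slot accesses: the COPY disappears because both registers share
// the same stack slot, and the single use can often be folded into a memory
// operand:
//
//   SPILL %x, fi#0
//   %y = ADD [fi#0], 1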
263 
264 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
265 /// otherwise return 0.
266 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
267  if (!MI.isFullCopy())
268  return Register();
269  if (MI.getOperand(0).getReg() == Reg)
270  return MI.getOperand(1).getReg();
271  if (MI.getOperand(1).getReg() == Reg)
272  return MI.getOperand(0).getReg();
273  return Register();
274 }
275 
276 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
277  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
278  const MachineOperand &MO = MI.getOperand(I);
279  if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
280  LIS.getInterval(MO.getReg());
281  }
282 }
283 
284 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
285 /// It is assumed that SnipLI is a virtual register with the same original as
286 /// Edit->getReg().
287 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
288  Register Reg = Edit->getReg();
289 
290  // A snippet is a tiny live range with only a single instruction using it
291  // besides copies to/from Reg or spills/fills. We accept:
292  //
293  // %snip = COPY %Reg / FILL fi#
294  // %snip = USE %snip
295  // %Reg = COPY %snip / SPILL %snip, fi#
296  //
297  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
298  return false;
299 
300  MachineInstr *UseMI = nullptr;
301 
302  // Check that all uses satisfy our criteria.
303  for (MachineRegisterInfo::reg_instr_nodbg_iterator
304  RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
305  E = MRI.reg_instr_nodbg_end();
306  RI != E;) {
307  MachineInstr &MI = *RI++;
308 
309  // Allow copies to/from Reg.
310  if (isFullCopyOf(MI, Reg))
311  continue;
312 
313  // Allow stack slot loads.
314  int FI;
315  if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
316  continue;
317 
318  // Allow stack slot stores.
319  if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
320  continue;
321 
322  // Allow a single additional instruction.
323  if (UseMI && &MI != UseMI)
324  return false;
325  UseMI = &MI;
326  }
327  return true;
328 }
329 
330 /// collectRegsToSpill - Collect live range snippets that only have a single
331 /// real use.
332 void InlineSpiller::collectRegsToSpill() {
333  Register Reg = Edit->getReg();
334 
335  // Main register always spills.
336  RegsToSpill.assign(1, Reg);
337  SnippetCopies.clear();
338 
339  // Snippets all have the same original, so there can't be any for an original
340  // register.
341  if (Original == Reg)
342  return;
343 
344  for (MachineRegisterInfo::reg_instr_iterator
345  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
346  MachineInstr &MI = *RI++;
347  Register SnipReg = isFullCopyOf(MI, Reg);
348  if (!isSibling(SnipReg))
349  continue;
350  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
351  if (!isSnippet(SnipLI))
352  continue;
353  SnippetCopies.insert(&MI);
354  if (isRegToSpill(SnipReg))
355  continue;
356  RegsToSpill.push_back(SnipReg);
357  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
358  ++NumSnippets;
359  }
360 }
361 
362 bool InlineSpiller::isSibling(Register Reg) {
363  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
364 }
365 
366 /// It is beneficial to spill to an earlier place in the same BB in cases
367 /// like the following:
368 /// There is an alternative def earlier in the same MBB.
369 /// Hoist the spill as far as possible in SpillMBB. This can ease
370 /// register pressure:
371 ///
372 /// x = def
373 /// y = use x
374 /// s = copy x
375 ///
376 /// Hoisting the spill of s to immediately after the def removes the
377 /// interference between x and y:
378 ///
379 /// x = def
380 /// spill x
381 /// y = use killed x
382 ///
383 /// This hoist only helps when the copy kills its source.
384 ///
385 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
386  MachineInstr &CopyMI) {
387  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
388 #ifndef NDEBUG
389  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
390  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
391 #endif
392 
393  Register SrcReg = CopyMI.getOperand(1).getReg();
394  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
395  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
396  LiveQueryResult SrcQ = SrcLI.Query(Idx);
397  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
398  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
399  return false;
400 
401  // Conservatively extend the stack slot range to the range of the original
402  // value. We may be able to do better with stack slot coloring by being more
403  // careful here.
404  assert(StackInt && "No stack slot assigned yet.");
405  LiveInterval &OrigLI = LIS.getInterval(Original);
406  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
407  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
408  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
409  << *StackInt << '\n');
410 
411  // We are going to spill SrcVNI immediately after its def, so clear out
412  // any later spills of the same value.
413  eliminateRedundantSpills(SrcLI, SrcVNI);
414 
415  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
416  MachineBasicBlock::iterator MII;
417  if (SrcVNI->isPHIDef())
418  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
419  else {
420  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
421  assert(DefMI && "Defining instruction disappeared");
422  MII = DefMI;
423  ++MII;
424  }
425  MachineInstrSpan MIS(MII, MBB);
426  // Insert spill without kill flag immediately after def.
427  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
428  MRI.getRegClass(SrcReg), &TRI);
429  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
430  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
431  getVDefInterval(MI, LIS);
432  --MII; // Point to store instruction.
433  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
434 
435  // If only one store instruction is required for the spill, add it to the
436  // mergeable list. On X86 AMX, two instructions are required to store, so
437  // we disable the merge for that case.
438  if (MIS.begin() == MII)
439  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
440  ++NumSpills;
441  return true;
442 }
443 
444 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
445 /// redundant spills of this value in SLI.reg and sibling copies.
446 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
447  assert(VNI && "Missing value");
448  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
449  WorkList.push_back(std::make_pair(&SLI, VNI));
450  assert(StackInt && "No stack slot assigned yet.");
451 
452  do {
453  LiveInterval *LI;
454  std::tie(LI, VNI) = WorkList.pop_back_val();
455  Register Reg = LI->reg();
456  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
457  << VNI->def << " in " << *LI << '\n');
458 
459  // Regs to spill are taken care of.
460  if (isRegToSpill(Reg))
461  continue;
462 
463  // Add all of VNI's live range to StackInt.
464  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
465  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
466 
467  // Find all spills and copies of VNI.
470  UI != E; ) {
471  MachineInstr &MI = *UI++;
472  if (!MI.isCopy() && !MI.mayStore())
473  continue;
474  SlotIndex Idx = LIS.getInstructionIndex(MI);
475  if (LI->getVNInfoAt(Idx) != VNI)
476  continue;
477 
478  // Follow sibling copies down the dominator tree.
479  if (Register DstReg = isFullCopyOf(MI, Reg)) {
480  if (isSibling(DstReg)) {
481  LiveInterval &DstLI = LIS.getInterval(DstReg);
482  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
483  assert(DstVNI && "Missing defined value");
484  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
485  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
486  }
487  continue;
488  }
489 
490  // Erase spills.
491  int FI;
492  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
493  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
494  // eliminateDeadDefs won't normally remove stores, so switch opcode.
495  MI.setDesc(TII.get(TargetOpcode::KILL));
496  DeadDefs.push_back(&MI);
497  ++NumSpillsRemoved;
498  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
499  --NumSpills;
500  }
501  }
502  } while (!WorkList.empty());
503 }
504 
505 //===----------------------------------------------------------------------===//
506 // Rematerialization
507 //===----------------------------------------------------------------------===//
508 
509 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
510 /// instruction cannot be eliminated. See through snippet copies.
511 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
512  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
513  WorkList.push_back(std::make_pair(LI, VNI));
514  do {
515  std::tie(LI, VNI) = WorkList.pop_back_val();
516  if (!UsedValues.insert(VNI).second)
517  continue;
518 
519  if (VNI->isPHIDef()) {
520  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
521  for (MachineBasicBlock *P : MBB->predecessors()) {
522  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
523  if (PVNI)
524  WorkList.push_back(std::make_pair(LI, PVNI));
525  }
526  continue;
527  }
528 
529  // Follow snippet copies.
530  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
531  if (!SnippetCopies.count(MI))
532  continue;
533  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
534  assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
535  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
536  assert(SnipVNI && "Snippet undefined before copy");
537  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
538  } while (!WorkList.empty());
539 }
540 
541 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
542  MachineInstr &MI) {
543  if (!RestrictStatepointRemat)
544  return true;
545  // Here's a quick explanation of the problem we're trying to handle here:
546  // * There are some pseudo instructions with more vreg uses than there are
547  // physical registers on the machine.
548  // * This is normally handled by spilling the vreg, and folding the reload
549  // into the user instruction. (Thus decreasing the number of used vregs
550  // until the remainder can be assigned to physregs.)
551  // * However, since we may try to spill vregs in any order, we can end up
552  // trying to spill each operand to the instruction, and then rematting it
553  // instead. When that happens, the new live intervals (for the remats) are
554  // expected to be trivially assignable (i.e. RS_Done). However, since we
555  // may have more remats than physregs, we're guaranteed to fail to assign
556  // one.
557  // At the moment, we only handle this for STATEPOINTs since they're the only
558  // pseudo op where we've seen this. If we start seeing other instructions
559  // with the same problem, we need to revisit this.
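  // A rough, hypothetical illustration of the failure mode described above
  // (operand counts and register names are made up): a statepoint such as
  //
  //   STATEPOINT ..., %v0, %v1, ..., %v40   ; more vreg uses than physregs
  //
  // cannot have every operand rematerialized into its own fresh vreg right
  // before it; the remat intervals would all be live at once, and with more
  // of them than physical registers some are guaranteed to fail assignment.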
560  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
561  return true;
562  // For STATEPOINTs we allow re-materialization for fixed arguments only, hoping
563  // that the number of physical registers is enough to cover all fixed arguments.
564  // If that is not true we need to revisit it.
565  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
566  EndIdx = MI.getNumOperands();
567  Idx < EndIdx; ++Idx) {
568  MachineOperand &MO = MI.getOperand(Idx);
569  if (MO.isReg() && MO.getReg() == VReg)
570  return false;
571  }
572  return true;
573 }
574 
575 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
576 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
577  // Analyze instruction
578  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
579  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
580 
581  if (!RI.Reads)
582  return false;
583 
584  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
585  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
586 
587  if (!ParentVNI) {
588  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
589  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
590  MachineOperand &MO = MI.getOperand(i);
591  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
592  MO.setIsUndef();
593  }
594  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
595  return true;
596  }
597 
598  if (SnippetCopies.count(&MI))
599  return false;
600 
601  LiveInterval &OrigLI = LIS.getInterval(Original);
602  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
603  LiveRangeEdit::Remat RM(ParentVNI);
604  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
605 
606  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
607  markValueUsed(&VirtReg, ParentVNI);
608  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
609  return false;
610  }
611 
612  // If the instruction also writes VirtReg.reg, it had better not require the
613  // same register for uses and defs.
614  if (RI.Tied) {
615  markValueUsed(&VirtReg, ParentVNI);
616  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
617  return false;
618  }
619 
620  // Before rematerializing into a register for a single instruction, try to
621  // fold a load into the instruction. That avoids allocating a new register.
622  if (RM.OrigMI->canFoldAsLoad() &&
623  foldMemoryOperand(Ops, RM.OrigMI)) {
624  Edit->markRematerialized(RM.ParentVNI);
625  ++NumFoldedLoads;
626  return true;
627  }
628 
629  // If we can't guarantee that we'll be able to actually assign the new vreg,
630  // we can't remat.
631  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
632  markValueUsed(&VirtReg, ParentVNI);
633  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
634  return false;
635  }
636 
637  // Allocate a new register for the remat.
638  Register NewVReg = Edit->createFrom(Original);
639 
640  // Finally we can rematerialize OrigMI before MI.
641  SlotIndex DefIdx =
642  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
643 
644  // We take the DebugLoc from MI, since OrigMI may be attributed to a
645  // different source location.
646  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
647  NewMI->setDebugLoc(MI.getDebugLoc());
648 
649  (void)DefIdx;
650  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
651  << *LIS.getInstructionFromIndex(DefIdx));
652 
653  // Replace operands
654  for (const auto &OpPair : Ops) {
655  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
656  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
657  MO.setReg(NewVReg);
658  MO.setIsKill();
659  }
660  }
661  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
662 
663  ++NumRemats;
664  return true;
665 }
666 
667 /// reMaterializeAll - Try to rematerialize as many uses as possible,
668 /// and trim the live ranges after.
669 void InlineSpiller::reMaterializeAll() {
670  if (!Edit->anyRematerializable(AA))
671  return;
672 
673  UsedValues.clear();
674 
675  // Try to remat before all uses of snippets.
676  bool anyRemat = false;
677  for (Register Reg : RegsToSpill) {
678  LiveInterval &LI = LIS.getInterval(Reg);
681  RegI != E; ) {
682  MachineInstr &MI = *RegI++;
683 
684  // Debug values are not allowed to affect codegen.
685  if (MI.isDebugValue())
686  continue;
687 
688  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
689  "instruction that isn't a DBG_VALUE");
690 
691  anyRemat |= reMaterializeFor(LI, MI);
692  }
693  }
694  if (!anyRemat)
695  return;
696 
697  // Remove any values that were completely rematted.
698  for (Register Reg : RegsToSpill) {
699  LiveInterval &LI = LIS.getInterval(Reg);
700  for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
701  I != E; ++I) {
702  VNInfo *VNI = *I;
703  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
704  continue;
705  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
706  MI->addRegisterDead(Reg, &TRI);
707  if (!MI->allDefsAreDead())
708  continue;
709  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
710  DeadDefs.push_back(MI);
711  }
712  }
713 
714  // Eliminate dead code after remat. Note that some snippet copies may be
715  // deleted here.
716  if (DeadDefs.empty())
717  return;
718  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
719  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
720 
721  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
722  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
723  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
724  // removed, PHI VNIs are still left in the LiveInterval.
725  // So to get rid of an unused reg, we need to check whether it has non-dbg
726  // references instead of whether it has a non-empty interval.
727  unsigned ResultPos = 0;
728  for (Register Reg : RegsToSpill) {
729  if (MRI.reg_nodbg_empty(Reg)) {
730  Edit->eraseVirtReg(Reg);
731  continue;
732  }
733 
734  assert(LIS.hasInterval(Reg) &&
735  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
736  "Empty and not used live-range?!");
737 
738  RegsToSpill[ResultPos++] = Reg;
739  }
740  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
741  LLVM_DEBUG(dbgs() << RegsToSpill.size()
742  << " registers to spill after remat.\n");
743 }
744 
745 //===----------------------------------------------------------------------===//
746 // Spilling
747 //===----------------------------------------------------------------------===//
748 
749 /// If MI is a load or store of StackSlot, it can be removed.
750 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
751  int FI = 0;
752  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
753  bool IsLoad = InstrReg;
754  if (!IsLoad)
755  InstrReg = TII.isStoreToStackSlot(*MI, FI);
756 
757  // We have a stack access. Is it the right register and slot?
758  if (InstrReg != Reg || FI != StackSlot)
759  return false;
760 
761  if (!IsLoad)
762  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
763 
764  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
765  LIS.RemoveMachineInstrFromMaps(*MI);
766  MI->eraseFromParent();
767 
768  if (IsLoad) {
769  ++NumReloadsRemoved;
770  --NumReloads;
771  } else {
772  ++NumSpillsRemoved;
773  --NumSpills;
774  }
775 
776  return true;
777 }
778 
779 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
781 // Dump the range of instructions from B to E with their slot indexes.
784  LiveIntervals const &LIS,
785  const char *const header,
786  Register VReg = Register()) {
787  char NextLine = '\n';
788  char SlotIndent = '\t';
789 
790  if (std::next(B) == E) {
791  NextLine = ' ';
792  SlotIndent = ' ';
793  }
794 
795  dbgs() << '\t' << header << ": " << NextLine;
796 
797  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
798  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
799 
800  // If a register was passed in and this instruction has it as a
801  // destination that is marked as an early clobber, print the
802  // early-clobber slot index.
803  if (VReg) {
804  MachineOperand *MO = I->findRegisterDefOperand(VReg);
805  if (MO && MO->isEarlyClobber())
806  Idx = Idx.getRegSlot(true);
807  }
808 
809  dbgs() << SlotIndent << Idx << '\t' << *I;
810  }
811 }
812 #endif
813 
814 /// foldMemoryOperand - Try folding stack slot references in Ops into their
815 /// instructions.
816 ///
817 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
818 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
819 /// @return True on success.
820 bool InlineSpiller::
821 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
822  MachineInstr *LoadMI) {
823  if (Ops.empty())
824  return false;
825  // Don't attempt folding in bundles.
826  MachineInstr *MI = Ops.front().first;
827  if (Ops.back().first != MI || MI->isBundled())
828  return false;
829 
830  bool WasCopy = MI->isCopy();
831  Register ImpReg;
832 
833  // TII::foldMemoryOperand will do what we need here for statepoint
834  // (fold load into use and remove corresponding def). We will replace
835  // uses of removed def with loads (spillAroundUses).
836  // For that to work we need to untie def and use to pass it through
837  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
838  // fold them.
839  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
840 
841  // Spill subregs if the target allows it.
842  // We always want to spill subregs for stackmap/patchpoint pseudos.
843  bool SpillSubRegs = TII.isSubregFoldable() ||
844  MI->getOpcode() == TargetOpcode::STATEPOINT ||
845  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
846  MI->getOpcode() == TargetOpcode::STACKMAP;
847 
848  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
849  // operands.
850  SmallVector<unsigned, 8> FoldOps;
851  for (const auto &OpPair : Ops) {
852  unsigned Idx = OpPair.second;
853  assert(MI == OpPair.first && "Instruction conflict during operand folding");
854  MachineOperand &MO = MI->getOperand(Idx);
855  if (MO.isImplicit()) {
856  ImpReg = MO.getReg();
857  continue;
858  }
859 
860  if (!SpillSubRegs && MO.getSubReg())
861  return false;
862  // We cannot fold a load instruction into a def.
863  if (LoadMI && MO.isDef())
864  return false;
865  // Tied use operands should not be passed to foldMemoryOperand.
866  if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
867  FoldOps.push_back(Idx);
868  }
869 
870  // If we only have implicit uses, we won't be able to fold that.
871  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
872  if (FoldOps.empty())
873  return false;
874 
875  MachineInstrSpan MIS(MI, MI->getParent());
876 
877  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
878  if (UntieRegs)
879  for (unsigned Idx : FoldOps) {
880  MachineOperand &MO = MI->getOperand(Idx);
881  if (!MO.isTied())
882  continue;
883  unsigned Tied = MI->findTiedOperandIdx(Idx);
884  if (MO.isUse())
885  TiedOps.emplace_back(Tied, Idx);
886  else {
887  assert(MO.isDef() && "Tied to not use and def?");
888  TiedOps.emplace_back(Idx, Tied);
889  }
890  MI->untieRegOperand(Idx);
891  }
892 
893  MachineInstr *FoldMI =
894  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
895  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
896  if (!FoldMI) {
897  // Re-tie operands.
898  for (auto Tied : TiedOps)
899  MI->tieOperands(Tied.first, Tied.second);
900  return false;
901  }
902 
903  // Remove LIS for any dead defs in the original MI not in FoldMI.
904  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
905  if (!MO->isReg())
906  continue;
907  Register Reg = MO->getReg();
909  continue;
910  }
911  // Skip non-Defs, including undef uses and internal reads.
912  if (MO->isUse())
913  continue;
914  PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
915  if (RI.FullyDefined)
916  continue;
917  // FoldMI does not define this physreg. Remove the LI segment.
918  assert(MO->isDead() && "Cannot fold physreg def");
919  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
920  LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
921  }
922 
923  int FI;
924  if (TII.isStoreToStackSlot(*MI, FI) &&
925  HSpiller.rmFromMergeableSpills(*MI, FI))
926  --NumSpills;
927  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
928  // Update the call site info.
929  if (MI->isCandidateForCallSiteEntry())
930  MI->getMF()->moveCallSiteInfo(MI, FoldMI);
931 
932  // If we've folded a store into an instruction labelled with debug-info,
933  // record a substitution from the old operand to the memory operand. Handle
934  // the simple common case where operand 0 is the one being folded, plus when
935  // the destination operand is also a tied def. More values could be
936  // substituted / preserved with more analysis.
937  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
938  // Helper lambda.
939  auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
940  // Substitute old operand zero with the new instruction's memory operand.
941  unsigned OldOperandNum = Ops[0].second;
942  unsigned NewNum = FoldMI->getDebugInstrNum();
943  unsigned OldNum = MI->getDebugInstrNum();
944  MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
946  };
947 
948  const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
949  if (Ops.size() == 1 && Op0.isDef()) {
950  MakeSubstitution();
951  } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
952  Op0.getReg() == MI->getOperand(1).getReg()) {
953  MakeSubstitution();
954  }
955  } else if (MI->peekDebugInstrNum()) {
956  // This is a debug-labelled instruction, but the operand being folded isn't
957  // at operand zero. Most likely this means it's a load being folded in.
958  // Substitute any register defs from operand zero up to the one being
959  // folded -- past that point, we don't know what the new operand indexes
960  // will be.
961  MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
962  }
963 
964  MI->eraseFromParent();
965 
966  // Insert any new instructions other than FoldMI into the LIS maps.
967  assert(!MIS.empty() && "Unexpected empty span of instructions!");
968  for (MachineInstr &MI : MIS)
969  if (&MI != FoldMI)
970  LIS.InsertMachineInstrInMaps(MI);
971 
972  // TII.foldMemoryOperand may have left some implicit operands on the
973  // instruction. Strip them.
974  if (ImpReg)
975  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
976  MachineOperand &MO = FoldMI->getOperand(i - 1);
977  if (!MO.isReg() || !MO.isImplicit())
978  break;
979  if (MO.getReg() == ImpReg)
980  FoldMI->RemoveOperand(i - 1);
981  }
982 
983  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
984  "folded"));
985 
986  if (!WasCopy)
987  ++NumFolded;
988  else if (Ops.front().second == 0) {
989  ++NumSpills;
990  // If only one store instruction is required for the spill, add it to the
991  // mergeable list. On X86 AMX, two instructions are required to store, so
992  // we disable the merge for that case.
993  if (std::distance(MIS.begin(), MIS.end()) <= 1)
994  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
995  } else
996  ++NumReloads;
997  return true;
998 }
999 
1000 void InlineSpiller::insertReload(Register NewVReg,
1001  SlotIndex Idx,
1002  MachineBasicBlock::iterator MI) {
1003  MachineBasicBlock &MBB = *MI->getParent();
1004 
1005  MachineInstrSpan MIS(MI, &MBB);
1006  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
1007  MRI.getRegClass(NewVReg), &TRI);
1008 
1009  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
1010 
1011  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
1012  NewVReg));
1013  ++NumReloads;
1014 }
1015 
1016 /// Check if \p Def fully defines a VReg with an undefined value.
1017 /// If that's the case, that means the value of VReg is actually
1018 /// not relevant.
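/// For example (illustrative only), when the value being spilled is defined by
///   %v = IMPLICIT_DEF
/// the spill is not "real": the stack slot may stay uninitialized, so
/// insertSpill() below emits a KILL instead of an actual store in that case.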
1019 static bool isRealSpill(const MachineInstr &Def) {
1020  if (!Def.isImplicitDef())
1021  return true;
1022  assert(Def.getNumOperands() == 1 &&
1023  "Implicit def with more than one definition");
1024  // We can say that the VReg defined by Def is undef, only if it is
1025  // fully defined by Def. Otherwise, some of the lanes may not be
1026  // undef and the value of the VReg matters.
1027  return Def.getOperand(0).getSubReg();
1028 }
1029 
1030 /// insertSpill - Insert a spill of NewVReg after MI.
1031 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1032  MachineBasicBlock::iterator MI) {
1033  // Spills are not terminators, so inserting spills after terminators will
1034  // violate invariants in MachineVerifier.
1035  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1036  MachineBasicBlock &MBB = *MI->getParent();
1037 
1038  MachineInstrSpan MIS(MI, &MBB);
1039  MachineBasicBlock::iterator SpillBefore = std::next(MI);
1040  bool IsRealSpill = isRealSpill(*MI);
1041 
1042  if (IsRealSpill)
1043  TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1044  MRI.getRegClass(NewVReg), &TRI);
1045  else
1046  // Don't spill undef value.
1047  // Anything works for undef, in particular keeping the memory
1048  // uninitialized is a viable option and it saves code size and
1049  // run time.
1050  BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1051  .addReg(NewVReg, getKillRegState(isKill));
1052 
1053  MachineBasicBlock::iterator Spill = std::next(MI);
1054  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1055  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1056  getVDefInterval(MI, LIS);
1057 
1058  LLVM_DEBUG(
1059  dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1060  ++NumSpills;
1061  // If only one store instruction is required for the spill, add it to the
1062  // mergeable list. On X86 AMX, two instructions are required to store, so
1063  // we disable the merge for that case.
1064  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1065  HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1066 }
1067 
1068 /// spillAroundUses - insert spill code around each use of Reg.
1069 void InlineSpiller::spillAroundUses(Register Reg) {
1070  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1071  LiveInterval &OldLI = LIS.getInterval(Reg);
1072 
1073  // Iterate over instructions using Reg.
1074  for (MachineRegisterInfo::reg_bundle_iterator
1075  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
1076  RegI != E; ) {
1077  MachineInstr *MI = &*(RegI++);
1078 
1079  // Debug values are not allowed to affect codegen.
1080  if (MI->isDebugValue()) {
1081  // Modify DBG_VALUE now that the value is in a spill slot.
1082  MachineBasicBlock *MBB = MI->getParent();
1083  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
1084  buildDbgValueForSpill(*MBB, MI, *MI, StackSlot, Reg);
1085  MBB->erase(MI);
1086  continue;
1087  }
1088 
1089  assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
1090  "instruction that isn't a DBG_VALUE");
1091 
1092  // Ignore copies to/from snippets. We'll delete them.
1093  if (SnippetCopies.count(MI))
1094  continue;
1095 
1096  // Stack slot accesses may coalesce away.
1097  if (coalesceStackAccess(MI, Reg))
1098  continue;
1099 
1100  // Analyze instruction.
1101  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1102  VirtRegInfo RI = AnalyzeVirtRegInBundle(*MI, Reg, &Ops);
1103 
1104  // Find the slot index where this instruction reads and writes OldLI.
1105  // This is usually the def slot, except for tied early clobbers.
1106  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
1107  if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1108  if (SlotIndex::isSameInstr(Idx, VNI->def))
1109  Idx = VNI->def;
1110 
1111  // Check for a sibling copy.
1112  Register SibReg = isFullCopyOf(*MI, Reg);
1113  if (SibReg && isSibling(SibReg)) {
1114  // This may actually be a copy between snippets.
1115  if (isRegToSpill(SibReg)) {
1116  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
1117  SnippetCopies.insert(MI);
1118  continue;
1119  }
1120  if (RI.Writes) {
1121  if (hoistSpillInsideBB(OldLI, *MI)) {
1122  // This COPY is now dead, the value is already in the stack slot.
1123  MI->getOperand(0).setIsDead();
1124  DeadDefs.push_back(MI);
1125  continue;
1126  }
1127  } else {
1128  // This is a reload for a sib-reg copy. Drop spills downstream.
1129  LiveInterval &SibLI = LIS.getInterval(SibReg);
1130  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1131  // The COPY will fold to a reload below.
1132  }
1133  }
1134 
1135  // Attempt to fold memory ops.
1136  if (foldMemoryOperand(Ops))
1137  continue;
1138 
1139  // Create a new virtual register for spill/fill.
1140  // FIXME: Infer regclass from instruction alone.
1141  Register NewVReg = Edit->createFrom(Reg);
1142 
1143  if (RI.Reads)
1144  insertReload(NewVReg, Idx, MI);
1145 
1146  // Rewrite instruction operands.
1147  bool hasLiveDef = false;
1148  for (const auto &OpPair : Ops) {
1149  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1150  MO.setReg(NewVReg);
1151  if (MO.isUse()) {
1152  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1153  MO.setIsKill();
1154  } else {
1155  if (!MO.isDead())
1156  hasLiveDef = true;
1157  }
1158  }
1159  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1160 
1161  // FIXME: Use a second vreg if instruction has no tied ops.
1162  if (RI.Writes)
1163  if (hasLiveDef)
1164  insertSpill(NewVReg, true, MI);
1165  }
1166 }
1167 
1168 /// spillAll - Spill all registers remaining after rematerialization.
1169 void InlineSpiller::spillAll() {
1170  // Update LiveStacks now that we are committed to spilling.
1171  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1172  StackSlot = VRM.assignVirt2StackSlot(Original);
1173  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1174  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1175  } else
1176  StackInt = &LSS.getInterval(StackSlot);
1177 
1178  if (Original != Edit->getReg())
1179  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1180 
1181  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1182  for (Register Reg : RegsToSpill)
1183  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1184  StackInt->getValNumInfo(0));
1185  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1186 
1187  // Spill around uses of all RegsToSpill.
1188  for (Register Reg : RegsToSpill)
1189  spillAroundUses(Reg);
1190 
1191  // Hoisted spills may cause dead code.
1192  if (!DeadDefs.empty()) {
1193  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1194  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1195  }
1196 
1197  // Finally delete the SnippetCopies.
1198  for (Register Reg : RegsToSpill) {
1199  for (MachineRegisterInfo::reg_instr_iterator
1200  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1201  RI != E; ) {
1202  MachineInstr &MI = *(RI++);
1203  assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1204  // FIXME: Do this with a LiveRangeEdit callback.
1205  LIS.RemoveMachineInstrFromMaps(MI);
1206  MI.eraseFromParent();
1207  }
1208  }
1209 
1210  // Delete all spilled registers.
1211  for (Register Reg : RegsToSpill)
1212  Edit->eraseVirtReg(Reg);
1213 }
1214 
1215 void InlineSpiller::spill(LiveRangeEdit &edit) {
1216  ++NumSpilledRanges;
1217  Edit = &edit;
1218  assert(!Register::isStackSlot(edit.getReg()) &&
1219  "Trying to spill a stack slot.");
1220  // Share a stack slot among all descendants of Original.
1221  Original = VRM.getOriginal(edit.getReg());
1222  StackSlot = VRM.getStackSlot(Original);
1223  StackInt = nullptr;
1224 
1225  LLVM_DEBUG(dbgs() << "Inline spilling "
1227  << ':' << edit.getParent() << "\nFrom original "
1228  << printReg(Original) << '\n');
1229  assert(edit.getParent().isSpillable() &&
1230  "Attempting to spill already spilled value.");
1231  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1232 
1233  collectRegsToSpill();
1234  reMaterializeAll();
1235 
1236  // Remat may handle everything.
1237  if (!RegsToSpill.empty())
1238  spillAll();
1239 
1240  Edit->calculateRegClassAndHint(MF, VRAI);
1241 }
1242 
1243 /// Optimizations after all the reg selections and spills are done.
1244 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1245 
1246 /// When a spill is inserted, add the spill to MergeableSpills map.
1247 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1248  unsigned Original) {
1249  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1250  LiveInterval &OrigLI = LIS.getInterval(Original);
1251  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1252  // LiveInterval may be cleared after all its references are spilled.
1253  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1254  auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1255  LI->assign(OrigLI, Allocator);
1256  StackSlotToOrigLI[StackSlot] = std::move(LI);
1257  }
1258  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1259  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1260  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1261  MergeableSpills[MIdx].insert(&Spill);
1262 }
1263 
1264 /// When a spill is removed, remove the spill from MergeableSpills map.
1265 /// Return true if the spill is removed successfully.
1266 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1267  int StackSlot) {
1268  auto It = StackSlotToOrigLI.find(StackSlot);
1269  if (It == StackSlotToOrigLI.end())
1270  return false;
1271  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1272  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1273  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1274  return MergeableSpills[MIdx].erase(&Spill);
1275 }
1276 
1277 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1278 /// i.e., there should be a live sibling of OrigReg at the insert point.
1279 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1280  MachineBasicBlock &BB, Register &LiveReg) {
1281  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1282  // The original def could be after the last insert point in the root block,
1283  // in which case we can't hoist to here.
1284  if (Idx < OrigVNI.def) {
1285  // TODO: We could be better here. If LI is not alive in landing pad
1286  // we could hoist spill after LIP.
1287  LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1288  return false;
1289  }
1290  Register OrigReg = OrigLI.reg();
1291  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1292  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1293 
1294  for (const Register &SibReg : Siblings) {
1295  LiveInterval &LI = LIS.getInterval(SibReg);
1296  VNInfo *VNI = LI.getVNInfoAt(Idx);
1297  if (VNI) {
1298  LiveReg = SibReg;
1299  return true;
1300  }
1301  }
1302  return false;
1303 }
1304 
1305 /// Remove redundant spills in the same BB. Save those redundant spills in
1306 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1307 void HoistSpillHelper::rmRedundantSpills(
1308  SmallPtrSet<MachineInstr *, 16> &Spills,
1309  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1310  DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1311  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1312  // another spill inside. If a BB contains more than one spill, only keep the
1313  // earlier spill with the smaller SlotIndex.
1314  for (const auto CurrentSpill : Spills) {
1315  MachineBasicBlock *Block = CurrentSpill->getParent();
1316  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1317  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1318  if (PrevSpill) {
1319  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1320  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1321  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1322  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1323  SpillsToRm.push_back(SpillToRm);
1324  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1325  } else {
1326  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1327  }
1328  }
1329  for (const auto SpillToRm : SpillsToRm)
1330  Spills.erase(SpillToRm);
1331 }
1332 
1333 /// Starting from \p Root find a top-down traversal order of the dominator
1334 /// tree to visit all basic blocks containing the elements of \p Spills.
1335 /// Redundant spills will be found and put into \p SpillsToRm at the same
1336 /// time. \p SpillBBToSpill will be populated as part of the process and
1337 /// maps a basic block to the first store occurring in the basic block.
1338 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1339 void HoistSpillHelper::getVisitOrders(
1342  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1345  // The set contains all the possible BB nodes to which we may hoist
1346  // original spills.
1348  // Save the BB nodes on the path from the first BB node containing
1349  // non-redundant spill to the Root node.
1351  // All the spills to be hoisted must originate from a single def instruction
1352  // to the OrigReg. It means the def instruction should dominate all the spills
1353  // to be hoisted. We choose the BB where the def instruction is located as
1354  // the Root.
1355  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1356  // For every node on the dominator tree with a spill, walk up on the dominator
1357  // tree towards the Root node until it is reached. If there is another node
1358  // containing a spill in the middle of the path, the previously seen spill will
1359  // be redundant and the node containing it will be removed. All the nodes on
1360  // the path starting from the first node with a non-redundant spill to the Root
1361  // node will be added to the WorkSet, which will contain all the possible
1362  // locations where spills may be hoisted to after the loop below is done.
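  // A small, made-up example: for a dominator tree A -> B -> {C, D} with the
  // def in A (the Root) and original spills in B and D, walking up from D
  // reaches B, which already contains a spill, so D's spill is marked
  // redundant. Walking up from B reaches the Root without meeting another
  // spill, so SpillsToKeep records B (value 0) and WorkSet becomes {A, B},
  // the candidate blocks for hoisting.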
1363  for (const auto Spill : Spills) {
1364  MachineBasicBlock *Block = Spill->getParent();
1365  MachineDomTreeNode *Node = MDT[Block];
1366  MachineInstr *SpillToRm = nullptr;
1367  while (Node != RootIDomNode) {
1368  // If Node dominates Block, and it already contains a spill, the spill in
1369  // Block will be redundant.
1370  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1371  SpillToRm = SpillBBToSpill[MDT[Block]];
1372  break;
1373  /// If we see the Node already in WorkSet, the path from the Node to
1374  /// the Root node must already be traversed by another spill.
1375  /// Then no need to repeat.
1376  } else if (WorkSet.count(Node)) {
1377  break;
1378  } else {
1379  NodesOnPath.insert(Node);
1380  }
1381  Node = Node->getIDom();
1382  }
1383  if (SpillToRm) {
1384  SpillsToRm.push_back(SpillToRm);
1385  } else {
1386  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1387  // set the initial status before hoisting starts. The value of BBs
1388  // containing original spills is set to 0, in order to distinguish them
1389  // from BBs containing hoisted spills, which will be inserted into
1390  // SpillsToKeep later during hoisting.
1391  SpillsToKeep[MDT[Block]] = 0;
1392  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1393  }
1394  NodesOnPath.clear();
1395  }
1396 
1397  // Sort the nodes in WorkSet in top-down order and save the nodes
1398  // in Orders. Orders will be used for hoisting in runHoistSpills.
1399  unsigned idx = 0;
1400  Orders.push_back(MDT.getBase().getNode(Root));
1401  do {
1402  MachineDomTreeNode *Node = Orders[idx++];
1403  for (MachineDomTreeNode *Child : Node->children()) {
1404  if (WorkSet.count(Child))
1405  Orders.push_back(Child);
1406  }
1407  } while (idx != Orders.size());
1408  assert(Orders.size() == WorkSet.size() &&
1409  "Orders have different size with WorkSet");
1410 
1411 #ifndef NDEBUG
1412  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1414  for (; RIt != Orders.rend(); RIt++)
1415  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1416  LLVM_DEBUG(dbgs() << "\n");
1417 #endif
1418 }
1419 
1420 /// Try to hoist spills according to BB hotness. The spills to be removed will
1421 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1422 /// \p SpillsToIns.
1423 void HoistSpillHelper::runHoistSpills(
1424  LiveInterval &OrigLI, VNInfo &OrigVNI,
1426  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1428  // Visit order of dominator tree nodes.
1430  // SpillsToKeep contains all the nodes where spills are to be inserted
1431  // during hoisting. If the spill to be inserted is an original spill
1432  // (not a hoisted one), the value of the map entry is 0. If the spill
1433  // is a hoisted spill, the value of the map entry is the VReg to be used
1434  // as the source of the spill.
1436  // Map from BB to the first spill inside of it.
1438 
1439  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1440 
1441  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1442  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1443  SpillBBToSpill);
1444 
1445  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1446  // a node set and the cost of all the spills inside those nodes.
1447  // The node set contains the locations where spills are to be inserted
1448  // in the subtree of the current node.
1449  using NodesCostPair =
1450  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1452 
1453  // Iterate Orders set in reverse order, which will be a bottom-up order
1454  // in the dominator tree. Once we visit a dom tree node, we know its
1455  // children have already been visited and the spill locations in the
1456  // subtrees of all the children have been determined.
1458  for (; RIt != Orders.rend(); RIt++) {
1459  MachineBasicBlock *Block = (*RIt)->getBlock();
1460 
1461  // If Block contains an original spill, simply continue.
1462  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1463  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1464  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1465  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1466  continue;
1467  }
1468 
1469  // Collect spills in subtree of current node (*RIt) to
1470  // SpillsInSubTreeMap[*RIt].first.
1471  for (MachineDomTreeNode *Child : (*RIt)->children()) {
1472  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1473  continue;
1474  // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1475  // should be placed before getting the begin and end iterators of
1476  // SpillsInSubTreeMap[Child].first, or else the iterators may be
1477  // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
1478  // and the map grows and then the original buckets in the map are moved.
1479  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1480  SpillsInSubTreeMap[*RIt].first;
1481  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1482  SubTreeCost += SpillsInSubTreeMap[Child].second;
1483  auto BI = SpillsInSubTreeMap[Child].first.begin();
1484  auto EI = SpillsInSubTreeMap[Child].first.end();
1485  SpillsInSubTree.insert(BI, EI);
1486  SpillsInSubTreeMap.erase(Child);
1487  }
1488 
1489  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1490  SpillsInSubTreeMap[*RIt].first;
1491  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1492  // No spills in subtree, simply continue.
1493  if (SpillsInSubTree.empty())
1494  continue;
1495 
1496  // Check whether Block is a possible candidate to insert spill.
1497  Register LiveReg;
1498  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1499  continue;
1500 
1501  // If there are multiple spills that could be merged, bias a little
1502  // to hoist the spill.
1503  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1504  ? BranchProbability(9, 10)
1505  : BranchProbability(1, 1);
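    // Illustrative numbers (made up, not from any benchmark): with two
    // mergeable spills in blocks of frequency 60 and 50, SubTreeCost is 110;
    // if the candidate block has frequency 100, the hoisting threshold is
    // 100 * 9/10 = 90, and since 110 > 90 both spills are replaced by a
    // single spill hoisted into the candidate block.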
1506  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1507  // Hoist: Move spills to current Block.
1508  for (const auto SpillBB : SpillsInSubTree) {
1509  // When SpillBB is a BB that contains an original spill, add the spill
1510  // to SpillsToRm.
1511  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1512  !SpillsToKeep[SpillBB]) {
1513  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1514  SpillsToRm.push_back(SpillToRm);
1515  }
1516  // SpillBB will not contain a spill anymore, so remove it from SpillsToKeep.
1517  SpillsToKeep.erase(SpillBB);
1518  }
1519  // Current Block is the BB containing the new hoisted spill. Add it to
1520  // SpillsToKeep. LiveReg is the source of the new spill.
1521  SpillsToKeep[*RIt] = LiveReg;
1522  LLVM_DEBUG({
1523  dbgs() << "spills in BB: ";
1524  for (const auto Rspill : SpillsInSubTree)
1525  dbgs() << Rspill->getBlock()->getNumber() << " ";
1526  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1527  << "\n";
1528  });
1529  SpillsInSubTree.clear();
1530  SpillsInSubTree.insert(*RIt);
1531  SubTreeCost = MBFI.getBlockFreq(Block);
1532  }
1533  }
1534  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1535  // save them to SpillsToIns.
1536  for (const auto &Ent : SpillsToKeep) {
1537  if (Ent.second)
1538  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1539  }
1540 }
1541 
1542 /// For spills with equal values, remove redundant spills and hoist those left
1543 /// to less hot spots.
1544 ///
1545 /// Spills with equal values will be collected into the same set in
1546 /// MergeableSpills when a spill is inserted. These equal spills originate
1547 /// from the same defining instruction and are dominated by it.
1548 /// Before hoisting all the equal spills, redundant spills inside the same
1549 /// BB are first marked to be deleted. Then starting from the spills left, walk
1550 /// up on the dominator tree towards the Root node where the defining instruction
1551 /// is located, mark the dominated spills to be deleted along the way and
1552 /// collect the BB nodes on the path from non-dominated spills to the defining
1553 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1554 /// where we consider hoisting the spills. We iterate the WorkSet in
1555 /// bottom-up order, and for each node, we will decide whether to hoist spills
1556 /// inside its subtree to that node. In this way, we can get benefit locally
1557 /// even if hoisting all the equal spills to one cold place is impossible.
1558 void HoistSpillHelper::hoistAllSpills() {
1559  SmallVector<Register, 4> NewVRegs;
1560  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1561 
1562  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1563  Register Reg = Register::index2VirtReg(i);
1564  Register Original = VRM.getPreSplitReg(Reg);
1565  if (!MRI.def_empty(Reg))
1566  Virt2SiblingsMap[Original].insert(Reg);
1567  }
1568 
1569  // Each entry in MergeableSpills contains a spill set with equal values.
1570  for (auto &Ent : MergeableSpills) {
1571  int Slot = Ent.first.first;
1572  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1573  VNInfo *OrigVNI = Ent.first.second;
1574  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1575  if (Ent.second.empty())
1576  continue;
1577 
1578  LLVM_DEBUG({
1579  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1580  << "Equal spills in BB: ";
1581  for (const auto spill : EqValSpills)
1582  dbgs() << spill->getParent()->getNumber() << " ";
1583  dbgs() << "\n";
1584  });
1585 
1586  // SpillsToRm is the spill set to be removed from EqValSpills.
1587  SmallVector<MachineInstr *, 16> SpillsToRm;
1588  // SpillsToIns is the spill set to be newly inserted after hoisting.
1589  DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1590 
1591  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1592 
1593  LLVM_DEBUG({
1594  dbgs() << "Finally inserted spills in BB: ";
1595  for (const auto &Ispill : SpillsToIns)
1596  dbgs() << Ispill.first->getNumber() << " ";
1597  dbgs() << "\nFinally removed spills in BB: ";
1598  for (const auto Rspill : SpillsToRm)
1599  dbgs() << Rspill->getParent()->getNumber() << " ";
1600  dbgs() << "\n";
1601  });
1602 
1603  // Stack live range update.
1604  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1605  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1606  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1607  StackIntvl.getValNumInfo(0));
1608 
1609  // Insert hoisted spills.
1610  for (auto const &Insert : SpillsToIns) {
1611  MachineBasicBlock *BB = Insert.first;
1612  Register LiveReg = Insert.second;
1613  MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1614  MachineInstrSpan MIS(MII, BB);
1615  TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1616  MRI.getRegClass(LiveReg), &TRI);
1617  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1618  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1619  getVDefInterval(MI, LIS);
1620  ++NumSpills;
1621  }
1622 
1623  // Remove redundant spills or change them to dead instructions.
1624  NumSpills -= SpillsToRm.size();
1625  for (auto const RMEnt : SpillsToRm) {
1626  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1627  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1628  MachineOperand &MO = RMEnt->getOperand(i - 1);
1629  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1630  RMEnt->RemoveOperand(i - 1);
1631  }
1632  }
1633  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1634  }
1635 }
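Aside (not part of InlineSpiller.cpp): hoistAllSpills above walks MergeableSpills, which groups spill instructions by the pair (stack slot, original value number) so that only spills storing the same value to the same slot are merge candidates. The sketch below illustrates that keying scheme with standard containers and integer ids in place of MachineInstr and VNInfo; every name in it is illustrative.

    #include <cstdio>
    #include <map>
    #include <set>
    #include <utility>

    int main() {
      // Key: (stack slot, value number id). Value: the set of spill
      // instructions (represented here by plain ids) that store that value.
      std::map<std::pair<int, unsigned>, std::set<int>> MergeableSpills;

      MergeableSpills[{/*Slot=*/3, /*VN=*/0}].insert(/*spill*/ 101);
      MergeableSpills[{3, 0}].insert(102); // same slot & value: mergeable
      MergeableSpills[{3, 1}].insert(103); // same slot, other value: kept apart

      for (const auto &Ent : MergeableSpills)
        std::printf("slot %d, vn %u: %zu spill(s)\n", Ent.first.first,
                    Ent.first.second, Ent.second.size());
      return 0;
    }

In the real map the value type is a set of MachineInstr pointers and the key's second element is the VNInfo of the original value, but the grouping idea is the same.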
1636 
1637 /// When a virtual register is cloned, the \p New register should be assigned
1638 /// the same physreg or stack slot as the \p Old register.
1639 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1640  if (VRM.hasPhys(Old))
1641  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1642  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1643  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1644  else
1645  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1646  if (VRM.hasShape(Old))
1647  VRM.assignVirt2Shape(New, VRM.getShape(Old));
1648 }
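Aside (not part of InlineSpiller.cpp): the comment at source line 1474 above warns that references and iterators into SpillsInSubTreeMap can be invalidated when the DenseMap grows, which is why the code takes SpillsInSubTreeMap[*RIt] before reading from SpillsInSubTreeMap[Child]. The self-contained sketch below reproduces the hazard and the safe ordering; it assumes only LLVM's ADT headers (plus libSupport for linking), and its variable names are illustrative.

    #include "llvm/ADT/DenseMap.h"
    #include <cstdio>

    int main() {
      llvm::DenseMap<int, int> M;
      M[1] = 10;

      // Hazard: 'Stale' points into M's bucket array. Inserting enough new
      // keys forces the map to reallocate its buckets, so 'Stale' must not be
      // used after the loop below.
      int &Stale = M[1];
      (void)Stale;
      for (int K = 2; K < 100; ++K)
        M[K] = K;

      // Safe ordering (mirroring the comment): perform the insertion that may
      // grow the map first, then take the reference into it.
      int &Fresh = M[1];
      std::printf("%d\n", Fresh); // prints 10
      return 0;
    }

DenseMap differs from std::unordered_map here: growth moves the elements themselves, so both iterators and references become stale, not just iterators.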