//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
    RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false),
                            cl::Hidden,
                            cl::desc("Restrict remat for statepoint operands"));

namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stackslot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find out a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, Register &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(Register, Register) override;
};

class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  Register Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<Register, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object records spills information and does the hoisting.
  HoistSpillHelper HSpiller;

  // Live range weight calculator.
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
                VirtRegAuxInfo &VRAI)
      : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
        LSS(Pass.getAnalysis<LiveStacks>()),
        MDT(Pass.getAnalysis<MachineDominatorTree>()),
        Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
        MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}

//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
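//
// Illustrative example (hypothetical vregs, not output of this pass): after
// splitting, %0 may be left with a sibling %1 that exists only to bridge one
// use:
//
//   %1 = COPY %0
//   STORE %1, ...      ; the single real use
//   %0 = COPY %1
//
// When %0 is spilled, %1 is spilled to the same slot as well, so both copies
// become stack accesses that can coalesce away or fold into the use.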

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
  if (!MI.isFullCopy())
    return Register();
  if (MI.getOperand(0).getReg() == Reg)
    return MI.getOperand(1).getReg();
  if (MI.getOperand(1).getReg() == Reg)
    return MI.getOperand(0).getReg();
  return Register();
}
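
// For example (illustrative): given "%1 = COPY %0", isFullCopyOf(MI, %0)
// yields %1 and isFullCopyOf(MI, %1) yields %0; a subregister copy such as
// "%1.sub_32 = COPY %0" is not a full copy, so the null Register() is
// returned.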

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills.
  // An exception is made for statepoint instructions, which will fold fills
  // into their operands.
  // We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %snip = STATEPOINT %snip in var arg area
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // Number of defs should not exceed 2, not accounting for defs coming from
  // statepoint instructions.
  unsigned NumValNums = SnipLI.getNumValNums();
  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      --NumValNums;
  }
  if (NumValNums > 2)
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_instr_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineInstr &MI :
       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
    Register SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}

bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}

/// It is beneficial to spill to an earlier place in the same BB in the
/// following case:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  Register SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  MachineInstrSpan MIS(MII, MBB);
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI, Register());
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. In X86 AMX, two instructions are required to store, and
  // we disable the merge for that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}
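
// An illustrative scenario (hypothetical MIR): once the value is known to be
// on the stack,
//
//   SPILL %0, %stack.0       ; the spill we just hoisted/kept
//   %1 = COPY %0             ; sibling copy carrying the same value
//   SPILL %1, %stack.0       ; redundant: same value, same slot
//
// the second spill is rewritten to a KILL and queued in DeadDefs so that
// LiveRangeEdit can erase it later.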

//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle here:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs we allow re-materialization for fixed arguments only,
  // hoping that the number of physical registers is enough to cover all fixed
  // arguments. If it is not true we need to revisit it.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    MachineOperand &MO = MI.getOperand(Idx);
    if (MO.isReg() && MO.getReg() == VReg)
      return false;
  }
  return true;
}
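
// For example (illustrative numbers): a STATEPOINT carrying 40 vreg operands
// in its variable area cannot have all 40 rematerialized into registers on a
// 16-GPR target, so with -restrict-statepoint-remat those operands are
// reloaded (foldable into the statepoint) rather than rematted.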

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (MachineOperand &MO : MI.operands())
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
        MO.setIsUndef();
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t        " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}
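
// Illustrative before/after (hypothetical MIR): instead of reloading from the
// stack slot,
//
//   %0 = MOV32ri 42          ; cheap, rematerializable def
//   ...
//   %1 = RELOAD %stack.0     ; would-be reload
//   use %1
//
// remat recreates the def right before the use with a fresh vreg:
//
//   %2 = MOV32ri 42
//   use killed %2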

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable())
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (VNInfo *VNI : LI.vnis()) {
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of an unused reg, we need to check whether it has non-dbg
  // references instead of whether it has a non-empty interval.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}
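
// For example (illustrative): once %0 is assigned to %stack.0, an existing
// "%0 = RELOAD %stack.0" or "SPILL %0, %stack.0" is a no-op round trip
// through the very slot being spilled to, so the instruction is deleted
// outright and the statistics adjusted.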

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump the range of instructions from B to E with their slot indexes.
static LLVM_DUMP_METHOD void dumpMachineInstrRangeWithSlotIndex(
    MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
    LiveIntervals const &LIS, const char *const header,
    Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  Register ImpReg;

  // TII::foldMemoryOperand will do what we need here for statepoint
  // (fold load into use and remove corresponding def). We will replace
  // uses of removed def with loads (spillAroundUses).
  // For that to work we need to untie def and use to pass it through
  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
  // fold them.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);

    // No point restoring an undef read, and we'll produce an invalid live
    // interval.
    // TODO: Is this really the correct way to handle undef tied uses?
    if (MO.isUse() && !MO.readsReg() && !MO.isTied())
      continue;

    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI, MI->getParent());

  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      MachineOperand &MO = MI->getOperand(Idx);
      if (!MO.isTied())
        continue;
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      if (MO.isUse())
        TiedOps.emplace_back(Tied, Idx);
      else {
        assert(MO.isDef() && "Tied to not use and def?");
        TiedOps.emplace_back(Idx, Tied);
      }
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Re-tie operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    Register Reg = MO->getReg();
    if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);

  // If we've folded a store into an instruction labelled with debug-info,
  // record a substitution from the old operand to the memory operand. Handle
  // the simple common case where operand 0 is the one being folded, plus when
  // the destination operand is also a tied def. More values could be
  // substituted / preserved with more analysis.
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    // Helper lambda.
    auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
      // Substitute old operand zero to the new instruction's memory operand.
      unsigned OldOperandNum = Ops[0].second;
      unsigned NewNum = FoldMI->getDebugInstrNum();
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
                                    {NewNum, MachineFunction::DebugOperandMemNumber});
    };

    const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
    if (Ops.size() == 1 && Op0.isDef()) {
      MakeSubstitution();
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      MakeSubstitution();
    }
  } else if (MI->peekDebugInstrNum()) {
    // This is a debug-labelled instruction, but the operand being folded isn't
    // at operand zero. Most likely this means it's a load being folded in.
    // Substitute any register defs from operand zero up to the one being
    // folded -- past that point, we don't know what the new operand indexes
    // will be.
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. In X86 AMX, two instructions are required to store, and
    // we disable the merge for that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}
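
// A typical x86 folding (illustrative): when the value of %1 lives in
// %stack.0,
//
//   %1 = RELOAD %stack.0
//   %2 = ADD32rr %0, %1
//
// becomes a single memory-operand instruction,
//
//   %2 = ADD32rm %0, %stack.0
//
// eliminating both the separate reload and the register pressure of %1.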

void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI, Register());

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}
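
// Example: "%0 = IMPLICIT_DEF" fully defines %0 with an undefined value, so
// no store is needed and isRealSpill returns false. A partial implicit def
// such as "%0.sub_32 = IMPLICIT_DEF" leaves the other lanes meaningful, so
// the spill must still be emitted (returns true).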

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting spills after terminators will
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());
  else
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));

  MachineBasicBlock::iterator Spill = std::next(MI);
  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
    getVDefInterval(MI, LIS);

  LLVM_DEBUG(
      dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
  ++NumSpills;
  // If only one store instruction is required for the spill, add it to the
  // mergeable list. In X86 AMX, two instructions are required to store, and
  // we disable the merge for that case.
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    // Debug values are not allowed to affect codegen.
    if (MI.isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI.getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
      buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
      MBB->erase(MI);
      continue;
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(&MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    Register SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
        SnippetCopies.insert(&MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, &MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, &MI);
  }
}
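
// Net effect per rewritten instruction (illustrative): a use "use %0" becomes
//
//   %new = RELOAD %stack.0
//   use killed %new
//
// and a live def "%0 = def" becomes
//
//   %new = def
//   SPILL %new, %stack.0
//
// with a fresh vreg per instruction, so each remaining live range is tiny.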

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!Register::isStackSlot(edit.getReg()) &&
         "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, VRAI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a live sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could be better here. If LI is not alive in landing pad
    // we could hoist spill after LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with spill, walk up on the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the previously seen spill
  // will be redundant and the node containing it will be removed. All the
  // nodes on the path starting from the first node with non-redundant spill
  // to the Root node will be added to the WorkSet, which will contain all the
  // possible locations where spills may be hoisted to after the loop below is
  // done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
      /// If we see the Node already in WorkSet, the path from the Node to
      /// the Root node must already be traversed by another spill.
      /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting start. The value of BBs
      // containing original spills is set to 0, in order to discriminate
      // them from BBs containing hoisted spills which will be inserted to
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB containing an original spill, insert the
        // spill to SpillsToRm.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
  // save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
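
// Cost intuition (illustrative numbers): two subtree spills in blocks with
// frequencies 60 and 50 give SubTreeCost = 110. Hoisting both into a
// dominating block of frequency 100 is profitable since 110 > 100 * 9/10;
// a single subtree spill of cost 80 uses the 1/1 margin, and 80 > 100 fails,
// so that spill stays where it is.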
1540
1541/// For spills with equal values, remove redundant spills and hoist those left
1542/// to less hot spots.
1543///
1544/// Spills with equal values will be collected into the same set in
1545/// MergeableSpills when spill is inserted. These equal spills are originated
1546/// from the same defining instruction and are dominated by the instruction.
1547/// Before hoisting all the equal spills, redundant spills inside in the same
1548/// BB are first marked to be deleted. Then starting from the spills left, walk
1549/// up on the dominator tree towards the Root node where the define instruction
1550/// is located, mark the dominated spills to be deleted along the way and
1551/// collect the BB nodes on the path from non-dominated spills to the define
1552/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1553/// where we are considering to hoist the spills. We iterate the WorkSet in
1554/// bottom-up order, and for each node, we will decide whether to hoist spills
1555/// inside its subtree to that node. In this way, we can get benefit locally
1556/// even if hoisting all the equal spills to one cold place is impossible.
1557void HoistSpillHelper::hoistAllSpills() {
1558 SmallVector<Register, 4> NewVRegs;
1559 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1560
1561 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1563 Register Original = VRM.getPreSplitReg(Reg);
1564 if (!MRI.def_empty(Reg))
1565 Virt2SiblingsMap[Original].insert(Reg);
1566 }
1567
1568 // Each entry in MergeableSpills contains a spill set with equal values.
1569 for (auto &Ent : MergeableSpills) {
1570 int Slot = Ent.first.first;
1571 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1572 VNInfo *OrigVNI = Ent.first.second;
1573 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1574 if (Ent.second.empty())
1575 continue;
1576
1577 LLVM_DEBUG({
1578 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1579 << "Equal spills in BB: ";
1580 for (const auto spill : EqValSpills)
1581 dbgs() << spill->getParent()->getNumber() << " ";
1582 dbgs() << "\n";
1583 });
1584
1585    // SpillsToRm is the spill set to be removed from EqValSpills.
1586    SmallVector<MachineInstr *, 16> SpillsToRm;
1587    // SpillsToIns is the spill set to be newly inserted after hoisting.
1588    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1589
1590 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1591
1592 LLVM_DEBUG({
1593 dbgs() << "Finally inserted spills in BB: ";
1594 for (const auto &Ispill : SpillsToIns)
1595 dbgs() << Ispill.first->getNumber() << " ";
1596 dbgs() << "\nFinally removed spills in BB: ";
1597 for (const auto Rspill : SpillsToRm)
1598 dbgs() << Rspill->getParent()->getNumber() << " ";
1599 dbgs() << "\n";
1600 });
1601
1602 // Stack live range update.
1603 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1604 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1605 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1606 StackIntvl.getValNumInfo(0));
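    // Illustrative note (not from the original source): hoisting moves the
    // stores that write the slot, so the slot's live range must be widened;
    // the merge copies OrigVNI's segments from the original interval into
    // the stack interval under the slot's single value number.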
1607
1608 // Insert hoisted spills.
1609 for (auto const &Insert : SpillsToIns) {
1610 MachineBasicBlock *BB = Insert.first;
1611 Register LiveReg = Insert.second;
1612 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1613 MachineInstrSpan MIS(MII, BB);
1614 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1615 MRI.getRegClass(LiveReg), &TRI, Register());
1616 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1617 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1618 getVDefInterval(MI, LIS);
1619 ++NumSpills;
1620 }
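    // Illustrative note (not from the original source): MachineInstrSpan
    // brackets whatever instructions storeRegToStackSlot emitted, so
    // InsertMachineInstrRangeInMaps can assign slot indexes to exactly that
    // range, and getVDefInterval then computes intervals for any virtual
    // registers those new instructions define.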
1621
1622 // Remove redundant spills or change them to dead instructions.
1623 NumSpills -= SpillsToRm.size();
1624 for (auto *const RMEnt : SpillsToRm) {
1625 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1626 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1627 MachineOperand &MO = RMEnt->getOperand(i - 1);
1628 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1629 RMEnt->removeOperand(i - 1);
1630 }
1631 }
1632 Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
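    // Illustrative note (not from the original source): the redundant spills
    // are not erased directly; rewritten as KILLs with their live implicit
    // defs stripped, they become trivially dead, and eliminateDeadDefs then
    // deletes them while updating the affected live ranges.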
1633 }
1634}
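
// A minimal, self-contained sketch (not part of InlineSpiller.cpp) of the
// bottom-up hoisting decision described above. Block frequencies are modeled
// as plain doubles and the dominator tree as parent indices; the real pass
// works on MachineBlockFrequencyInfo and the machine dominator tree and
// applies a branch-probability margin in the comparison. All names below are
// hypothetical.
#include <cstdio>
#include <vector>

struct Node {
  int Parent;      // index of the immediate dominator, -1 for the root
  double Freq;     // modeled block frequency
  bool HasSpill;   // whether this block starts out containing an equal spill
};

struct Result {
  std::vector<int> SpillBlocks; // blocks still holding spills after hoisting
  double Cost = 0.0;            // summed frequency of those blocks
};

// Decide bottom-up, for every subtree, whether one spill in the subtree root
// is cheaper than the spills currently placed below it.
static Result hoistSubtree(const std::vector<Node> &Tree, int N) {
  Result R;
  if (Tree[N].HasSpill) {
    R.SpillBlocks.push_back(N);
    R.Cost = Tree[N].Freq;
  }
  for (int C = 0, E = (int)Tree.size(); C != E; ++C) {
    if (Tree[C].Parent != N)
      continue;
    Result Child = hoistSubtree(Tree, C);
    R.SpillBlocks.insert(R.SpillBlocks.end(), Child.SpillBlocks.begin(),
                         Child.SpillBlocks.end());
    R.Cost += Child.Cost;
  }
  // Hoist: a single spill in N replaces every spill below it when N is
  // strictly colder than their accumulated cost.
  if (!R.SpillBlocks.empty() && Tree[N].Freq < R.Cost) {
    R.SpillBlocks.assign(1, N);
    R.Cost = Tree[N].Freq;
  }
  return R;
}

int main() {
  // BB0 dominates BB1; BB1 dominates the hot blocks BB2 and BB3, which each
  // contain an equal spill. Hoisting to BB1 pays off (5 < 4 + 3); hoisting
  // further to BB0 does not (8 > 5).
  std::vector<Node> Tree = {{-1, 8.0, false},  // BB0
                            {0, 5.0, false},   // BB1
                            {1, 4.0, true},    // BB2
                            {1, 3.0, true}};   // BB3
  for (int BB : hoistSubtree(Tree, 0).SpillBlocks)
    std::printf("spill placed in BB%d\n", BB); // prints: spill placed in BB1
  return 0;
}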
1635
1636/// For a VirtReg clone, the \p New register should have the same physreg or
1637/// stackslot as the \p Old register.
1638void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1639 if (VRM.hasPhys(Old))
1640 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1641 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1642 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1643 else
1644 llvm_unreachable("VReg should be assigned either physreg or stackslot");
1645 if (VRM.hasShape(Old))
1646 VRM.assignVirt2Shape(New, VRM.getShape(Old));
1647}
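// Illustrative note (not from the original source): the shape assignment
// keeps target-specific register shape information, such as the tile shapes
// used for X86 AMX registers, in sync between the old register and its clone.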