LLVM 20.0.0git
InlineSpiller.cpp
Go to the documentation of this file.
1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The inline spiller modifies the machine function directly instead of
10// inserting spills and restores in VirtRegMap.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SplitKit.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/MapVector.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/Statistic.h"
45#include "llvm/Config/llvm-config.h"
50#include "llvm/Support/Debug.h"
53#include <cassert>
54#include <iterator>
55#include <tuple>
56#include <utility>
57
58using namespace llvm;
59
60#define DEBUG_TYPE "regalloc"
61
62STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
63STATISTIC(NumSnippets, "Number of spilled snippets");
64STATISTIC(NumSpills, "Number of spills inserted");
65STATISTIC(NumSpillsRemoved, "Number of spills removed");
66STATISTIC(NumReloads, "Number of reloads inserted");
67STATISTIC(NumReloadsRemoved, "Number of reloads removed");
68STATISTIC(NumFolded, "Number of folded stack accesses");
69STATISTIC(NumFoldedLoads, "Number of folded loads");
70STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
71
72static cl::opt<bool>
73RestrictStatepointRemat("restrict-statepoint-remat",
74 cl::init(false), cl::Hidden,
75 cl::desc("Restrict remat for statepoint operands"));
76
77namespace {
78class HoistSpillHelper : private LiveRangeEdit::Delegate {
80 LiveIntervals &LIS;
81 LiveStacks &LSS;
83 VirtRegMap &VRM;
85 const TargetInstrInfo &TII;
87 const MachineBlockFrequencyInfo &MBFI;
88
90
91 // Map from StackSlot to the LiveInterval of the original register.
92 // Note the LiveInterval of the original register may have been deleted
93 // after it is spilled. We keep a copy here to track the range where
94 // spills can be moved.
96
97 // Map from pair of (StackSlot and Original VNI) to a set of spills which
98 // have the same stackslot and have equal values defined by Original VNI.
99 // These spills are mergeable and are hoist candidates.
100 using MergeableSpillsMap =
102 MergeableSpillsMap MergeableSpills;
103
104 /// This is the map from original register to a set containing all its
105 /// siblings. To hoist a spill to another BB, we need to find out a live
106 /// sibling there and use it as the source of the new spill.
108
109 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
110 MachineBasicBlock &BB, Register &LiveReg);
111
112 void rmRedundantSpills(
116
117 void getVisitOrders(
123
124 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
128
129public:
130 HoistSpillHelper(const Spiller::RequiredAnalyses &Analyses,
131 MachineFunction &mf, VirtRegMap &vrm)
132 : MF(mf), LIS(Analyses.LIS), LSS(Analyses.LSS), MDT(Analyses.MDT),
133 VRM(vrm), MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
134 TRI(*mf.getSubtarget().getRegisterInfo()), MBFI(Analyses.MBFI),
135 IPA(LIS, mf.getNumBlockIDs()) {}
136
137 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
138 unsigned Original);
139 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
140 void hoistAllSpills();
141 void LRE_DidCloneVirtReg(Register, Register) override;
142};
143
144class InlineSpiller : public Spiller {
145 MachineFunction &MF;
146 LiveIntervals &LIS;
147 LiveStacks &LSS;
148 VirtRegMap &VRM;
150 const TargetInstrInfo &TII;
151 const TargetRegisterInfo &TRI;
152
153 // Variables that are valid during spill(), but used by multiple methods.
154 LiveRangeEdit *Edit = nullptr;
155 LiveInterval *StackInt = nullptr;
156 int StackSlot;
157 Register Original;
158
159 // All registers to spill to StackSlot, including the main register.
160 SmallVector<Register, 8> RegsToSpill;
161
162 // All registers that were replaced by the spiller through some other method,
163 // e.g. rematerialization.
164 SmallVector<Register, 8> RegsReplaced;
165
166 // All COPY instructions to/from snippets.
167 // They are ignored since both operands refer to the same stack slot.
168 // For bundled copies, this will only include the first header copy.
169 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
170
171 // Values that failed to remat at some point.
172 SmallPtrSet<VNInfo*, 8> UsedValues;
173
174 // Dead defs generated during spilling.
176
177 // Object records spills information and does the hoisting.
178 HoistSpillHelper HSpiller;
179
180 // Live range weight calculator.
181 VirtRegAuxInfo &VRAI;
182
183 ~InlineSpiller() override = default;
184
185public:
186 InlineSpiller(const Spiller::RequiredAnalyses &Analyses, MachineFunction &MF,
187 VirtRegMap &VRM, VirtRegAuxInfo &VRAI)
188 : MF(MF), LIS(Analyses.LIS), LSS(Analyses.LSS), VRM(VRM),
189 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
190 TRI(*MF.getSubtarget().getRegisterInfo()), HSpiller(Analyses, MF, VRM),
191 VRAI(VRAI) {}
192
193 void spill(LiveRangeEdit &) override;
194 ArrayRef<Register> getSpilledRegs() override { return RegsToSpill; }
195 ArrayRef<Register> getReplacedRegs() override { return RegsReplaced; }
196 void postOptimization() override;
197
198private:
199 bool isSnippet(const LiveInterval &SnipLI);
200 void collectRegsToSpill();
201
202 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
203
204 bool isSibling(Register Reg);
205 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
206 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
207
208 void markValueUsed(LiveInterval*, VNInfo*);
209 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
210 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
211 void reMaterializeAll();
212
213 bool coalesceStackAccess(MachineInstr *MI, Register Reg);
214 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
215 MachineInstr *LoadMI = nullptr);
216 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
217 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
218
219 void spillAroundUses(Register Reg);
220 void spillAll();
221};
222
223} // end anonymous namespace
224
225Spiller::~Spiller() = default;
226
227void Spiller::anchor() {}
228
229Spiller *
230llvm::createInlineSpiller(const InlineSpiller::RequiredAnalyses &Analyses,
231 MachineFunction &MF, VirtRegMap &VRM,
232 VirtRegAuxInfo &VRAI) {
233 return new InlineSpiller(Analyses, MF, VRM, VRAI);
234}
235
236//===----------------------------------------------------------------------===//
237// Snippets
238//===----------------------------------------------------------------------===//
239
240// When spilling a virtual register, we also spill any snippets it is connected
241// to. The snippets are small live ranges that only have a single real use,
242// leftovers from live range splitting. Spilling them enables memory operand
243// folding or tightens the live range around the single use.
244//
245// This minimizes register pressure and maximizes the store-to-load distance for
246// spill slots which can be important in tight loops.
247
248/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
249/// otherwise return 0.
251 const TargetInstrInfo &TII) {
252 if (!TII.isCopyInstr(MI))
253 return Register();
254
255 const MachineOperand &DstOp = MI.getOperand(0);
256 const MachineOperand &SrcOp = MI.getOperand(1);
257
258 // TODO: Probably only worth allowing subreg copies with undef dests.
259 if (DstOp.getSubReg() != SrcOp.getSubReg())
260 return Register();
261 if (DstOp.getReg() == Reg)
262 return SrcOp.getReg();
263 if (SrcOp.getReg() == Reg)
264 return DstOp.getReg();
265 return Register();
266}
267
268/// Check for a copy bundle as formed by SplitKit.
269static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
270 const TargetInstrInfo &TII) {
271 if (!FirstMI.isBundled())
272 return isCopyOf(FirstMI, Reg, TII);
273
274 assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() &&
275 "expected to see first instruction in bundle");
276
277 Register SnipReg;
279 while (I->isBundledWithSucc()) {
280 const MachineInstr &MI = *I;
281 auto CopyInst = TII.isCopyInstr(MI);
282 if (!CopyInst)
283 return Register();
284
285 const MachineOperand &DstOp = *CopyInst->Destination;
286 const MachineOperand &SrcOp = *CopyInst->Source;
287 if (DstOp.getReg() == Reg) {
288 if (!SnipReg)
289 SnipReg = SrcOp.getReg();
290 else if (SnipReg != SrcOp.getReg())
291 return Register();
292 } else if (SrcOp.getReg() == Reg) {
293 if (!SnipReg)
294 SnipReg = DstOp.getReg();
295 else if (SnipReg != DstOp.getReg())
296 return Register();
297 }
298
299 ++I;
300 }
301
302 return Register();
303}
304
305static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
306 for (const MachineOperand &MO : MI.all_defs())
307 if (MO.getReg().isVirtual())
308 LIS.getInterval(MO.getReg());
309}
310
311/// isSnippet - Identify if a live interval is a snippet that should be spilled.
312/// It is assumed that SnipLI is a virtual register with the same original as
313/// Edit->getReg().
314bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
315 Register Reg = Edit->getReg();
316
317 // A snippet is a tiny live range with only a single instruction using it
318 // besides copies to/from Reg or spills/fills.
319 // Exception is done for statepoint instructions which will fold fills
320 // into their operands.
321 // We accept:
322 //
323 // %snip = COPY %Reg / FILL fi#
324 // %snip = USE %snip
325 // %snip = STATEPOINT %snip in var arg area
326 // %Reg = COPY %snip / SPILL %snip, fi#
327 //
328 if (!LIS.intervalIsInOneMBB(SnipLI))
329 return false;
330
331 // Number of defs should not exceed 2 not accounting defs coming from
332 // statepoint instructions.
333 unsigned NumValNums = SnipLI.getNumValNums();
334 for (auto *VNI : SnipLI.vnis()) {
335 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
336 if (MI->getOpcode() == TargetOpcode::STATEPOINT)
337 --NumValNums;
338 }
339 if (NumValNums > 2)
340 return false;
341
342 MachineInstr *UseMI = nullptr;
343
344 // Check that all uses satisfy our criteria.
346 RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
347 E = MRI.reg_bundle_nodbg_end();
348 RI != E;) {
349 MachineInstr &MI = *RI++;
350
351 // Allow copies to/from Reg.
352 if (isCopyOfBundle(MI, Reg, TII))
353 continue;
354
355 // Allow stack slot loads.
356 int FI;
357 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
358 continue;
359
360 // Allow stack slot stores.
361 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
362 continue;
363
364 if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
365 continue;
366
367 // Allow a single additional instruction.
368 if (UseMI && &MI != UseMI)
369 return false;
370 UseMI = &MI;
371 }
372 return true;
373}
374
375/// collectRegsToSpill - Collect live range snippets that only have a single
376/// real use.
377void InlineSpiller::collectRegsToSpill() {
378 Register Reg = Edit->getReg();
379
380 // Main register always spills.
381 RegsToSpill.assign(1, Reg);
382 SnippetCopies.clear();
383 RegsReplaced.clear();
384
385 // Snippets all have the same original, so there can't be any for an original
386 // register.
387 if (Original == Reg)
388 return;
389
390 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
391 Register SnipReg = isCopyOfBundle(MI, Reg, TII);
392 if (!isSibling(SnipReg))
393 continue;
394 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
395 if (!isSnippet(SnipLI))
396 continue;
397 SnippetCopies.insert(&MI);
398 if (isRegToSpill(SnipReg))
399 continue;
400 RegsToSpill.push_back(SnipReg);
401 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
402 ++NumSnippets;
403 }
404}
405
406bool InlineSpiller::isSibling(Register Reg) {
407 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
408}
409
410/// It is beneficial to spill to earlier place in the same BB in case
411/// as follows:
412/// There is an alternative def earlier in the same MBB.
413/// Hoist the spill as far as possible in SpillMBB. This can ease
414/// register pressure:
415///
416/// x = def
417/// y = use x
418/// s = copy x
419///
420/// Hoisting the spill of s to immediately after the def removes the
421/// interference between x and y:
422///
423/// x = def
424/// spill x
425/// y = use killed x
426///
427/// This hoist only helps when the copy kills its source.
428///
429bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
430 MachineInstr &CopyMI) {
431 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
432#ifndef NDEBUG
433 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
434 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
435#endif
436
437 Register SrcReg = CopyMI.getOperand(1).getReg();
438 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
439 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
440 LiveQueryResult SrcQ = SrcLI.Query(Idx);
441 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
442 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
443 return false;
444
445 // Conservatively extend the stack slot range to the range of the original
446 // value. We may be able to do better with stack slot coloring by being more
447 // careful here.
448 assert(StackInt && "No stack slot assigned yet.");
449 LiveInterval &OrigLI = LIS.getInterval(Original);
450 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
451 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
452 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
453 << *StackInt << '\n');
454
455 // We are going to spill SrcVNI immediately after its def, so clear out
456 // any later spills of the same value.
457 eliminateRedundantSpills(SrcLI, SrcVNI);
458
459 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
461 if (SrcVNI->isPHIDef())
462 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin(), SrcReg);
463 else {
464 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
465 assert(DefMI && "Defining instruction disappeared");
466 MII = DefMI;
467 ++MII;
468 }
469 MachineInstrSpan MIS(MII, MBB);
470 // Insert spill without kill flag immediately after def.
471 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
472 MRI.getRegClass(SrcReg), &TRI, Register());
473 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
474 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
475 getVDefInterval(MI, LIS);
476 --MII; // Point to store instruction.
477 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
478
479 // If there is only 1 store instruction is required for spill, add it
480 // to mergeable list. In X86 AMX, 2 intructions are required to store.
481 // We disable the merge for this case.
482 if (MIS.begin() == MII)
483 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
484 ++NumSpills;
485 return true;
486}
487
488/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
489/// redundant spills of this value in SLI.reg and sibling copies.
490void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
491 assert(VNI && "Missing value");
493 WorkList.push_back(std::make_pair(&SLI, VNI));
494 assert(StackInt && "No stack slot assigned yet.");
495
496 do {
497 LiveInterval *LI;
498 std::tie(LI, VNI) = WorkList.pop_back_val();
499 Register Reg = LI->reg();
500 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
501 << VNI->def << " in " << *LI << '\n');
502
503 // Regs to spill are taken care of.
504 if (isRegToSpill(Reg))
505 continue;
506
507 // Add all of VNI's live range to StackInt.
508 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
509 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
510
511 // Find all spills and copies of VNI.
512 for (MachineInstr &MI :
513 llvm::make_early_inc_range(MRI.use_nodbg_bundles(Reg))) {
514 if (!MI.mayStore() && !TII.isCopyInstr(MI))
515 continue;
516 SlotIndex Idx = LIS.getInstructionIndex(MI);
517 if (LI->getVNInfoAt(Idx) != VNI)
518 continue;
519
520 // Follow sibling copies down the dominator tree.
521 if (Register DstReg = isCopyOfBundle(MI, Reg, TII)) {
522 if (isSibling(DstReg)) {
523 LiveInterval &DstLI = LIS.getInterval(DstReg);
524 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
525 assert(DstVNI && "Missing defined value");
526 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
527
528 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
529 }
530 continue;
531 }
532
533 // Erase spills.
534 int FI;
535 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
536 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
537 // eliminateDeadDefs won't normally remove stores, so switch opcode.
538 MI.setDesc(TII.get(TargetOpcode::KILL));
539 DeadDefs.push_back(&MI);
540 ++NumSpillsRemoved;
541 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
542 --NumSpills;
543 }
544 }
545 } while (!WorkList.empty());
546}
547
548//===----------------------------------------------------------------------===//
549// Rematerialization
550//===----------------------------------------------------------------------===//
551
552/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
553/// instruction cannot be eliminated. See through snippet copies
554void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
556 WorkList.push_back(std::make_pair(LI, VNI));
557 do {
558 std::tie(LI, VNI) = WorkList.pop_back_val();
559 if (!UsedValues.insert(VNI).second)
560 continue;
561
562 if (VNI->isPHIDef()) {
563 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
564 for (MachineBasicBlock *P : MBB->predecessors()) {
565 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
566 if (PVNI)
567 WorkList.push_back(std::make_pair(LI, PVNI));
568 }
569 continue;
570 }
571
572 // Follow snippet copies.
573 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
574 if (!SnippetCopies.count(MI))
575 continue;
576 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
577 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
578 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
579 assert(SnipVNI && "Snippet undefined before copy");
580 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
581 } while (!WorkList.empty());
582}
583
584bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
585 MachineInstr &MI) {
587 return true;
588 // Here's a quick explanation of the problem we're trying to handle here:
589 // * There are some pseudo instructions with more vreg uses than there are
590 // physical registers on the machine.
591 // * This is normally handled by spilling the vreg, and folding the reload
592 // into the user instruction. (Thus decreasing the number of used vregs
593 // until the remainder can be assigned to physregs.)
594 // * However, since we may try to spill vregs in any order, we can end up
595 // trying to spill each operand to the instruction, and then rematting it
596 // instead. When that happens, the new live intervals (for the remats) are
597 // expected to be trivially assignable (i.e. RS_Done). However, since we
598 // may have more remats than physregs, we're guaranteed to fail to assign
599 // one.
600 // At the moment, we only handle this for STATEPOINTs since they're the only
601 // pseudo op where we've seen this. If we start seeing other instructions
602 // with the same problem, we need to revisit this.
603 if (MI.getOpcode() != TargetOpcode::STATEPOINT)
604 return true;
605 // For STATEPOINTs we allow re-materialization for fixed arguments only hoping
606 // that number of physical registers is enough to cover all fixed arguments.
607 // If it is not true we need to revisit it.
608 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
609 EndIdx = MI.getNumOperands();
610 Idx < EndIdx; ++Idx) {
611 MachineOperand &MO = MI.getOperand(Idx);
612 if (MO.isReg() && MO.getReg() == VReg)
613 return false;
614 }
615 return true;
616}
617
618/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
619bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
620 // Analyze instruction
622 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
623
624 if (!RI.Reads)
625 return false;
626
627 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
628 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
629
630 if (!ParentVNI) {
631 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
632 for (MachineOperand &MO : MI.all_uses())
633 if (MO.getReg() == VirtReg.reg())
634 MO.setIsUndef();
635 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
636 return true;
637 }
638
639 if (SnippetCopies.count(&MI))
640 return false;
641
642 LiveInterval &OrigLI = LIS.getInterval(Original);
643 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
644 LiveRangeEdit::Remat RM(ParentVNI);
645 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
646
647 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
648 markValueUsed(&VirtReg, ParentVNI);
649 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
650 return false;
651 }
652
653 // If the instruction also writes VirtReg.reg, it had better not require the
654 // same register for uses and defs.
655 if (RI.Tied) {
656 markValueUsed(&VirtReg, ParentVNI);
657 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
658 return false;
659 }
660
661 // Before rematerializing into a register for a single instruction, try to
662 // fold a load into the instruction. That avoids allocating a new register.
663 if (RM.OrigMI->canFoldAsLoad() &&
664 foldMemoryOperand(Ops, RM.OrigMI)) {
665 Edit->markRematerialized(RM.ParentVNI);
666 ++NumFoldedLoads;
667 return true;
668 }
669
670 // If we can't guarantee that we'll be able to actually assign the new vreg,
671 // we can't remat.
672 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
673 markValueUsed(&VirtReg, ParentVNI);
674 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
675 return false;
676 }
677
678 // Allocate a new register for the remat.
679 Register NewVReg = Edit->createFrom(Original);
680
681 // Finally we can rematerialize OrigMI before MI.
682 SlotIndex DefIdx =
683 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
684
685 // We take the DebugLoc from MI, since OrigMI may be attributed to a
686 // different source location.
687 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
688 NewMI->setDebugLoc(MI.getDebugLoc());
689
690 (void)DefIdx;
691 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
692 << *LIS.getInstructionFromIndex(DefIdx));
693
694 // Replace operands
695 for (const auto &OpPair : Ops) {
696 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
697 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
698 MO.setReg(NewVReg);
699 MO.setIsKill();
700 }
701 }
702 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
703
704 ++NumRemats;
705 return true;
706}
707
708/// reMaterializeAll - Try to rematerialize as many uses as possible,
709/// and trim the live ranges after.
710void InlineSpiller::reMaterializeAll() {
711 if (!Edit->anyRematerializable())
712 return;
713
714 UsedValues.clear();
715
716 // Try to remat before all uses of snippets.
717 bool anyRemat = false;
718 for (Register Reg : RegsToSpill) {
719 LiveInterval &LI = LIS.getInterval(Reg);
720 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
721 // Debug values are not allowed to affect codegen.
722 if (MI.isDebugValue())
723 continue;
724
725 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
726 "instruction that isn't a DBG_VALUE");
727
728 anyRemat |= reMaterializeFor(LI, MI);
729 }
730 }
731 if (!anyRemat)
732 return;
733
734 // Remove any values that were completely rematted.
735 for (Register Reg : RegsToSpill) {
736 LiveInterval &LI = LIS.getInterval(Reg);
737 for (VNInfo *VNI : LI.vnis()) {
738 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
739 continue;
740 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
741 MI->addRegisterDead(Reg, &TRI);
742 if (!MI->allDefsAreDead())
743 continue;
744 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
745 DeadDefs.push_back(MI);
746 // If MI is a bundle header, also try removing copies inside the bundle,
747 // otherwise the verifier would complain "live range continues after dead
748 // def flag".
749 if (MI->isBundledWithSucc() && !MI->isBundledWithPred()) {
750 MachineBasicBlock::instr_iterator BeginIt = MI->getIterator(),
751 EndIt = MI->getParent()->instr_end();
752 ++BeginIt; // Skip MI that was already handled.
753
754 bool OnlyDeadCopies = true;
755 for (MachineBasicBlock::instr_iterator It = BeginIt;
756 It != EndIt && It->isBundledWithPred(); ++It) {
757
758 auto DestSrc = TII.isCopyInstr(*It);
759 bool IsCopyToDeadReg =
760 DestSrc && DestSrc->Destination->getReg() == Reg;
761 if (!IsCopyToDeadReg) {
762 OnlyDeadCopies = false;
763 break;
764 }
765 }
766 if (OnlyDeadCopies) {
767 for (MachineBasicBlock::instr_iterator It = BeginIt;
768 It != EndIt && It->isBundledWithPred(); ++It) {
769 It->addRegisterDead(Reg, &TRI);
770 LLVM_DEBUG(dbgs() << "All defs dead: " << *It);
771 DeadDefs.push_back(&*It);
772 }
773 }
774 }
775 }
776 }
777
778 // Eliminate dead code after remat. Note that some snippet copies may be
779 // deleted here.
780 if (DeadDefs.empty())
781 return;
782 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
783 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
784
785 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
786 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
787 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
788 // removed, PHI VNI are still left in the LiveInterval.
789 // So to get rid of unused reg, we need to check whether it has non-dbg
790 // reference instead of whether it has non-empty interval.
791 unsigned ResultPos = 0;
792 for (Register Reg : RegsToSpill) {
793 if (MRI.reg_nodbg_empty(Reg)) {
794 Edit->eraseVirtReg(Reg);
795 RegsReplaced.push_back(Reg);
796 continue;
797 }
798
799 assert(LIS.hasInterval(Reg) &&
800 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
801 "Empty and not used live-range?!");
802
803 RegsToSpill[ResultPos++] = Reg;
804 }
805 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
806 LLVM_DEBUG(dbgs() << RegsToSpill.size()
807 << " registers to spill after remat.\n");
808}
809
810//===----------------------------------------------------------------------===//
811// Spilling
812//===----------------------------------------------------------------------===//
813
814/// If MI is a load or store of StackSlot, it can be removed.
815bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
816 int FI = 0;
817 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
818 bool IsLoad = InstrReg;
819 if (!IsLoad)
820 InstrReg = TII.isStoreToStackSlot(*MI, FI);
821
822 // We have a stack access. Is it the right register and slot?
823 if (InstrReg != Reg || FI != StackSlot)
824 return false;
825
826 if (!IsLoad)
827 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
828
829 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
830 LIS.RemoveMachineInstrFromMaps(*MI);
831 MI->eraseFromParent();
832
833 if (IsLoad) {
834 ++NumReloadsRemoved;
835 --NumReloads;
836 } else {
837 ++NumSpillsRemoved;
838 --NumSpills;
839 }
840
841 return true;
842}
843
844#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
846// Dump the range of instructions from B to E with their slot indexes.
849 LiveIntervals const &LIS,
850 const char *const header,
851 Register VReg = Register()) {
852 char NextLine = '\n';
853 char SlotIndent = '\t';
854
855 if (std::next(B) == E) {
856 NextLine = ' ';
857 SlotIndent = ' ';
858 }
859
860 dbgs() << '\t' << header << ": " << NextLine;
861
862 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
864
865 // If a register was passed in and this instruction has it as a
866 // destination that is marked as an early clobber, print the
867 // early-clobber slot index.
868 if (VReg) {
869 MachineOperand *MO = I->findRegisterDefOperand(VReg, /*TRI=*/nullptr);
870 if (MO && MO->isEarlyClobber())
871 Idx = Idx.getRegSlot(true);
872 }
873
874 dbgs() << SlotIndent << Idx << '\t' << *I;
875 }
876}
877#endif
878
879/// foldMemoryOperand - Try folding stack slot references in Ops into their
880/// instructions.
881///
882/// @param Ops Operand indices from AnalyzeVirtRegInBundle().
883/// @param LoadMI Load instruction to use instead of stack slot when non-null.
884/// @return True on success.
885bool InlineSpiller::
886foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
887 MachineInstr *LoadMI) {
888 if (Ops.empty())
889 return false;
890 // Don't attempt folding in bundles.
891 MachineInstr *MI = Ops.front().first;
892 if (Ops.back().first != MI || MI->isBundled())
893 return false;
894
895 bool WasCopy = TII.isCopyInstr(*MI).has_value();
896 Register ImpReg;
897
898 // TII::foldMemoryOperand will do what we need here for statepoint
899 // (fold load into use and remove corresponding def). We will replace
900 // uses of removed def with loads (spillAroundUses).
901 // For that to work we need to untie def and use to pass it through
902 // foldMemoryOperand and signal foldPatchpoint that it is allowed to
903 // fold them.
904 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
905
906 // Spill subregs if the target allows it.
907 // We always want to spill subregs for stackmap/patchpoint pseudos.
908 bool SpillSubRegs = TII.isSubregFoldable() ||
909 MI->getOpcode() == TargetOpcode::STATEPOINT ||
910 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
911 MI->getOpcode() == TargetOpcode::STACKMAP;
912
913 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
914 // operands.
916 for (const auto &OpPair : Ops) {
917 unsigned Idx = OpPair.second;
918 assert(MI == OpPair.first && "Instruction conflict during operand folding");
919 MachineOperand &MO = MI->getOperand(Idx);
920
921 // No point restoring an undef read, and we'll produce an invalid live
922 // interval.
923 // TODO: Is this really the correct way to handle undef tied uses?
924 if (MO.isUse() && !MO.readsReg() && !MO.isTied())
925 continue;
926
927 if (MO.isImplicit()) {
928 ImpReg = MO.getReg();
929 continue;
930 }
931
932 if (!SpillSubRegs && MO.getSubReg())
933 return false;
934 // We cannot fold a load instruction into a def.
935 if (LoadMI && MO.isDef())
936 return false;
937 // Tied use operands should not be passed to foldMemoryOperand.
938 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
939 FoldOps.push_back(Idx);
940 }
941
942 // If we only have implicit uses, we won't be able to fold that.
943 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
944 if (FoldOps.empty())
945 return false;
946
947 MachineInstrSpan MIS(MI, MI->getParent());
948
950 if (UntieRegs)
951 for (unsigned Idx : FoldOps) {
952 MachineOperand &MO = MI->getOperand(Idx);
953 if (!MO.isTied())
954 continue;
955 unsigned Tied = MI->findTiedOperandIdx(Idx);
956 if (MO.isUse())
957 TiedOps.emplace_back(Tied, Idx);
958 else {
959 assert(MO.isDef() && "Tied to not use and def?");
960 TiedOps.emplace_back(Idx, Tied);
961 }
962 MI->untieRegOperand(Idx);
963 }
964
965 MachineInstr *FoldMI =
966 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
967 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
968 if (!FoldMI) {
969 // Re-tie operands.
970 for (auto Tied : TiedOps)
971 MI->tieOperands(Tied.first, Tied.second);
972 return false;
973 }
974
975 // Remove LIS for any dead defs in the original MI not in FoldMI.
976 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
977 if (!MO->isReg())
978 continue;
979 Register Reg = MO->getReg();
980 if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
981 continue;
982 }
983 // Skip non-Defs, including undef uses and internal reads.
984 if (MO->isUse())
985 continue;
986 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
987 if (RI.FullyDefined)
988 continue;
989 // FoldMI does not define this physreg. Remove the LI segment.
990 assert(MO->isDead() && "Cannot fold physreg def");
992 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
993 }
994
995 int FI;
996 if (TII.isStoreToStackSlot(*MI, FI) &&
997 HSpiller.rmFromMergeableSpills(*MI, FI))
998 --NumSpills;
999 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
1000 // Update the call info.
1001 if (MI->isCandidateForAdditionalCallInfo())
1002 MI->getMF()->moveAdditionalCallInfo(MI, FoldMI);
1003
1004 // If we've folded a store into an instruction labelled with debug-info,
1005 // record a substitution from the old operand to the memory operand. Handle
1006 // the simple common case where operand 0 is the one being folded, plus when
1007 // the destination operand is also a tied def. More values could be
1008 // substituted / preserved with more analysis.
1009 if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
1010 // Helper lambda.
1011 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
1012 // Substitute old operand zero to the new instructions memory operand.
1013 unsigned OldOperandNum = Ops[0].second;
1014 unsigned NewNum = FoldMI->getDebugInstrNum();
1015 unsigned OldNum = MI->getDebugInstrNum();
1016 MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
1018 };
1019
1020 const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
1021 if (Ops.size() == 1 && Op0.isDef()) {
1022 MakeSubstitution();
1023 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
1024 Op0.getReg() == MI->getOperand(1).getReg()) {
1025 MakeSubstitution();
1026 }
1027 } else if (MI->peekDebugInstrNum()) {
1028 // This is a debug-labelled instruction, but the operand being folded isn't
1029 // at operand zero. Most likely this means it's a load being folded in.
1030 // Substitute any register defs from operand zero up to the one being
1031 // folded -- past that point, we don't know what the new operand indexes
1032 // will be.
1033 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
1034 }
1035
1036 MI->eraseFromParent();
1037
1038 // Insert any new instructions other than FoldMI into the LIS maps.
1039 assert(!MIS.empty() && "Unexpected empty span of instructions!");
1040 for (MachineInstr &MI : MIS)
1041 if (&MI != FoldMI)
1043
1044 // TII.foldMemoryOperand may have left some implicit operands on the
1045 // instruction. Strip them.
1046 if (ImpReg)
1047 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
1048 MachineOperand &MO = FoldMI->getOperand(i - 1);
1049 if (!MO.isReg() || !MO.isImplicit())
1050 break;
1051 if (MO.getReg() == ImpReg)
1052 FoldMI->removeOperand(i - 1);
1053 }
1054
1055 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
1056 "folded"));
1057
1058 if (!WasCopy)
1059 ++NumFolded;
1060 else if (Ops.front().second == 0) {
1061 ++NumSpills;
1062 // If there is only 1 store instruction is required for spill, add it
1063 // to mergeable list. In X86 AMX, 2 intructions are required to store.
1064 // We disable the merge for this case.
1065 if (std::distance(MIS.begin(), MIS.end()) <= 1)
1066 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
1067 } else
1068 ++NumReloads;
1069 return true;
1070}
1071
1072void InlineSpiller::insertReload(Register NewVReg,
1073 SlotIndex Idx,
1075 MachineBasicBlock &MBB = *MI->getParent();
1076
1077 MachineInstrSpan MIS(MI, &MBB);
1078 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
1079 MRI.getRegClass(NewVReg), &TRI, Register());
1080
1081 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
1082
1083 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
1084 NewVReg));
1085 ++NumReloads;
1086}
1087
1088/// Check if \p Def fully defines a VReg with an undefined value.
1089/// If that's the case, that means the value of VReg is actually
1090/// not relevant.
1091static bool isRealSpill(const MachineInstr &Def) {
1092 if (!Def.isImplicitDef())
1093 return true;
1094
1095 // We can say that the VReg defined by Def is undef, only if it is
1096 // fully defined by Def. Otherwise, some of the lanes may not be
1097 // undef and the value of the VReg matters.
1098 return Def.getOperand(0).getSubReg();
1099}
1100
1101/// insertSpill - Insert a spill of NewVReg after MI.
1102void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1104 // Spill are not terminators, so inserting spills after terminators will
1105 // violate invariants in MachineVerifier.
1106 assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1107 MachineBasicBlock &MBB = *MI->getParent();
1108
1109 MachineInstrSpan MIS(MI, &MBB);
1110 MachineBasicBlock::iterator SpillBefore = std::next(MI);
1111 bool IsRealSpill = isRealSpill(*MI);
1112
1113 if (IsRealSpill)
1114 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1115 MRI.getRegClass(NewVReg), &TRI, Register());
1116 else
1117 // Don't spill undef value.
1118 // Anything works for undef, in particular keeping the memory
1119 // uninitialized is a viable option and it saves code size and
1120 // run time.
1121 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1122 .addReg(NewVReg, getKillRegState(isKill));
1123
1125 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1126 for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1127 getVDefInterval(MI, LIS);
1128
1129 LLVM_DEBUG(
1130 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1131 ++NumSpills;
1132 // If there is only 1 store instruction is required for spill, add it
1133 // to mergeable list. In X86 AMX, 2 intructions are required to store.
1134 // We disable the merge for this case.
1135 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1136 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1137}
1138
1139/// spillAroundUses - insert spill code around each use of Reg.
1140void InlineSpiller::spillAroundUses(Register Reg) {
1141 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1142 LiveInterval &OldLI = LIS.getInterval(Reg);
1143
1144 // Iterate over instructions using Reg.
1145 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
1146 // Debug values are not allowed to affect codegen.
1147 if (MI.isDebugValue()) {
1148 // Modify DBG_VALUE now that the value is in a spill slot.
1149 MachineBasicBlock *MBB = MI.getParent();
1150 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
1151 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
1152 MBB->erase(MI);
1153 continue;
1154 }
1155
1156 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
1157 "instruction that isn't a DBG_VALUE");
1158
1159 // Ignore copies to/from snippets. We'll delete them.
1160 if (SnippetCopies.count(&MI))
1161 continue;
1162
1163 // Stack slot accesses may coalesce away.
1164 if (coalesceStackAccess(&MI, Reg))
1165 continue;
1166
1167 // Analyze instruction.
1169 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
1170
1171 // Find the slot index where this instruction reads and writes OldLI.
1172 // This is usually the def slot, except for tied early clobbers.
1174 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1175 if (SlotIndex::isSameInstr(Idx, VNI->def))
1176 Idx = VNI->def;
1177
1178 // Check for a sibling copy.
1179 Register SibReg = isCopyOfBundle(MI, Reg, TII);
1180 if (SibReg && isSibling(SibReg)) {
1181 // This may actually be a copy between snippets.
1182 if (isRegToSpill(SibReg)) {
1183 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
1184 SnippetCopies.insert(&MI);
1185 continue;
1186 }
1187 if (RI.Writes) {
1188 if (hoistSpillInsideBB(OldLI, MI)) {
1189 // This COPY is now dead, the value is already in the stack slot.
1190 MI.getOperand(0).setIsDead();
1191 DeadDefs.push_back(&MI);
1192 continue;
1193 }
1194 } else {
1195 // This is a reload for a sib-reg copy. Drop spills downstream.
1196 LiveInterval &SibLI = LIS.getInterval(SibReg);
1197 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1198 // The COPY will fold to a reload below.
1199 }
1200 }
1201
1202 // Attempt to fold memory ops.
1203 if (foldMemoryOperand(Ops))
1204 continue;
1205
1206 // Create a new virtual register for spill/fill.
1207 // FIXME: Infer regclass from instruction alone.
1208 Register NewVReg = Edit->createFrom(Reg);
1209
1210 if (RI.Reads)
1211 insertReload(NewVReg, Idx, &MI);
1212
1213 // Rewrite instruction operands.
1214 bool hasLiveDef = false;
1215 for (const auto &OpPair : Ops) {
1216 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1217 MO.setReg(NewVReg);
1218 if (MO.isUse()) {
1219 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1220 MO.setIsKill();
1221 } else {
1222 if (!MO.isDead())
1223 hasLiveDef = true;
1224 }
1225 }
1226 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');
1227
1228 // FIXME: Use a second vreg if instruction has no tied ops.
1229 if (RI.Writes)
1230 if (hasLiveDef)
1231 insertSpill(NewVReg, true, &MI);
1232 }
1233}
1234
1235/// spillAll - Spill all registers remaining after rematerialization.
1236void InlineSpiller::spillAll() {
1237 // Update LiveStacks now that we are committed to spilling.
1238 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1239 StackSlot = VRM.assignVirt2StackSlot(Original);
1240 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1241 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1242 } else
1243 StackInt = &LSS.getInterval(StackSlot);
1244
1245 if (Original != Edit->getReg())
1246 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1247
1248 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1249 for (Register Reg : RegsToSpill)
1250 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1251 StackInt->getValNumInfo(0));
1252 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1253
1254 // Spill around uses of all RegsToSpill.
1255 for (Register Reg : RegsToSpill) {
1256 spillAroundUses(Reg);
1257 // Assign all of the spilled registers to the slot so that
1258 // LiveDebugVariables knows about these locations later on.
1259 if (VRM.getStackSlot(Reg) == VirtRegMap::NO_STACK_SLOT)
1260 VRM.assignVirt2StackSlot(Reg, StackSlot);
1261 }
1262
1263 // Hoisted spills may cause dead code.
1264 if (!DeadDefs.empty()) {
1265 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1266 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
1267 }
1268
1269 // Finally delete the SnippetCopies.
1270 for (Register Reg : RegsToSpill) {
1271 for (MachineInstr &MI :
1272 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
1273 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1274 // FIXME: Do this with a LiveRangeEdit callback.
1276 MI.eraseFromBundle();
1277 }
1278 }
1279
1280 // Delete all spilled registers.
1281 for (Register Reg : RegsToSpill)
1282 Edit->eraseVirtReg(Reg);
1283}
1284
1285void InlineSpiller::spill(LiveRangeEdit &edit) {
1286 ++NumSpilledRanges;
1287 Edit = &edit;
1289 "Trying to spill a stack slot.");
1290 // Share a stack slot among all descendants of Original.
1291 Original = VRM.getOriginal(edit.getReg());
1292 StackSlot = VRM.getStackSlot(Original);
1293 StackInt = nullptr;
1294
1295 LLVM_DEBUG(dbgs() << "Inline spilling "
1296 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1297 << ':' << edit.getParent() << "\nFrom original "
1298 << printReg(Original) << '\n');
1299 assert(edit.getParent().isSpillable() &&
1300 "Attempting to spill already spilled value.");
1301 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1302
1303 collectRegsToSpill();
1304 reMaterializeAll();
1305
1306 // Remat may handle everything.
1307 if (!RegsToSpill.empty())
1308 spillAll();
1309
1310 Edit->calculateRegClassAndHint(MF, VRAI);
1311}
1312
1313/// Optimizations after all the reg selections and spills are done.
1314void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1315
1316/// When a spill is inserted, add the spill to MergeableSpills map.
1317void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1318 unsigned Original) {
1320 LiveInterval &OrigLI = LIS.getInterval(Original);
1321 // save a copy of LiveInterval in StackSlotToOrigLI because the original
1322 // LiveInterval may be cleared after all its references are spilled.
1323 if (!StackSlotToOrigLI.contains(StackSlot)) {
1324 auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1325 LI->assign(OrigLI, Allocator);
1326 StackSlotToOrigLI[StackSlot] = std::move(LI);
1327 }
1328 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1329 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1330 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1331 MergeableSpills[MIdx].insert(&Spill);
1332}
1333
1334/// When a spill is removed, remove the spill from MergeableSpills map.
1335/// Return true if the spill is removed successfully.
1336bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1337 int StackSlot) {
1338 auto It = StackSlotToOrigLI.find(StackSlot);
1339 if (It == StackSlotToOrigLI.end())
1340 return false;
1341 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1342 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1343 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1344 return MergeableSpills[MIdx].erase(&Spill);
1345}
1346
1347/// Check BB to see if it is a possible target BB to place a hoisted spill,
1348/// i.e., there should be a living sibling of OrigReg at the insert point.
1349bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1350 MachineBasicBlock &BB, Register &LiveReg) {
1351 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1352 // The original def could be after the last insert point in the root block,
1353 // we can't hoist to here.
1354 if (Idx < OrigVNI.def) {
1355 // TODO: We could be better here. If LI is not alive in landing pad
1356 // we could hoist spill after LIP.
1357 LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1358 return false;
1359 }
1360 Register OrigReg = OrigLI.reg();
1361 SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1362 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1363
1364 for (const Register &SibReg : Siblings) {
1365 LiveInterval &LI = LIS.getInterval(SibReg);
1366 VNInfo *VNI = LI.getVNInfoAt(Idx);
1367 if (VNI) {
1368 LiveReg = SibReg;
1369 return true;
1370 }
1371 }
1372 return false;
1373}
1374
1375/// Remove redundant spills in the same BB. Save those redundant spills in
1376/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1377void HoistSpillHelper::rmRedundantSpills(
1381 // For each spill saw, check SpillBBToSpill[] and see if its BB already has
1382 // another spill inside. If a BB contains more than one spill, only keep the
1383 // earlier spill with smaller SlotIndex.
1384 for (auto *const CurrentSpill : Spills) {
1385 MachineBasicBlock *Block = CurrentSpill->getParent();
1386 MachineDomTreeNode *Node = MDT.getNode(Block);
1387 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1388 if (PrevSpill) {
1389 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1390 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1391 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1392 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1393 SpillsToRm.push_back(SpillToRm);
1394 SpillBBToSpill[MDT.getNode(Block)] = SpillToKeep;
1395 } else {
1396 SpillBBToSpill[MDT.getNode(Block)] = CurrentSpill;
1397 }
1398 }
1399 for (auto *const SpillToRm : SpillsToRm)
1400 Spills.erase(SpillToRm);
1401}
1402
1403/// Starting from \p Root find a top-down traversal order of the dominator
1404/// tree to visit all basic blocks containing the elements of \p Spills.
1405/// Redundant spills will be found and put into \p SpillsToRm at the same
1406/// time. \p SpillBBToSpill will be populated as part of the process and
1407/// maps a basic block to the first store occurring in the basic block.
1408/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1409void HoistSpillHelper::getVisitOrders(
1415 // The set contains all the possible BB nodes to which we may hoist
1416 // original spills.
1418 // Save the BB nodes on the path from the first BB node containing
1419 // non-redundant spill to the Root node.
1421 // All the spills to be hoisted must originate from a single def instruction
1422 // to the OrigReg. It means the def instruction should dominate all the spills
1423 // to be hoisted. We choose the BB where the def instruction is located as
1424 // the Root.
1425 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1426 // For every node on the dominator tree with spill, walk up on the dominator
1427 // tree towards the Root node until it is reached. If there is other node
1428 // containing spill in the middle of the path, the previous spill saw will
1429 // be redundant and the node containing it will be removed. All the nodes on
1430 // the path starting from the first node with non-redundant spill to the Root
1431 // node will be added to the WorkSet, which will contain all the possible
1432 // locations where spills may be hoisted to after the loop below is done.
1433 for (auto *const Spill : Spills) {
1434 MachineBasicBlock *Block = Spill->getParent();
1436 MachineInstr *SpillToRm = nullptr;
1437 while (Node != RootIDomNode) {
1438 // If Node dominates Block, and it already contains a spill, the spill in
1439 // Block will be redundant.
1440 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1441 SpillToRm = SpillBBToSpill[MDT[Block]];
1442 break;
1443 /// If we see the Node already in WorkSet, the path from the Node to
1444 /// the Root node must already be traversed by another spill.
1445 /// Then no need to repeat.
1446 } else if (WorkSet.count(Node)) {
1447 break;
1448 } else {
1449 NodesOnPath.insert(Node);
1450 }
1451 Node = Node->getIDom();
1452 }
1453 if (SpillToRm) {
1454 SpillsToRm.push_back(SpillToRm);
1455 } else {
1456 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1457 // set the initial status before hoisting start. The value of BBs
1458 // containing original spills is set to 0, in order to descriminate
1459 // with BBs containing hoisted spills which will be inserted to
1460 // SpillsToKeep later during hoisting.
1461 SpillsToKeep[MDT[Block]] = 0;
1462 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1463 }
1464 NodesOnPath.clear();
1465 }
1466
1467 // Sort the nodes in WorkSet in top-down order and save the nodes
1468 // in Orders. Orders will be used for hoisting in runHoistSpills.
1469 unsigned idx = 0;
1470 Orders.push_back(MDT.getNode(Root));
1471 do {
1472 MachineDomTreeNode *Node = Orders[idx++];
1473 for (MachineDomTreeNode *Child : Node->children()) {
1474 if (WorkSet.count(Child))
1475 Orders.push_back(Child);
1476 }
1477 } while (idx != Orders.size());
1478 assert(Orders.size() == WorkSet.size() &&
1479 "Orders have different size with WorkSet");
1480
1481#ifndef NDEBUG
1482 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1484 for (; RIt != Orders.rend(); RIt++)
1485 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1486 LLVM_DEBUG(dbgs() << "\n");
1487#endif
1488}
1489
1490/// Try to hoist spills according to BB hotness. The spills to removed will
1491/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1492/// \p SpillsToIns.
1493void HoistSpillHelper::runHoistSpills(
1494 LiveInterval &OrigLI, VNInfo &OrigVNI,
1498 // Visit order of dominator tree nodes.
1500 // SpillsToKeep contains all the nodes where spills are to be inserted
1501 // during hoisting. If the spill to be inserted is an original spill
1502 // (not a hoisted one), the value of the map entry is 0. If the spill
1503 // is a hoisted spill, the value of the map entry is the VReg to be used
1504 // as the source of the spill.
1506 // Map from BB to the first spill inside of it.
1508
1509 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1510
1511 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1512 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1513 SpillBBToSpill);
1514
1515 // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1516 // nodes set and the cost of all the spills inside those nodes.
1517 // The nodes set are the locations where spills are to be inserted
1518 // in the subtree of current node.
1519 using NodesCostPair =
1520 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1522
1523 // Iterate Orders set in reverse order, which will be a bottom-up order
1524 // in the dominator tree. Once we visit a dom tree node, we know its
1525 // children have already been visited and the spill locations in the
1526 // subtrees of all the children have been determined.
1528 for (; RIt != Orders.rend(); RIt++) {
1529 MachineBasicBlock *Block = (*RIt)->getBlock();
1530
1531 // If Block contains an original spill, simply continue.
1532 if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
1533 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1534 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1535 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1536 continue;
1537 }
1538
1539 // Collect spills in subtree of current node (*RIt) to
1540 // SpillsInSubTreeMap[*RIt].first.
1541 for (MachineDomTreeNode *Child : (*RIt)->children()) {
1542 if (!SpillsInSubTreeMap.contains(Child))
1543 continue;
1544 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1545 // should be placed before getting the begin and end iterators of
1546 // SpillsInSubTreeMap[Child].first, or else the iterators may be
1547 // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
1548 // and the map grows and then the original buckets in the map are moved.
1549 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1550 SpillsInSubTreeMap[*RIt].first;
1551 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1552 SubTreeCost += SpillsInSubTreeMap[Child].second;
1553 auto BI = SpillsInSubTreeMap[Child].first.begin();
1554 auto EI = SpillsInSubTreeMap[Child].first.end();
1555 SpillsInSubTree.insert(BI, EI);
1556 SpillsInSubTreeMap.erase(Child);
1557 }
1558
1559 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1560 SpillsInSubTreeMap[*RIt].first;
1561 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1562 // No spills in subtree, simply continue.
1563 if (SpillsInSubTree.empty())
1564 continue;
1565
1566 // Check whether Block is a possible candidate to insert spill.
1567 Register LiveReg;
1568 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1569 continue;
1570
1571 // If there are multiple spills that could be merged, bias a little
1572 // to hoist the spill.
1573 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1574 ? BranchProbability(9, 10)
1575 : BranchProbability(1, 1);
1576 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1577 // Hoist: Move spills to current Block.
1578 for (auto *const SpillBB : SpillsInSubTree) {
1579 // When SpillBB is a BB contains original spill, insert the spill
1580 // to SpillsToRm.
1581 if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
1582 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1583 SpillsToRm.push_back(SpillToRm);
1584 }
1585 // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1586 SpillsToKeep.erase(SpillBB);
1587 }
1588 // Current Block is the BB containing the new hoisted spill. Add it to
1589 // SpillsToKeep. LiveReg is the source of the new spill.
1590 SpillsToKeep[*RIt] = LiveReg;
1591 LLVM_DEBUG({
1592 dbgs() << "spills in BB: ";
1593 for (const auto Rspill : SpillsInSubTree)
1594 dbgs() << Rspill->getBlock()->getNumber() << " ";
1595 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1596 << "\n";
1597 });
1598 SpillsInSubTree.clear();
1599 SpillsInSubTree.insert(*RIt);
1600 SubTreeCost = MBFI.getBlockFreq(Block);
1601 }
1602 }
1603 // For spills in SpillsToKeep with LiveReg set (i.e., not original spills),
1604 // save them to SpillsToIns.
1605 for (const auto &Ent : SpillsToKeep) {
1606 if (Ent.second)
1607 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1608 }
1609}
1610
1611 /// For spills with equal values, remove redundant spills and hoist the
1612 /// remaining ones to colder spots.
1613 ///
1614 /// Spills with equal values are collected into the same set in
1615 /// MergeableSpills as each spill is inserted. These equal spills originate
1616 /// from the same defining instruction and are dominated by it. Before
1617 /// hoisting all the equal spills, redundant spills within the same BB are
1618 /// first marked for deletion. Then, starting from the remaining spills,
1619 /// walk up the dominator tree towards the root node holding the defining
1620 /// instruction, marking dominated spills for deletion along the way and
1621 /// collecting the BB nodes on the path from non-dominated spills to the
1622 /// defining instruction into a WorkSet. The nodes in WorkSet are the
1623 /// candidate places for hoisting the spills. We iterate the WorkSet in
1624 /// bottom-up order and, for each node, decide whether to hoist the spills
1625 /// inside its subtree to that node. In this way, we can benefit locally
1626 /// even if hoisting all the equal spills to one cold place is impossible.
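///
/// A minimal illustration (hypothetical CFG, not taken from the source):
/// if equal spills sit in BB3 and BB4, both dominated by a colder BB1,
/// runHoistSpills removes the two spills and inserts a single spill in BB1
/// when freq(BB1) scaled by the margin is below the combined frequency of
/// BB3 and BB4; otherwise the spills are left in place.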
1627void HoistSpillHelper::hoistAllSpills() {
1628 SmallVector<Register, 4> NewVRegs;
1629 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1630
1631 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1632 Register Reg = Register::index2VirtReg(i);
1633 Register Original = VRM.getPreSplitReg(Reg);
1634 if (!MRI.def_empty(Reg))
1635 Virt2SiblingsMap[Original].insert(Reg);
1636 }
1637
1638 // Each entry in MergeableSpills contains a spill set with equal values.
1639 for (auto &Ent : MergeableSpills) {
1640 int Slot = Ent.first.first;
1641 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1642 VNInfo *OrigVNI = Ent.first.second;
1643 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1644 if (Ent.second.empty())
1645 continue;
1646
1647 LLVM_DEBUG({
1648 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1649 << "Equal spills in BB: ";
1650 for (const auto spill : EqValSpills)
1651 dbgs() << spill->getParent()->getNumber() << " ";
1652 dbgs() << "\n";
1653 });
1654
1655 // SpillsToRm is the spill set to be removed from EqValSpills.
1656 SmallVector<MachineInstr *, 16> SpillsToRm;
1657 // SpillsToIns is the spill set to be newly inserted after hoisting.
1658 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1659
1660 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1661
1662 LLVM_DEBUG({
1663 dbgs() << "Finally inserted spills in BB: ";
1664 for (const auto &Ispill : SpillsToIns)
1665 dbgs() << Ispill.first->getNumber() << " ";
1666 dbgs() << "\nFinally removed spills in BB: ";
1667 for (const auto Rspill : SpillsToRm)
1668 dbgs() << Rspill->getParent()->getNumber() << " ";
1669 dbgs() << "\n";
1670 });
1671
1672 // Stack live range update.
1673 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1674 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1675 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1676 StackIntvl.getValNumInfo(0));
1677
1678 // Insert hoisted spills.
1679 for (auto const &Insert : SpillsToIns) {
1680 MachineBasicBlock *BB = Insert.first;
1681 Register LiveReg = Insert.second;
1682 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1683 MachineInstrSpan MIS(MII, BB);
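// MIS spans the instructions inserted before MII by storeRegToStackSlot
// below, so each newly created spill instruction can be registered with
// LiveIntervals afterwards.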
1684 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1685 MRI.getRegClass(LiveReg), &TRI, Register());
1686 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1687 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1688 getVDefInterval(MI, LIS);
1689 ++NumSpills;
1690 }
1691
1692 // Remove redundant spills or change them to dead instructions.
1693 NumSpills -= SpillsToRm.size();
1694 for (auto *const RMEnt : SpillsToRm) {
1695 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
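// Walk the operands in reverse so removeOperand() does not shift the
// indices of operands that have not been visited yet.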
1696 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1697 MachineOperand &MO = RMEnt->getOperand(i - 1);
1698 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1699 RMEnt->removeOperand(i - 1);
1700 }
1701 }
1702 Edit.eliminateDeadDefs(SpillsToRm, {});
1703 }
1704}
1705
1706 /// For a VirtReg clone, the \p New register should be assigned the same
1707 /// physreg or stack slot as the \p Old register.
1708void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1709 if (VRM.hasPhys(Old))
1710 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1711 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1712 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1713 else
1714 llvm_unreachable("VReg should be assigned either physreg or stackslot");
1715 if (VRM.hasShape(Old))
1716 VRM.assignVirt2Shape(New, VRM.getShape(Old));
1717}