File: llvm/lib/CodeGen/InlineSpiller.cpp
Warning: line 315, column 62: The left operand of '==' is a garbage value
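Note: the flagged comparison appears to be `FI == StackSlot` in InlineSpiller::isSnippet (source line 315 below); `FI` is assigned only when the target's isLoadFromStackSlot/isStoreToStackSlot hook recognizes the instruction.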
1 | //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 | //
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | // See https://llvm.org/LICENSE.txt for license information.
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | //
7 | //===----------------------------------------------------------------------===//
8 | //
9 | // The inline spiller modifies the machine function directly instead of
10 | // inserting spills and restores in VirtRegMap.
11 | //
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "SplitKit.h"
15 | #include "llvm/ADT/ArrayRef.h"
16 | #include "llvm/ADT/DenseMap.h"
17 | #include "llvm/ADT/MapVector.h"
18 | #include "llvm/ADT/None.h"
19 | #include "llvm/ADT/STLExtras.h"
20 | #include "llvm/ADT/SetVector.h"
21 | #include "llvm/ADT/SmallPtrSet.h"
22 | #include "llvm/ADT/SmallVector.h"
23 | #include "llvm/ADT/Statistic.h"
24 | #include "llvm/Analysis/AliasAnalysis.h"
25 | #include "llvm/CodeGen/LiveInterval.h"
26 | #include "llvm/CodeGen/LiveIntervalCalc.h"
27 | #include "llvm/CodeGen/LiveIntervals.h"
28 | #include "llvm/CodeGen/LiveRangeEdit.h"
29 | #include "llvm/CodeGen/LiveStacks.h"
30 | #include "llvm/CodeGen/MachineBasicBlock.h"
31 | #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
32 | #include "llvm/CodeGen/MachineDominators.h"
33 | #include "llvm/CodeGen/MachineFunction.h"
34 | #include "llvm/CodeGen/MachineFunctionPass.h"
35 | #include "llvm/CodeGen/MachineInstr.h"
36 | #include "llvm/CodeGen/MachineInstrBuilder.h"
37 | #include "llvm/CodeGen/MachineInstrBundle.h"
38 | #include "llvm/CodeGen/MachineLoopInfo.h"
39 | #include "llvm/CodeGen/MachineOperand.h"
40 | #include "llvm/CodeGen/MachineRegisterInfo.h"
41 | #include "llvm/CodeGen/SlotIndexes.h"
42 | #include "llvm/CodeGen/Spiller.h"
43 | #include "llvm/CodeGen/StackMaps.h"
44 | #include "llvm/CodeGen/TargetInstrInfo.h"
45 | #include "llvm/CodeGen/TargetOpcodes.h"
46 | #include "llvm/CodeGen/TargetRegisterInfo.h"
47 | #include "llvm/CodeGen/TargetSubtargetInfo.h"
48 | #include "llvm/CodeGen/VirtRegMap.h"
49 | #include "llvm/Config/llvm-config.h"
50 | #include "llvm/Support/BlockFrequency.h"
51 | #include "llvm/Support/BranchProbability.h"
52 | #include "llvm/Support/CommandLine.h"
53 | #include "llvm/Support/Compiler.h"
54 | #include "llvm/Support/Debug.h"
55 | #include "llvm/Support/ErrorHandling.h"
56 | #include "llvm/Support/raw_ostream.h"
57 | #include <cassert>
58 | #include <iterator>
59 | #include <tuple>
60 | #include <utility>
61 | #include <vector>
62 |
63 | using namespace llvm;
64 |
65 | #define DEBUG_TYPE "regalloc"
66 |
67 | STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68 | STATISTIC(NumSnippets, "Number of spilled snippets");
69 | STATISTIC(NumSpills, "Number of spills inserted");
70 | STATISTIC(NumSpillsRemoved, "Number of spills removed");
71 | STATISTIC(NumReloads, "Number of reloads inserted");
72 | STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73 | STATISTIC(NumFolded, "Number of folded stack accesses");
74 | STATISTIC(NumFoldedLoads, "Number of folded loads");
75 | STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76 |
77 | static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78 |                                      cl::desc("Disable inline spill hoisting"));
79 | static cl::opt<bool>
80 | RestrictStatepointRemat("restrict-statepoint-remat",
81 |                         cl::init(false), cl::Hidden,
82 |                         cl::desc("Restrict remat for statepoint operands"));
83 |
84 | namespace {
85 |
86 | class HoistSpillHelper : private LiveRangeEdit::Delegate {
87 |   MachineFunction &MF;
88 |   LiveIntervals &LIS;
89 |   LiveStacks &LSS;
90 |   AliasAnalysis *AA;
91 |   MachineDominatorTree &MDT;
92 |   MachineLoopInfo &Loops;
93 |   VirtRegMap &VRM;
94 |   MachineRegisterInfo &MRI;
95 |   const TargetInstrInfo &TII;
96 |   const TargetRegisterInfo &TRI;
97 |   const MachineBlockFrequencyInfo &MBFI;
98 |
99 |   InsertPointAnalysis IPA;
100 |
101 |   // Map from StackSlot to the LiveInterval of the original register.
102 |   // Note the LiveInterval of the original register may have been deleted
103 |   // after it is spilled. We keep a copy here to track the range where
104 |   // spills can be moved.
105 |   DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
106 |
107 |   // Map from pair of (StackSlot and Original VNI) to a set of spills which
108 |   // have the same stackslot and have equal values defined by Original VNI.
109 |   // These spills are mergeable and are hoist candidates.
110 |   using MergeableSpillsMap =
111 |       MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
112 |   MergeableSpillsMap MergeableSpills;
113 |
114 |   /// This is the map from original register to a set containing all its
115 |   /// siblings. To hoist a spill to another BB, we need to find out a live
116 |   /// sibling there and use it as the source of the new spill.
117 |   DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
118 |
119 |   bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
120 |                      MachineBasicBlock &BB, Register &LiveReg);
121 |
122 |   void rmRedundantSpills(
123 |       SmallPtrSet<MachineInstr *, 16> &Spills,
124 |       SmallVectorImpl<MachineInstr *> &SpillsToRm,
125 |       DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
126 |
127 |   void getVisitOrders(
128 |       MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
129 |       SmallVectorImpl<MachineDomTreeNode *> &Orders,
130 |       SmallVectorImpl<MachineInstr *> &SpillsToRm,
131 |       DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
132 |       DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
133 |
134 |   void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
135 |                       SmallPtrSet<MachineInstr *, 16> &Spills,
136 |                       SmallVectorImpl<MachineInstr *> &SpillsToRm,
137 |                       DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
138 |
139 | public:
140 |   HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
141 |                    VirtRegMap &vrm)
142 |       : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
143 |         LSS(pass.getAnalysis<LiveStacks>()),
144 |         AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
145 |         MDT(pass.getAnalysis<MachineDominatorTree>()),
146 |         Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
147 |         MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
148 |         TRI(*mf.getSubtarget().getRegisterInfo()),
149 |         MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
150 |         IPA(LIS, mf.getNumBlockIDs()) {}
151 |
152 |   void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
153 |                             unsigned Original);
154 |   bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
155 |   void hoistAllSpills();
156 |   void LRE_DidCloneVirtReg(Register, Register) override;
157 | };
158 |
159 | class InlineSpiller : public Spiller {
160 |   MachineFunction &MF;
161 |   LiveIntervals &LIS;
162 |   LiveStacks &LSS;
163 |   AliasAnalysis *AA;
164 |   MachineDominatorTree &MDT;
165 |   MachineLoopInfo &Loops;
166 |   VirtRegMap &VRM;
167 |   MachineRegisterInfo &MRI;
168 |   const TargetInstrInfo &TII;
169 |   const TargetRegisterInfo &TRI;
170 |   const MachineBlockFrequencyInfo &MBFI;
171 |
172 |   // Variables that are valid during spill(), but used by multiple methods.
173 |   LiveRangeEdit *Edit;
174 |   LiveInterval *StackInt;
175 |   int StackSlot;
176 |   unsigned Original;
177 |
178 |   // All registers to spill to StackSlot, including the main register.
179 |   SmallVector<Register, 8> RegsToSpill;
180 |
181 |   // All COPY instructions to/from snippets.
182 |   // They are ignored since both operands refer to the same stack slot.
183 |   SmallPtrSet<MachineInstr*, 8> SnippetCopies;
184 |
185 |   // Values that failed to remat at some point.
186 |   SmallPtrSet<VNInfo*, 8> UsedValues;
187 |
188 |   // Dead defs generated during spilling.
189 |   SmallVector<MachineInstr*, 8> DeadDefs;
190 |
191 |   // Object records spills information and does the hoisting.
192 |   HoistSpillHelper HSpiller;
193 |
194 |   ~InlineSpiller() override = default;
195 |
196 | public:
197 |   InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
198 |       : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
199 |         LSS(pass.getAnalysis<LiveStacks>()),
200 |         AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
201 |         MDT(pass.getAnalysis<MachineDominatorTree>()),
202 |         Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
203 |         MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
204 |         TRI(*mf.getSubtarget().getRegisterInfo()),
205 |         MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
206 |         HSpiller(pass, mf, vrm) {}
207 |
208 |   void spill(LiveRangeEdit &) override;
209 |   void postOptimization() override;
210 |
211 | private:
212 |   bool isSnippet(const LiveInterval &SnipLI);
213 |   void collectRegsToSpill();
214 |
215 |   bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
216 |
217 |   bool isSibling(Register Reg);
218 |   bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
219 |   void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
220 |
221 |   void markValueUsed(LiveInterval*, VNInfo*);
222 |   bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
223 |   bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
224 |   void reMaterializeAll();
225 |
226 |   bool coalesceStackAccess(MachineInstr *MI, Register Reg);
227 |   bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
228 |                          MachineInstr *LoadMI = nullptr);
229 |   void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
230 |   void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
231 |
232 |   void spillAroundUses(Register Reg);
233 |   void spillAll();
234 | };
235 |
236 | } // end anonymous namespace
237 |
238 | Spiller::~Spiller() = default;
239 |
240 | void Spiller::anchor() {}
241 |
242 | Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
243 |                                    MachineFunction &mf,
244 |                                    VirtRegMap &vrm) {
245 |   return new InlineSpiller(pass, mf, vrm);
246 | }
247 |
248 | //===----------------------------------------------------------------------===//
249 | // Snippets
250 | //===----------------------------------------------------------------------===//
251 |
252 | // When spilling a virtual register, we also spill any snippets it is connected
253 | // to. The snippets are small live ranges that only have a single real use,
254 | // leftovers from live range splitting. Spilling them enables memory operand
255 | // folding or tightens the live range around the single use.
256 | //
257 | // This minimizes register pressure and maximizes the store-to-load distance for
258 | // spill slots which can be important in tight loops.
259 |
260 | /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
261 | /// otherwise return 0.
262 | static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
263 |   if (!MI.isFullCopy())
264 |     return Register();
265 |   if (MI.getOperand(0).getReg() == Reg)
266 |     return MI.getOperand(1).getReg();
267 |   if (MI.getOperand(1).getReg() == Reg)
268 |     return MI.getOperand(0).getReg();
269 |   return Register();
270 | }
271 |
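     | // Note: getVDefInterval primes LiveIntervals for each virtual register
     | // defined by MI; LIS.getInterval() computes an interval on first query,
     | // which covers registers created while inserting spill sequences.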
272 | static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
273 |   for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
274 |     const MachineOperand &MO = MI.getOperand(I);
275 |     if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
276 |       LIS.getInterval(MO.getReg());
277 |   }
278 | }
279 |
280 | /// isSnippet - Identify if a live interval is a snippet that should be spilled.
281 | /// It is assumed that SnipLI is a virtual register with the same original as
282 | /// Edit->getReg().
283 | bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
284 |   Register Reg = Edit->getReg();
285 |
286 |   // A snippet is a tiny live range with only a single instruction using it
287 |   // besides copies to/from Reg or spills/fills. We accept:
288 |   //
289 |   //   %snip = COPY %Reg / FILL fi#
290 |   //   %snip = USE %snip
291 |   //   %Reg = COPY %snip / SPILL %snip, fi#
292 |   //
293 |   if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
294 |     return false;
295 |
296 |   MachineInstr *UseMI = nullptr;
297 |
298 |   // Check that all uses satisfy our criteria.
299 |   for (MachineRegisterInfo::reg_instr_nodbg_iterator
300 |            RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
301 |            E = MRI.reg_instr_nodbg_end();
302 |        RI != E;) {
303 |     MachineInstr &MI = *RI++;
304 |
305 |     // Allow copies to/from Reg.
306 |     if (isFullCopyOf(MI, Reg))
307 |       continue;
308 |
309 |     // Allow stack slot loads.
310 |     int FI;
311 |     if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
312 |       continue;
313 |
314 |     // Allow stack slot stores.
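     | // Analyzer note: `FI` (declared at line 310) is set only when the
     | // target's isLoadFromStackSlot/isStoreToStackSlot hook recognizes MI.
     | // If a hook returns a register without setting FI, the `FI == StackSlot`
     | // read on line 315 may see an uninitialized (garbage) value.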
315 |     if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
316 |       continue;
317 |
318 |     // Allow a single additional instruction.
319 |     if (UseMI && &MI != UseMI)
320 |       return false;
321 |     UseMI = &MI;
322 |   }
323 |   return true;
324 | }
325 |
326 | /// collectRegsToSpill - Collect live range snippets that only have a single
327 | /// real use.
328 | void InlineSpiller::collectRegsToSpill() {
329 |   Register Reg = Edit->getReg();
330 |
331 |   // Main register always spills.
332 |   RegsToSpill.assign(1, Reg);
333 |   SnippetCopies.clear();
334 |
335 |   // Snippets all have the same original, so there can't be any for an original
336 |   // register.
337 |   if (Original == Reg)
338 |     return;
339 |
340 |   for (MachineRegisterInfo::reg_instr_iterator
341 |        RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
342 |     MachineInstr &MI = *RI++;
343 |     Register SnipReg = isFullCopyOf(MI, Reg);
344 |     if (!isSibling(SnipReg))
345 |       continue;
346 |     LiveInterval &SnipLI = LIS.getInterval(SnipReg);
347 |     if (!isSnippet(SnipLI))
348 |       continue;
349 |     SnippetCopies.insert(&MI);
350 |     if (isRegToSpill(SnipReg))
351 |       continue;
352 |     RegsToSpill.push_back(SnipReg);
353 |     LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
354 |     ++NumSnippets;
355 |   }
356 | }
357 |
358 | bool InlineSpiller::isSibling(Register Reg) {
359 |   return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
360 | }
361 |
362 | /// It is beneficial to spill at an earlier point in the same BB in a case
363 | /// like the following:
364 | /// There is an alternative def earlier in the same MBB.
365 | /// Hoist the spill as far as possible in SpillMBB. This can ease
366 | /// register pressure:
367 | ///
368 | ///   x = def
369 | ///   y = use x
370 | ///   s = copy x
371 | ///
372 | /// Hoisting the spill of s to immediately after the def removes the
373 | /// interference between x and y:
374 | ///
375 | ///   x = def
376 | ///   spill x
377 | ///   y = use killed x
378 | ///
379 | /// This hoist only helps when the copy kills its source.
380 | ///
381 | bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
382 |                                        MachineInstr &CopyMI) {
383 |   SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
384 | #ifndef NDEBUG
385 |   VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
386 |   assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
387 | #endif
388 |
389 |   Register SrcReg = CopyMI.getOperand(1).getReg();
390 |   LiveInterval &SrcLI = LIS.getInterval(SrcReg);
391 |   VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
392 |   LiveQueryResult SrcQ = SrcLI.Query(Idx);
393 |   MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
394 |   if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
395 |     return false;
396 |
397 |   // Conservatively extend the stack slot range to the range of the original
398 |   // value. We may be able to do better with stack slot coloring by being more
399 |   // careful here.
400 |   assert(StackInt && "No stack slot assigned yet.");
401 |   LiveInterval &OrigLI = LIS.getInterval(Original);
402 |   VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
403 |   StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
404 |   LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
405 |                     << *StackInt << '\n');
406 |
407 |   // We are going to spill SrcVNI immediately after its def, so clear out
408 |   // any later spills of the same value.
409 |   eliminateRedundantSpills(SrcLI, SrcVNI);
410 |
411 |   MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
412 |   MachineBasicBlock::iterator MII;
413 |   if (SrcVNI->isPHIDef())
414 |     MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
415 |   else {
416 |     MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
417 |     assert(DefMI && "Defining instruction disappeared");
418 |     MII = DefMI;
419 |     ++MII;
420 |   }
421 |   MachineInstrSpan MIS(MII, MBB);
422 |   // Insert spill without kill flag immediately after def.
423 |   TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
424 |                           MRI.getRegClass(SrcReg), &TRI);
425 |   LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
426 |   for (const MachineInstr &MI : make_range(MIS.begin(), MII))
427 |     getVDefInterval(MI, LIS);
428 |   --MII; // Point to store instruction.
429 |   LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
430 |
431 |   // If only one store instruction is required for the spill, add it to the
432 |   // mergeable list. On X86 AMX, two instructions are required to store, so
433 |   // we disable merging for that case.
434 |   if (std::distance(MIS.begin(), MII) <= 1)
435 |     HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
436 |   ++NumSpills;
437 |   return true;
438 | }
439 |
440 | /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
441 | /// redundant spills of this value in SLI.reg and sibling copies.
442 | void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
443 |   assert(VNI && "Missing value");
444 |   SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
445 |   WorkList.push_back(std::make_pair(&SLI, VNI));
446 |   assert(StackInt && "No stack slot assigned yet.");
447 |
448 |   do {
449 |     LiveInterval *LI;
450 |     std::tie(LI, VNI) = WorkList.pop_back_val();
451 |     Register Reg = LI->reg();
452 |     LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
453 |                       << VNI->def << " in " << *LI << '\n');
454 |
455 |     // Regs to spill are taken care of.
456 |     if (isRegToSpill(Reg))
457 |       continue;
458 |
459 |     // Add all of VNI's live range to StackInt.
460 |     StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
461 |     LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
462 |
463 |     // Find all spills and copies of VNI.
464 |     for (MachineRegisterInfo::use_instr_nodbg_iterator
465 |          UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
466 |          UI != E; ) {
467 |       MachineInstr &MI = *UI++;
468 |       if (!MI.isCopy() && !MI.mayStore())
469 |         continue;
470 |       SlotIndex Idx = LIS.getInstructionIndex(MI);
471 |       if (LI->getVNInfoAt(Idx) != VNI)
472 |         continue;
473 |
474 |       // Follow sibling copies down the dominator tree.
475 |       if (Register DstReg = isFullCopyOf(MI, Reg)) {
476 |         if (isSibling(DstReg)) {
477 |           LiveInterval &DstLI = LIS.getInterval(DstReg);
478 |           VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
479 |           assert(DstVNI && "Missing defined value");
480 |           assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
481 |           WorkList.push_back(std::make_pair(&DstLI, DstVNI));
482 |         }
483 |         continue;
484 |       }
485 |
486 |       // Erase spills.
487 |       int FI;
488 |       if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
489 |         LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
490 |         // eliminateDeadDefs won't normally remove stores, so switch opcode.
491 |         MI.setDesc(TII.get(TargetOpcode::KILL));
492 |         DeadDefs.push_back(&MI);
493 |         ++NumSpillsRemoved;
494 |         if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
495 |           --NumSpills;
496 |       }
497 |     }
498 |   } while (!WorkList.empty());
499 | }
500 |
501 | //===----------------------------------------------------------------------===//
502 | // Rematerialization
503 | //===----------------------------------------------------------------------===//
504 |
505 | /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
506 | /// instruction cannot be eliminated. See through snippet copies.
507 | void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
508 |   SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
509 |   WorkList.push_back(std::make_pair(LI, VNI));
510 |   do {
511 |     std::tie(LI, VNI) = WorkList.pop_back_val();
512 |     if (!UsedValues.insert(VNI).second)
513 |       continue;
514 |
515 |     if (VNI->isPHIDef()) {
516 |       MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
517 |       for (MachineBasicBlock *P : MBB->predecessors()) {
518 |         VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
519 |         if (PVNI)
520 |           WorkList.push_back(std::make_pair(LI, PVNI));
521 |       }
522 |       continue;
523 |     }
524 |
525 |     // Follow snippet copies.
526 |     MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
527 |     if (!SnippetCopies.count(MI))
528 |       continue;
529 |     LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
530 |     assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
531 |     VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
532 |     assert(SnipVNI && "Snippet undefined before copy");
533 |     WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
534 |   } while (!WorkList.empty());
535 | }
536 |
537 | bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
538 |                                                      MachineInstr &MI) {
539 |   if (!RestrictStatepointRemat)
540 |     return true;
541 |   // Here's a quick explanation of the problem we're trying to handle here:
542 |   // * There are some pseudo instructions with more vreg uses than there are
543 |   //   physical registers on the machine.
544 |   // * This is normally handled by spilling the vreg, and folding the reload
545 |   //   into the user instruction. (Thus decreasing the number of used vregs
546 |   //   until the remainder can be assigned to physregs.)
547 |   // * However, since we may try to spill vregs in any order, we can end up
548 |   //   trying to spill each operand to the instruction, and then rematting it
549 |   //   instead. When that happens, the new live intervals (for the remats) are
550 |   //   expected to be trivially assignable (i.e. RS_Done). However, since we
551 |   //   may have more remats than physregs, we're guaranteed to fail to assign
552 |   //   one.
553 |   // At the moment, we only handle this for STATEPOINTs since they're the only
554 |   // pseudo op where we've seen this. If we start seeing other instructions
555 |   // with the same problem, we need to revisit this.
556 |   if (MI.getOpcode() != TargetOpcode::STATEPOINT)
557 |     return true;
558 |   // For STATEPOINTs we allow re-materialization for fixed arguments only hoping
559 |   // that number of physical registers is enough to cover all fixed arguments.
560 |   // If it is not true we need to revisit it.
561 |   for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
562 |                 EndIdx = MI.getNumOperands();
563 |        Idx < EndIdx; ++Idx) {
564 |     MachineOperand &MO = MI.getOperand(Idx);
565 |     if (MO.isReg() && MO.getReg() == VReg)
566 |       return false;
567 |   }
568 |   return true;
569 | }
570 |
571 | /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
572 | bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
573 |   // Analyze instruction
574 |   SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
575 |   VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
576 |
577 |   if (!RI.Reads)
578 |     return false;
579 |
580 |   SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
581 |   VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
582 |
583 |   if (!ParentVNI) {
584 |     LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
585 |     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
586 |       MachineOperand &MO = MI.getOperand(i);
587 |       if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
588 |         MO.setIsUndef();
589 |     }
590 |     LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
591 |     return true;
592 |   }
593 |
594 |   if (SnippetCopies.count(&MI))
595 |     return false;
596 |
597 |   LiveInterval &OrigLI = LIS.getInterval(Original);
598 |   VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
599 |   LiveRangeEdit::Remat RM(ParentVNI);
600 |   RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
601 |
602 |   if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
603 |     markValueUsed(&VirtReg, ParentVNI);
604 |     LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
605 |     return false;
606 |   }
607 |
608 |   // If the instruction also writes VirtReg.reg, it had better not require the
609 |   // same register for uses and defs.
610 |   if (RI.Tied) {
611 |     markValueUsed(&VirtReg, ParentVNI);
612 |     LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
613 |     return false;
614 |   }
615 |
616 |   // Before rematerializing into a register for a single instruction, try to
617 |   // fold a load into the instruction. That avoids allocating a new register.
618 |   if (RM.OrigMI->canFoldAsLoad() &&
619 |       foldMemoryOperand(Ops, RM.OrigMI)) {
620 |     Edit->markRematerialized(RM.ParentVNI);
621 |     ++NumFoldedLoads;
622 |     return true;
623 |   }
624 |
625 |   // If we can't guarantee that we'll be able to actually assign the new vreg,
626 |   // we can't remat.
627 |   if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
628 |     markValueUsed(&VirtReg, ParentVNI);
629 |     LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
630 |     return false;
631 |   }
632 |
633 |   // Allocate a new register for the remat.
634 |   Register NewVReg = Edit->createFrom(Original);
635 |
636 |   // Finally we can rematerialize OrigMI before MI.
637 |   SlotIndex DefIdx =
638 |       Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
639 |
640 |   // We take the DebugLoc from MI, since OrigMI may be attributed to a
641 |   // different source location.
642 |   auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
643 |   NewMI->setDebugLoc(MI.getDebugLoc());
644 |
645 |   (void)DefIdx;
646 |   LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
647 |                     << *LIS.getInstructionFromIndex(DefIdx));
648 |
649 |   // Replace operands
650 |   for (const auto &OpPair : Ops) {
651 |     MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
652 |     if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
653 |       MO.setReg(NewVReg);
654 |       MO.setIsKill();
655 |     }
656 |   }
657 |   LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
658 |
659 |   ++NumRemats;
660 |   return true;
661 | }
662 |
663 | /// reMaterializeAll - Try to rematerialize as many uses as possible,
664 | /// and trim the live ranges after.
665 | void InlineSpiller::reMaterializeAll() {
666 |   if (!Edit->anyRematerializable(AA))
667 |     return;
668 |
669 |   UsedValues.clear();
670 |
671 |   // Try to remat before all uses of snippets.
672 |   bool anyRemat = false;
673 |   for (Register Reg : RegsToSpill) {
674 |     LiveInterval &LI = LIS.getInterval(Reg);
675 |     for (MachineRegisterInfo::reg_bundle_iterator
676 |          RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
677 |          RegI != E; ) {
678 |       MachineInstr &MI = *RegI++;
679 |
680 |       // Debug values are not allowed to affect codegen.
681 |       if (MI.isDebugValue())
682 |         continue;
683 |
684 |       assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
685 |              "instruction that isn't a DBG_VALUE");
686 |
687 |       anyRemat |= reMaterializeFor(LI, MI);
688 |     }
689 |   }
690 |   if (!anyRemat)
691 |     return;
692 |
693 |   // Remove any values that were completely rematted.
694 |   for (Register Reg : RegsToSpill) {
695 |     LiveInterval &LI = LIS.getInterval(Reg);
696 |     for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
697 |          I != E; ++I) {
698 |       VNInfo *VNI = *I;
699 |       if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
700 |         continue;
701 |       MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
702 |       MI->addRegisterDead(Reg, &TRI);
703 |       if (!MI->allDefsAreDead())
704 |         continue;
705 |       LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
706 |       DeadDefs.push_back(MI);
707 |     }
708 |   }
709 |
710 |   // Eliminate dead code after remat. Note that some snippet copies may be
711 |   // deleted here.
712 |   if (DeadDefs.empty())
713 |     return;
714 |   LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
715 |   Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
716 |
717 |   // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
718 |   // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
719 |   // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
720 |   // removed, PHI VNI are still left in the LiveInterval.
721 |   // So to get rid of unused reg, we need to check whether it has non-dbg
722 |   // reference instead of whether it has non-empty interval.
723 |   unsigned ResultPos = 0;
724 |   for (Register Reg : RegsToSpill) {
725 |     if (MRI.reg_nodbg_empty(Reg)) {
726 |       Edit->eraseVirtReg(Reg);
727 |       continue;
728 |     }
729 |
730 |     assert(LIS.hasInterval(Reg) &&
731 |            (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
732 |            "Empty and not used live-range?!");
733 |
734 |     RegsToSpill[ResultPos++] = Reg;
735 |   }
736 |   RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
737 |   LLVM_DEBUG(dbgs() << RegsToSpill.size()
738 |                     << " registers to spill after remat.\n");
739 | }
740 |
741 | //===----------------------------------------------------------------------===//
742 | // Spilling
743 | //===----------------------------------------------------------------------===//
744 |
745 | /// If MI is a load or store of StackSlot, it can be removed.
746 | bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
747 |   int FI = 0;
748 |   Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
749 |   bool IsLoad = InstrReg;
750 |   if (!IsLoad)
751 |     InstrReg = TII.isStoreToStackSlot(*MI, FI);
752 |
753 |   // We have a stack access. Is it the right register and slot?
754 |   if (InstrReg != Reg || FI != StackSlot)
755 |     return false;
756 |
757 |   if (!IsLoad)
758 |     HSpiller.rmFromMergeableSpills(*MI, StackSlot);
759 |
760 |   LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
761 |   LIS.RemoveMachineInstrFromMaps(*MI);
762 |   MI->eraseFromParent();
763 |
764 |   if (IsLoad) {
765 |     ++NumReloadsRemoved;
766 |     --NumReloads;
767 |   } else {
768 |     ++NumSpillsRemoved;
769 |     --NumSpills;
770 |   }
771 |
772 |   return true;
773 | }
774 |
775 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
776 | LLVM_DUMP_METHOD
777 | // Dump the range of instructions from B to E with their slot indexes.
778 | static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
779 |                                                MachineBasicBlock::iterator E,
780 |                                                LiveIntervals const &LIS,
781 |                                                const char *const header,
782 |                                                Register VReg = Register()) {
783 |   char NextLine = '\n';
784 |   char SlotIndent = '\t';
785 |
786 |   if (std::next(B) == E) {
787 |     NextLine = ' ';
788 |     SlotIndent = ' ';
789 |   }
790 |
791 |   dbgs() << '\t' << header << ": " << NextLine;
792 |
793 |   for (MachineBasicBlock::iterator I = B; I != E; ++I) {
794 |     SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
795 |
796 |     // If a register was passed in and this instruction has it as a
797 |     // destination that is marked as an early clobber, print the
798 |     // early-clobber slot index.
799 |     if (VReg) {
800 |       MachineOperand *MO = I->findRegisterDefOperand(VReg);
801 |       if (MO && MO->isEarlyClobber())
802 |         Idx = Idx.getRegSlot(true);
803 |     }
804 |
805 |     dbgs() << SlotIndent << Idx << '\t' << *I;
806 |   }
807 | }
808 | #endif
809 |
810 | /// foldMemoryOperand - Try folding stack slot references in Ops into their
811 | /// instructions.
812 | ///
813 | /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
814 | /// @param LoadMI Load instruction to use instead of stack slot when non-null.
815 | /// @return True on success.
816 | bool InlineSpiller::
817 | foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
818 |                   MachineInstr *LoadMI) {
819 |   if (Ops.empty())
820 |     return false;
821 |   // Don't attempt folding in bundles.
822 |   MachineInstr *MI = Ops.front().first;
823 |   if (Ops.back().first != MI || MI->isBundled())
824 |     return false;
825 |
826 |   bool WasCopy = MI->isCopy();
827 |   Register ImpReg;
828 |
829 |   // TII::foldMemoryOperand will do what we need here for statepoint
830 |   // (fold load into use and remove corresponding def). We will replace
831 |   // uses of removed def with loads (spillAroundUses).
832 |   // For that to work we need to untie def and use to pass it through
833 |   // foldMemoryOperand and signal foldPatchpoint that it is allowed to
834 |   // fold them.
835 |   bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
836 |
837 |   // Spill subregs if the target allows it.
838 |   // We always want to spill subregs for stackmap/patchpoint pseudos.
839 |   bool SpillSubRegs = TII.isSubregFoldable() ||
840 |                       MI->getOpcode() == TargetOpcode::STATEPOINT ||
841 |                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
842 |                       MI->getOpcode() == TargetOpcode::STACKMAP;
843 |
844 |   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
845 |   // operands.
846 |   SmallVector<unsigned, 8> FoldOps;
847 |   for (const auto &OpPair : Ops) {
848 |     unsigned Idx = OpPair.second;
849 |     assert(MI == OpPair.first && "Instruction conflict during operand folding");
850 |     MachineOperand &MO = MI->getOperand(Idx);
851 |     if (MO.isImplicit()) {
852 |       ImpReg = MO.getReg();
853 |       continue;
854 |     }
855 |
856 |     if (!SpillSubRegs && MO.getSubReg())
857 |       return false;
858 |     // We cannot fold a load instruction into a def.
859 |     if (LoadMI && MO.isDef())
860 |       return false;
861 |     // Tied use operands should not be passed to foldMemoryOperand.
862 |     if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
863 |       FoldOps.push_back(Idx);
864 |   }
865 |
866 |   // If we only have implicit uses, we won't be able to fold that.
867 |   // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
868 |   if (FoldOps.empty())
869 |     return false;
870 |
871 |   MachineInstrSpan MIS(MI, MI->getParent());
872 |
873 |   SmallVector<std::pair<unsigned, unsigned> > TiedOps;
874 |   if (UntieRegs)
875 |     for (unsigned Idx : FoldOps) {
876 |       MachineOperand &MO = MI->getOperand(Idx);
877 |       if (!MO.isTied())
878 |         continue;
879 |       unsigned Tied = MI->findTiedOperandIdx(Idx);
880 |       if (MO.isUse())
881 |         TiedOps.emplace_back(Tied, Idx);
882 |       else {
883 |         assert(MO.isDef() && "Tied to not use and def?");
884 |         TiedOps.emplace_back(Idx, Tied);
885 |       }
886 |       MI->untieRegOperand(Idx);
887 |     }
888 |
889 |   MachineInstr *FoldMI =
890 |       LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
891 |              : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
892 |   if (!FoldMI) {
893 |     // Re-tie operands.
894 |     for (auto Tied : TiedOps)
895 |       MI->tieOperands(Tied.first, Tied.second);
896 |     return false;
897 |   }
898 |
899 |   // Remove LIS for any dead defs in the original MI not in FoldMI.
900 |   for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
901 |     if (!MO->isReg())
902 |       continue;
903 |     Register Reg = MO->getReg();
904 |     if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
905 |       continue;
906 |     }
907 |     // Skip non-Defs, including undef uses and internal reads.
908 |     if (MO->isUse())
909 |       continue;
910 |     PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
911 |     if (RI.FullyDefined)
912 |       continue;
913 |     // FoldMI does not define this physreg. Remove the LI segment.
914 |     assert(MO->isDead() && "Cannot fold physreg def");
915 |     SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
916 |     LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
917 |   }
918 |
919 |   int FI;
920 |   if (TII.isStoreToStackSlot(*MI, FI) &&
921 |       HSpiller.rmFromMergeableSpills(*MI, FI))
922 |     --NumSpills;
923 |   LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
924 |   // Update the call site info.
925 |   if (MI->isCandidateForCallSiteEntry())
926 |     MI->getMF()->moveCallSiteInfo(MI, FoldMI);
927 |   MI->eraseFromParent();
928 |
929 |   // Insert any new instructions other than FoldMI into the LIS maps.
930 |   assert(!MIS.empty() && "Unexpected empty span of instructions!");
931 |   for (MachineInstr &MI : MIS)
932 |     if (&MI != FoldMI)
933 |       LIS.InsertMachineInstrInMaps(MI);
934 |
935 |   // TII.foldMemoryOperand may have left some implicit operands on the
936 |   // instruction. Strip them.
937 |   if (ImpReg)
938 |     for (unsigned i = FoldMI->getNumOperands(); i; --i) {
939 |       MachineOperand &MO = FoldMI->getOperand(i - 1);
940 |       if (!MO.isReg() || !MO.isImplicit())
941 |         break;
942 |       if (MO.getReg() == ImpReg)
943 |         FoldMI->RemoveOperand(i - 1);
944 |     }
945 |
946 |   LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
947 |                                                 "folded"));
948 |
949 |   if (!WasCopy)
950 |     ++NumFolded;
951 |   else if (Ops.front().second == 0) {
952 |     ++NumSpills;
953 |     // If only one store instruction is required for the spill, add it to the
954 |     // mergeable list. On X86 AMX, two instructions are required to store, so
955 |     // we disable merging for that case.
956 |     if (std::distance(MIS.begin(), MIS.end()) <= 1)
957 |       HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
958 |   } else
959 |     ++NumReloads;
960 |   return true;
961 | }
962 |
963 | void InlineSpiller::insertReload(Register NewVReg,
964 |                                  SlotIndex Idx,
965 |                                  MachineBasicBlock::iterator MI) {
966 |   MachineBasicBlock &MBB = *MI->getParent();
967 |
968 |   MachineInstrSpan MIS(MI, &MBB);
969 |   TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
970 |                            MRI.getRegClass(NewVReg), &TRI);
971 |
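     | // MIS.begin() points at the first instruction the target inserted before
     | // MI, so this indexes every newly created reload instruction.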
972 |   LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
973 |
974 |   LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
975 |                                                 NewVReg));
976 |   ++NumReloads;
977 | }
978 |
979 | /// Check if \p Def fully defines a VReg with an undefined value.
980 | /// If that's the case, that means the value of VReg is actually
981 | /// not relevant.
982 | static bool isRealSpill(const MachineInstr &Def) {
983 |   if (!Def.isImplicitDef())
984 |     return true;
985 |   assert(Def.getNumOperands() == 1 &&
986 |          "Implicit def with more than one definition");
987 |   // We can say that the VReg defined by Def is undef, only if it is
988 |   // fully defined by Def. Otherwise, some of the lanes may not be
989 |   // undef and the value of the VReg matters.
990 |   return Def.getOperand(0).getSubReg();
991 | }
992 |
993 | /// insertSpill - Insert a spill of NewVReg after MI.
994 | void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
995 |                                 MachineBasicBlock::iterator MI) {
996 |   // Spills are not terminators, so inserting spills after terminators will
997 |   // violate invariants in MachineVerifier.
998 |   assert(!MI->isTerminator() && "Inserting a spill after a terminator");
999 |   MachineBasicBlock &MBB = *MI->getParent();
1000 |
1001 |   MachineInstrSpan MIS(MI, &MBB);
1002 |   MachineBasicBlock::iterator SpillBefore = std::next(MI);
1003 |   bool IsRealSpill = isRealSpill(*MI);
1004 |
1005 |   if (IsRealSpill)
1006 |     TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1007 |                             MRI.getRegClass(NewVReg), &TRI);
1008 |   else
1009 |     // Don't spill undef value.
1010 |     // Anything works for undef, in particular keeping the memory
1011 |     // uninitialized is a viable option and it saves code size and
1012 |     // run time.
1013 |     BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1014 |         .addReg(NewVReg, getKillRegState(isKill));
1015 |
1016 |   MachineBasicBlock::iterator Spill = std::next(MI);
1017 |   LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1018 |   for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1019 |     getVDefInterval(MI, LIS);
1020 |
1021 |   LLVM_DEBUG(
1022 |       dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1023 |   ++NumSpills;
1024 | // If there is only 1 store instruction is required for spill, add it | |||
1025 | // to mergeable list. In X86 AMX, 2 intructions are required to store. | |||
1026 | // We disable the merge for this case. | |||
1027 | if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1) | |||
1028 | HSpiller.addToMergeableSpills(*Spill, StackSlot, Original); | |||
1029 | } | |||
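
A sketch of the two resulting instruction streams (illustrative x86 MIR; the
stack slot and store opcode are assumptions, not output of this code):

  %new:gr64 = <def>                                      ; rewritten def
  MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed %new    ; real spill

versus, for an undef value:

  %new:gr64 = IMPLICIT_DEF
  KILL killed %new                                       ; no store emitted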
1030 | 
1031 | /// spillAroundUses - insert spill code around each use of Reg.
1032 | void InlineSpiller::spillAroundUses(Register Reg) {
1033 |   LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1034 |   LiveInterval &OldLI = LIS.getInterval(Reg);
1035 | 
1036 |   // Iterate over instructions using Reg.
1037 |   for (MachineRegisterInfo::reg_bundle_iterator
1038 |            RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
1039 |        RegI != E; ) {
1040 |     MachineInstr *MI = &*(RegI++);
1041 | 
1042 |     // Debug values are not allowed to affect codegen.
1043 |     if (MI->isDebugValue()) {
1044 |       // Modify DBG_VALUE now that the value is in a spill slot.
1045 |       MachineBasicBlock *MBB = MI->getParent();
1046 |       LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
1047 |       buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
1048 |       MBB->erase(MI);
1049 |       continue;
1050 |     }
1051 | 
1052 |     assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
1053 |            "instruction that isn't a DBG_VALUE");
1054 | 
1055 |     // Ignore copies to/from snippets. We'll delete them.
1056 |     if (SnippetCopies.count(MI))
1057 |       continue;
1058 | 
1059 |     // Stack slot accesses may coalesce away.
1060 |     if (coalesceStackAccess(MI, Reg))
1061 |       continue;
1062 | 
1063 |     // Analyze instruction.
1064 |     SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1065 |     VirtRegInfo RI = AnalyzeVirtRegInBundle(*MI, Reg, &Ops);
1066 | 
1067 |     // Find the slot index where this instruction reads and writes OldLI.
1068 |     // This is usually the def slot, except for tied early clobbers.
1069 |     SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
1070 |     if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1071 |       if (SlotIndex::isSameInstr(Idx, VNI->def))
1072 |         Idx = VNI->def;
1073 | 
1074 |     // Check for a sibling copy.
1075 |     Register SibReg = isFullCopyOf(*MI, Reg);
1076 |     if (SibReg && isSibling(SibReg)) {
1077 |       // This may actually be a copy between snippets.
1078 |       if (isRegToSpill(SibReg)) {
1079 |         LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
1080 |         SnippetCopies.insert(MI);
1081 |         continue;
1082 |       }
1083 |       if (RI.Writes) {
1084 |         if (hoistSpillInsideBB(OldLI, *MI)) {
1085 |           // This COPY is now dead, the value is already in the stack slot.
1086 |           MI->getOperand(0).setIsDead();
1087 |           DeadDefs.push_back(MI);
1088 |           continue;
1089 |         }
1090 |       } else {
1091 |         // This is a reload for a sib-reg copy. Drop spills downstream.
1092 |         LiveInterval &SibLI = LIS.getInterval(SibReg);
1093 |         eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1094 |         // The COPY will fold to a reload below.
1095 |       }
1096 |     }
1097 | 
1098 |     // Attempt to fold memory ops.
1099 |     if (foldMemoryOperand(Ops))
1100 |       continue;
1101 | 
1102 |     // Create a new virtual register for spill/fill.
1103 |     // FIXME: Infer regclass from instruction alone.
1104 |     Register NewVReg = Edit->createFrom(Reg);
1105 | 
1106 |     if (RI.Reads)
1107 |       insertReload(NewVReg, Idx, MI);
1108 | 
1109 |     // Rewrite instruction operands.
1110 |     bool hasLiveDef = false;
1111 |     for (const auto &OpPair : Ops) {
1112 |       MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1113 |       MO.setReg(NewVReg);
1114 |       if (MO.isUse()) {
1115 |         if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1116 |           MO.setIsKill();
1117 |       } else {
1118 |         if (!MO.isDead())
1119 |           hasLiveDef = true;
1120 |       }
1121 |     }
1122 |     LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1123 | 
1124 |     // FIXME: Use a second vreg if instruction has no tied ops.
1125 |     if (RI.Writes)
1126 |       if (hasLiveDef)
1127 |         insertSpill(NewVReg, true, MI);
1128 |   }
1129 | }
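
The per-use rewrite above always follows one pattern; a minimal sketch
(illustrative, with %stack.N standing in for StackSlot):

  %new = <reload from %stack.N>    ; insertReload, only when RI.Reads
  <MI with its Reg operands rewritten to %new>
  <spill of %new to %stack.N>      ; insertSpill, only when RI.Writes
                                   ; and the def is live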
1130 | 
1131 | /// spillAll - Spill all registers remaining after rematerialization.
1132 | void InlineSpiller::spillAll() {
1133 |   // Update LiveStacks now that we are committed to spilling.
1134 |   if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1135 |     StackSlot = VRM.assignVirt2StackSlot(Original);
1136 |     StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1137 |     StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1138 |   } else
1139 |     StackInt = &LSS.getInterval(StackSlot);
1140 | 
1141 |   if (Original != Edit->getReg())
1142 |     VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1143 | 
1144 |   assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1145 |   for (Register Reg : RegsToSpill)
1146 |     StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1147 |                                      StackInt->getValNumInfo(0));
1148 |   LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1149 | 
1150 |   // Spill around uses of all RegsToSpill.
1151 |   for (Register Reg : RegsToSpill)
1152 |     spillAroundUses(Reg);
1153 | 
1154 |   // Hoisted spills may cause dead code.
1155 |   if (!DeadDefs.empty()) {
1156 |     LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1157 |     Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1158 |   }
1159 | 
1160 |   // Finally delete the SnippetCopies.
1161 |   for (Register Reg : RegsToSpill) {
1162 |     for (MachineRegisterInfo::reg_instr_iterator
1163 |              RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1164 |          RI != E; ) {
1165 |       MachineInstr &MI = *(RI++);
1166 |       assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1167 |       // FIXME: Do this with a LiveRangeEdit callback.
1168 |       LIS.RemoveMachineInstrFromMaps(MI);
1169 |       MI.eraseFromParent();
1170 |     }
1171 |   }
1172 | 
1173 |   // Delete all spilled registers.
1174 |   for (Register Reg : RegsToSpill)
1175 |     Edit->eraseVirtReg(Reg);
1176 | }
1177 | 
1178 | void InlineSpiller::spill(LiveRangeEdit &edit) {
1179 |   ++NumSpilledRanges;
1180 |   Edit = &edit;
1181 |   assert(!Register::isStackSlot(edit.getReg()) &&
1182 |          "Trying to spill a stack slot.");
1183 |   // Share a stack slot among all descendants of Original.
1184 |   Original = VRM.getOriginal(edit.getReg());
1185 |   StackSlot = VRM.getStackSlot(Original);
1186 |   StackInt = nullptr;
1187 | 
1188 |   LLVM_DEBUG(dbgs() << "Inline spilling "
1189 |                     << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1190 |                     << ':' << edit.getParent() << "\nFrom original "
1191 |                     << printReg(Original) << '\n');
1192 |   assert(edit.getParent().isSpillable() &&
1193 |          "Attempting to spill already spilled value.");
1194 |   assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1195 | 
1196 |   collectRegsToSpill();
1197 |   reMaterializeAll();
1198 | 
1199 |   // Remat may handle everything.
1200 |   if (!RegsToSpill.empty())
1201 |     spillAll();
1202 | 
1203 |   Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1204 | }
1205 | 
1206 | /// Optimizations after all the reg selections and spills are done.
1207 | void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1208 | 
1209 | /// When a spill is inserted, add the spill to the MergeableSpills map.
1210 | void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1211 |                                             unsigned Original) {
1212 |   BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1213 |   LiveInterval &OrigLI = LIS.getInterval(Original);
1214 |   // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1215 |   // LiveInterval may be cleared after all its references are spilled.
1216 |   if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1217 |     auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1218 |     LI->assign(OrigLI, Allocator);
1219 |     StackSlotToOrigLI[StackSlot] = std::move(LI);
1220 |   }
1221 |   SlotIndex Idx = LIS.getInstructionIndex(Spill);
1222 |   VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1223 |   std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1224 |   MergeableSpills[MIdx].insert(&Spill);
1225 | }
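
Note that the key is the pair (stack slot, value number), so only spills that
store the same value of the same original interval ever share a set; e.g. two
spills both keyed by a hypothetical (SS#3, VN#0) become merge candidates,
while a spill of a different value under SS#3 lands in a separate set.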
1226 | 
1227 | /// When a spill is removed, remove the spill from the MergeableSpills map.
1228 | /// Return true if the spill is removed successfully.
1229 | bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1230 |                                              int StackSlot) {
1231 |   auto It = StackSlotToOrigLI.find(StackSlot);
1232 |   if (It == StackSlotToOrigLI.end())
1233 |     return false;
1234 |   SlotIndex Idx = LIS.getInstructionIndex(Spill);
1235 |   VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1236 |   std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1237 |   return MergeableSpills[MIdx].erase(&Spill);
1238 | }
1239 | 
1240 | /// Check BB to see if it is a possible target BB to place a hoisted spill,
1241 | /// i.e., there should be a live sibling of OrigReg at the insert point.
1242 | bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1243 |                                      MachineBasicBlock &BB, Register &LiveReg) {
1244 |   SlotIndex Idx;
1245 |   Register OrigReg = OrigLI.reg();
1246 |   MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1247 |   if (MI != BB.end())
1248 |     Idx = LIS.getInstructionIndex(*MI);
1249 |   else
1250 |     Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1251 |   SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1252 |   assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1253 | 
1254 |   for (const Register &SibReg : Siblings) {
1255 |     LiveInterval &LI = LIS.getInterval(SibReg);
1256 |     VNInfo *VNI = LI.getVNInfoAt(Idx);
1257 |     if (VNI) {
1258 |       LiveReg = SibReg;
1259 |       return true;
1260 |     }
1261 |   }
1262 |   return false;
1263 | }
1264 | 
1265 | /// Remove redundant spills in the same BB. Save those redundant spills in
1266 | /// SpillsToRm, and save the spill to keep and its BB in the SpillBBToSpill map.
1267 | void HoistSpillHelper::rmRedundantSpills(
1268 |     SmallPtrSet<MachineInstr *, 16> &Spills,
1269 |     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1270 |     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1271 |   // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1272 |   // another spill inside. If a BB contains more than one spill, only keep the
1273 |   // earlier spill with the smaller SlotIndex.
1274 |   for (const auto CurrentSpill : Spills) {
1275 |     MachineBasicBlock *Block = CurrentSpill->getParent();
1276 |     MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1277 |     MachineInstr *PrevSpill = SpillBBToSpill[Node];
1278 |     if (PrevSpill) {
1279 |       SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1280 |       SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1281 |       MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1282 |       MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1283 |       SpillsToRm.push_back(SpillToRm);
1284 |       SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1285 |     } else {
1286 |       SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1287 |     }
1288 |   }
1289 |   for (const auto SpillToRm : SpillsToRm)
1290 |     Spills.erase(SpillToRm);
1291 | }
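
For example (illustrative SlotIndexes): if one block holds two spills of the
same value at indexes 16 and 48, the later one at 48 goes into SpillsToRm and
the earlier one at 16 stays as that block's entry in SpillBBToSpill.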
1292 | 
1293 | /// Starting from \p Root find a top-down traversal order of the dominator
1294 | /// tree to visit all basic blocks containing the elements of \p Spills.
1295 | /// Redundant spills will be found and put into \p SpillsToRm at the same
1296 | /// time. \p SpillBBToSpill will be populated as part of the process and
1297 | /// maps a basic block to the first store occurring in the basic block.
1298 | /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1299 | void HoistSpillHelper::getVisitOrders(
1300 |     MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1301 |     SmallVectorImpl<MachineDomTreeNode *> &Orders,
1302 |     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1303 |     DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1304 |     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1305 |   // The set contains all the possible BB nodes to which we may hoist
1306 |   // original spills.
1307 |   SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1308 |   // Save the BB nodes on the path from the first BB node containing a
1309 |   // non-redundant spill to the Root node.
1310 |   SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1311 |   // All the spills to be hoisted must originate from a single def instruction
1312 |   // to the OrigReg. It means the def instruction should dominate all the spills
1313 |   // to be hoisted. We choose the BB where the def instruction is located as
1314 |   // the Root.
1315 |   MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1316 |   // For every node on the dominator tree with a spill, walk up the dominator
1317 |   // tree towards the Root node until it is reached. If another node containing
1318 |   // a spill lies in the middle of the path, the previously seen spill will be
1319 |   // redundant and the node containing it will be removed. All the nodes on the
1320 |   // path starting from the first node with a non-redundant spill to the Root
1321 |   // node will be added to the WorkSet, which will contain all the possible
1322 |   // locations where spills may be hoisted to after the loop below is done.
1323 |   for (const auto Spill : Spills) {
1324 |     MachineBasicBlock *Block = Spill->getParent();
1325 |     MachineDomTreeNode *Node = MDT[Block];
1326 |     MachineInstr *SpillToRm = nullptr;
1327 |     while (Node != RootIDomNode) {
1328 |       // If Node dominates Block, and it already contains a spill, the spill in
1329 |       // Block will be redundant.
1330 |       if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1331 |         SpillToRm = SpillBBToSpill[MDT[Block]];
1332 |         break;
1333 |       /// If we see the Node already in WorkSet, the path from the Node to
1334 |       /// the Root node must already be traversed by another spill.
1335 |       /// Then there is no need to repeat.
1336 |       } else if (WorkSet.count(Node)) {
1337 |         break;
1338 |       } else {
1339 |         NodesOnPath.insert(Node);
1340 |       }
1341 |       Node = Node->getIDom();
1342 |     }
1343 |     if (SpillToRm) {
1344 |       SpillsToRm.push_back(SpillToRm);
1345 |     } else {
1346 |       // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1347 |       // set the initial status before hoisting starts. The value of BBs
1348 |       // containing original spills is set to 0, in order to discriminate
1349 |       // them from BBs containing hoisted spills, which will be inserted into
1350 |       // SpillsToKeep later during hoisting.
1351 |       SpillsToKeep[MDT[Block]] = 0;
1352 |       WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1353 |     }
1354 |     NodesOnPath.clear();
1355 |   }
1356 | 
1357 |   // Sort the nodes in WorkSet in top-down order and save the nodes
1358 |   // in Orders. Orders will be used for hoisting in runHoistSpills.
1359 |   unsigned idx = 0;
1360 |   Orders.push_back(MDT.getBase().getNode(Root));
1361 |   do {
1362 |     MachineDomTreeNode *Node = Orders[idx++];
1363 |     for (MachineDomTreeNode *Child : Node->children()) {
1364 |       if (WorkSet.count(Child))
1365 |         Orders.push_back(Child);
1366 |     }
1367 |   } while (idx != Orders.size());
1368 |   assert(Orders.size() == WorkSet.size() &&
1369 |          "Orders and WorkSet should have the same size");
1370 | 
1371 | #ifndef NDEBUG
1372 |   LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1373 |   SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1374 |   for (; RIt != Orders.rend(); RIt++)
1375 |     LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1376 |   LLVM_DEBUG(dbgs() << "\n");
1377 | #endif
1378 | }
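
A small worked example (assumed CFG, not from any test): with a dominator
tree Root -> {A, B} and A -> {C}, and original spills in B and C, the walk
from C adds C, A, and Root to WorkSet; the walk from B adds B and then stops
at Root, which is already in WorkSet. Orders comes out top-down as
[Root, A, B, C], which runHoistSpills then scans in reverse.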
1379 | 
1380 | /// Try to hoist spills according to BB hotness. The spills to be removed will
1381 | /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1382 | /// \p SpillsToIns.
1383 | void HoistSpillHelper::runHoistSpills(
1384 |     LiveInterval &OrigLI, VNInfo &OrigVNI,
1385 |     SmallPtrSet<MachineInstr *, 16> &Spills,
1386 |     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1387 |     DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1388 |   // Visit order of dominator tree nodes.
1389 |   SmallVector<MachineDomTreeNode *, 32> Orders;
1390 |   // SpillsToKeep contains all the nodes where spills are to be inserted
1391 |   // during hoisting. If the spill to be inserted is an original spill
1392 |   // (not a hoisted one), the value of the map entry is 0. If the spill
1393 |   // is a hoisted spill, the value of the map entry is the VReg to be used
1394 |   // as the source of the spill.
1395 |   DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1396 |   // Map from BB to the first spill inside of it.
1397 |   DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1398 | 
1399 |   rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1400 | 
1401 |   MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1402 |   getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1403 |                  SpillBBToSpill);
1404 | 
1405 |   // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1406 |   // nodes set and the cost of all the spills inside those nodes.
1407 |   // The nodes set are the locations where spills are to be inserted
1408 |   // in the subtree of current node.
1409 |   using NodesCostPair =
1410 |       std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1411 |   DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1412 | 
1413 |   // Iterate the Orders set in reverse order, which will be a bottom-up order
1414 |   // in the dominator tree. Once we visit a dom tree node, we know its
1415 |   // children have already been visited and the spill locations in the
1416 |   // subtrees of all the children have been determined.
1417 |   SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1418 |   for (; RIt != Orders.rend(); RIt++) {
1419 |     MachineBasicBlock *Block = (*RIt)->getBlock();
1420 | 
1421 |     // If Block contains an original spill, simply continue.
1422 |     if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1423 |       SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1424 |       // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1425 |       SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1426 |       continue;
1427 |     }
1428 | 
1429 |     // Collect spills in the subtree of the current node (*RIt) into
1430 |     // SpillsInSubTreeMap[*RIt].first.
1431 |     for (MachineDomTreeNode *Child : (*RIt)->children()) {
1432 |       if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1433 |         continue;
1434 |       // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1435 |       // should be placed before getting the begin and end iterators of
1436 |       // SpillsInSubTreeMap[Child].first, or else the iterators may be
1437 |       // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
1438 |       // and the map grows and then the original buckets in the map are moved.
1439 |       SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1440 |           SpillsInSubTreeMap[*RIt].first;
1441 |       BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1442 |       SubTreeCost += SpillsInSubTreeMap[Child].second;
1443 |       auto BI = SpillsInSubTreeMap[Child].first.begin();
1444 |       auto EI = SpillsInSubTreeMap[Child].first.end();
1445 |       SpillsInSubTree.insert(BI, EI);
1446 |       SpillsInSubTreeMap.erase(Child);
1447 |     }
1448 | 
1449 |     SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1450 |         SpillsInSubTreeMap[*RIt].first;
1451 |     BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1452 |     // No spills in subtree, simply continue.
1453 |     if (SpillsInSubTree.empty())
1454 |       continue;
1455 | 
1456 |     // Check whether Block is a possible candidate to insert a spill.
1457 |     Register LiveReg;
1458 |     if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1459 |       continue;
1460 | 
1461 |     // If there are multiple spills that could be merged, bias a little
1462 |     // to hoist the spill.
1463 |     BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1464 |                                        ? BranchProbability(9, 10)
1465 |                                        : BranchProbability(1, 1);
1466 |     if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1467 |       // Hoist: Move spills to the current Block.
1468 |       for (const auto SpillBB : SpillsInSubTree) {
1469 |         // When SpillBB is a BB containing an original spill, insert the spill
1470 |         // into SpillsToRm.
1471 |         if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1472 |             !SpillsToKeep[SpillBB]) {
1473 |           MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1474 |           SpillsToRm.push_back(SpillToRm);
1475 |         }
1476 |         // SpillBB will not contain a spill anymore; remove it from SpillsToKeep.
1477 |         SpillsToKeep.erase(SpillBB);
1478 |       }
1479 |       // Current Block is the BB containing the new hoisted spill. Add it to
1480 |       // SpillsToKeep. LiveReg is the source of the new spill.
1481 |       SpillsToKeep[*RIt] = LiveReg;
1482 |       LLVM_DEBUG({
1483 |         dbgs() << "spills in BB: ";
1484 |         for (const auto Rspill : SpillsInSubTree)
1485 |           dbgs() << Rspill->getBlock()->getNumber() << " ";
1486 |         dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1487 |                << "\n";
1488 |       });
1489 |       SpillsInSubTree.clear();
1490 |       SpillsInSubTree.insert(*RIt);
1491 |       SubTreeCost = MBFI.getBlockFreq(Block);
1492 |     }
1493 |   }
1494 |   // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1495 |   // save them to SpillsToIns.
1496 |   for (const auto &Ent : SpillsToKeep) {
1497 |     if (Ent.second)
1498 |       SpillsToIns[Ent.first->getBlock()] = Ent.second;
1499 |   }
1500 | }
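
The hoisting test above compares the accumulated frequency of all spills in a
subtree against the candidate block's own frequency, discounted by 9/10 when
more than one spill would merge into one. A minimal stand-alone sketch of just
that comparison, with BlockFrequency reduced to a plain scaled integer (the
names and types here are illustrative, not LLVM API):

  // Returns true when placing one spill in the candidate block is cheaper
  // than keeping the NumSubTreeSpills spills below it.
  static bool wouldHoistSpills(unsigned long long SubTreeFreq,
                               unsigned long long BlockFreq,
                               unsigned NumSubTreeSpills) {
    // Bias a little toward hoisting when several spills can merge into one:
    // accept the hoist already at 9/10 of the candidate block's frequency.
    unsigned long long Threshold =
        NumSubTreeSpills > 1 ? (BlockFreq * 9) / 10 : BlockFreq;
    return SubTreeFreq > Threshold;
  }

For instance, two spills in blocks of frequency 60 each (SubTreeFreq == 120)
would be hoisted into a shared dominator of frequency 100, since
120 > 100 * 9/10 == 90, while a single such spill (60) would stay put.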
1501 | 
1502 | /// For spills with equal values, remove redundant spills and hoist those left
1503 | /// to less hot spots.
1504 | ///
1505 | /// Spills with equal values will be collected into the same set in
1506 | /// MergeableSpills when the spill is inserted. These equal spills originate
1507 | /// from the same defining instruction and are dominated by that instruction.
1508 | /// Before hoisting all the equal spills, redundant spills inside the same
1509 | /// BB are first marked to be deleted. Then starting from the spills left, walk
1510 | /// up the dominator tree towards the Root node where the defining instruction
1511 | /// is located, mark the dominated spills to be deleted along the way and
1512 | /// collect the BB nodes on the path from non-dominated spills to the defining
1513 | /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1514 | /// where we consider hoisting the spills. We iterate the WorkSet in
1515 | /// bottom-up order, and for each node, we will decide whether to hoist spills
1516 | /// inside its subtree to that node. In this way, we can get benefit locally
1517 | /// even if hoisting all the equal spills to one cold place is impossible.
1518 | void HoistSpillHelper::hoistAllSpills() {
1519 |   SmallVector<Register, 4> NewVRegs;
1520 |   LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1521 | 
1522 |   for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1523 |     Register Reg = Register::index2VirtReg(i);
1524 |     Register Original = VRM.getPreSplitReg(Reg);
1525 |     if (!MRI.def_empty(Reg))
1526 |       Virt2SiblingsMap[Original].insert(Reg);
1527 |   }
1528 | 
1529 |   // Each entry in MergeableSpills contains a spill set with equal values.
1530 |   for (auto &Ent : MergeableSpills) {
1531 |     int Slot = Ent.first.first;
1532 |     LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1533 |     VNInfo *OrigVNI = Ent.first.second;
1534 |     SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1535 |     if (Ent.second.empty())
1536 |       continue;
1537 | 
1538 |     LLVM_DEBUG({
1539 |       dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1540 |              << "Equal spills in BB: ";
1541 |       for (const auto spill : EqValSpills)
1542 |         dbgs() << spill->getParent()->getNumber() << " ";
1543 |       dbgs() << "\n";
1544 |     });
1545 | 
1546 |     // SpillsToRm is the spill set to be removed from EqValSpills.
1547 |     SmallVector<MachineInstr *, 16> SpillsToRm;
1548 |     // SpillsToIns is the spill set to be newly inserted after hoisting.
1549 |     DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1550 | 
1551 |     runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1552 | 
1553 |     LLVM_DEBUG({
1554 |       dbgs() << "Finally inserted spills in BB: ";
1555 |       for (const auto &Ispill : SpillsToIns)
1556 |         dbgs() << Ispill.first->getNumber() << " ";
1557 |       dbgs() << "\nFinally removed spills in BB: ";
1558 |       for (const auto Rspill : SpillsToRm)
1559 |         dbgs() << Rspill->getParent()->getNumber() << " ";
1560 |       dbgs() << "\n";
1561 |     });
1562 | 
1563 |     // Stack live range update.
1564 |     LiveInterval &StackIntvl = LSS.getInterval(Slot);
1565 |     if (!SpillsToIns.empty() || !SpillsToRm.empty())
1566 |       StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1567 |                                      StackIntvl.getValNumInfo(0));
1568 | 
1569 |     // Insert hoisted spills.
1570 |     for (auto const &Insert : SpillsToIns) {
1571 |       MachineBasicBlock *BB = Insert.first;
1572 |       Register LiveReg = Insert.second;
1573 |       MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1574 |       MachineInstrSpan MIS(MII, BB);
1575 |       TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1576 |                               MRI.getRegClass(LiveReg), &TRI);
1577 |       LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1578 |       for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1579 |         getVDefInterval(MI, LIS);
1580 |       ++NumSpills;
1581 |     }
1582 | 
1583 |     // Remove redundant spills or change them to dead instructions.
1584 |     NumSpills -= SpillsToRm.size();
1585 |     for (auto const RMEnt : SpillsToRm) {
1586 |       RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1587 |       for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1588 |         MachineOperand &MO = RMEnt->getOperand(i - 1);
1589 |         if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1590 |           RMEnt->RemoveOperand(i - 1);
1591 |       }
1592 |     }
1593 |     Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1594 |   }
1595 | }
1596 | 
1597 | /// For a VirtReg clone, the \p New register should have the same physreg or
1598 | /// stackslot as the \p Old register.
1599 | void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1600 |   if (VRM.hasPhys(Old))
1601 |     VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1602 |   else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1603 |     VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1604 |   else
1605 |     llvm_unreachable("VReg should be assigned either physreg or stackslot");
1606 |   if (VRM.hasShape(Old))
1607 |     VRM.assignVirt2Shape(New, VRM.getShape(Old));
1608 | }
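
File: | llvm/CodeGen/MachineInstr.h |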
1 | //===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the declaration of the MachineInstr class, which is the |
10 | // basic representation for all target dependent machine instructions used by |
11 | // the back end. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_CODEGEN_MACHINEINSTR_H |
16 | #define LLVM_CODEGEN_MACHINEINSTR_H |
17 | |
18 | #include "llvm/ADT/DenseMapInfo.h" |
19 | #include "llvm/ADT/PointerSumType.h" |
20 | #include "llvm/ADT/ilist.h" |
21 | #include "llvm/ADT/ilist_node.h" |
22 | #include "llvm/ADT/iterator_range.h" |
23 | #include "llvm/CodeGen/MachineMemOperand.h" |
24 | #include "llvm/CodeGen/MachineOperand.h" |
25 | #include "llvm/CodeGen/TargetOpcodes.h" |
26 | #include "llvm/IR/DebugLoc.h" |
27 | #include "llvm/IR/InlineAsm.h" |
28 | #include "llvm/MC/MCInstrDesc.h" |
29 | #include "llvm/MC/MCSymbol.h" |
30 | #include "llvm/Support/ArrayRecycler.h" |
31 | #include "llvm/Support/TrailingObjects.h" |
32 | #include <algorithm> |
33 | #include <cassert> |
34 | #include <cstdint> |
35 | #include <utility> |
36 | |
37 | namespace llvm { |
38 | |
39 | class AAResults; |
40 | template <typename T> class ArrayRef; |
41 | class DIExpression; |
42 | class DILocalVariable; |
43 | class MachineBasicBlock; |
44 | class MachineFunction; |
45 | class MachineRegisterInfo; |
46 | class ModuleSlotTracker; |
47 | class raw_ostream; |
48 | template <typename T> class SmallVectorImpl; |
49 | class SmallBitVector; |
50 | class StringRef; |
51 | class TargetInstrInfo; |
52 | class TargetRegisterClass; |
53 | class TargetRegisterInfo; |
54 | |
55 | //===----------------------------------------------------------------------===// |
56 | /// Representation of each machine instruction. |
57 | /// |
58 | /// This class isn't a POD type, but it must have a trivial destructor. When a |
59 | /// MachineFunction is deleted, all the contained MachineInstrs are deallocated |
60 | /// without having their destructor called. |
61 | /// |
62 | class MachineInstr |
63 | : public ilist_node_with_parent<MachineInstr, MachineBasicBlock, |
64 | ilist_sentinel_tracking<true>> { |
65 | public: |
66 | using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator; |
67 | |
68 | /// Flags to specify different kinds of comments to output in |
69 | /// assembly code. These flags carry semantic information not |
70 | /// otherwise easily derivable from the IR text. |
71 | /// |
72 | enum CommentFlag { |
73 | ReloadReuse = 0x1, // higher bits are reserved for target dep comments. |
74 | NoSchedComment = 0x2, |
75 | TAsmComments = 0x4 // Target Asm comments should start from this value. |
76 | }; |
77 | |
78 | enum MIFlag { |
79 | NoFlags = 0, |
80 | FrameSetup = 1 << 0, // Instruction is used as a part of |
81 | // function frame setup code. |
82 | FrameDestroy = 1 << 1, // Instruction is used as a part of |
83 | // function frame destruction code. |
84 | BundledPred = 1 << 2, // Instruction has bundled predecessors. |
85 | BundledSucc = 1 << 3, // Instruction has bundled successors. |
86 | FmNoNans = 1 << 4, // Instruction does not support Fast |
87 | // math nan values. |
88 | FmNoInfs = 1 << 5, // Instruction does not support Fast |
89 | // math infinity values. |
90 | FmNsz = 1 << 6, // Instruction is not required to retain |
91 | // signed zero values. |
92 | FmArcp = 1 << 7, // Instruction supports Fast math |
93 | // reciprocal approximations. |
94 | FmContract = 1 << 8, // Instruction supports Fast math |
95 | // contraction operations like fma. |
96 | FmAfn = 1 << 9, // Instruction may map to Fast math |
97 | // intrinsic approximation.
98 | FmReassoc = 1 << 10, // Instruction supports Fast math |
99 | // reassociation of operand order. |
100 | NoUWrap = 1 << 11, // Instruction supports binary operator |
101 | // no unsigned wrap. |
102 | NoSWrap = 1 << 12, // Instruction supports binary operator |
103 | // no signed wrap. |
104 | IsExact = 1 << 13, // Instruction supports division that is
105 | // known to be exact.
106 | NoFPExcept = 1 << 14, // Instruction does not raise |
107 | // floating-point exceptions.
108 | NoMerge = 1 << 15, // Passes that drop source location info |
109 | // (e.g. branch folding) should skip |
110 | // this instruction. |
111 | }; |
112 | |
113 | private: |
114 | const MCInstrDesc *MCID; // Instruction descriptor. |
115 | MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block. |
116 | |
117 | // Operands are allocated by an ArrayRecycler. |
118 | MachineOperand *Operands = nullptr; // Pointer to the first operand. |
119 | unsigned NumOperands = 0; // Number of operands on instruction. |
120 | |
121 | uint16_t Flags = 0; // Various bits of additional |
122 | // information about machine |
123 | // instruction. |
124 | |
125 | uint8_t AsmPrinterFlags = 0; // Various bits of information used by |
126 | // the AsmPrinter to emit helpful |
127 | // comments. This is *not* semantic |
128 | // information. Do not use this for |
129 | // anything other than to convey comment |
130 | // information to AsmPrinter. |
131 | |
132 | // OperandCapacity has uint8_t size, so it should be next to AsmPrinterFlags |
133 | // to properly pack. |
134 | using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity; |
135 | OperandCapacity CapOperands; // Capacity of the Operands array. |
136 | |
137 | /// Internal implementation detail class that provides out-of-line storage for |
138 | /// extra info used by the machine instruction when this info cannot be stored |
139 | /// in-line within the instruction itself. |
140 | /// |
141 | /// This has to be defined eagerly due to the implementation constraints of |
142 | /// `PointerSumType` where it is used. |
143 | class ExtraInfo final |
144 | : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *, MDNode *> { |
145 | public: |
146 | static ExtraInfo *create(BumpPtrAllocator &Allocator, |
147 | ArrayRef<MachineMemOperand *> MMOs, |
148 | MCSymbol *PreInstrSymbol = nullptr, |
149 | MCSymbol *PostInstrSymbol = nullptr, |
150 | MDNode *HeapAllocMarker = nullptr) { |
151 | bool HasPreInstrSymbol = PreInstrSymbol != nullptr; |
152 | bool HasPostInstrSymbol = PostInstrSymbol != nullptr; |
153 | bool HasHeapAllocMarker = HeapAllocMarker != nullptr; |
154 | auto *Result = new (Allocator.Allocate( |
155 | totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *>( |
156 | MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol, |
157 | HasHeapAllocMarker), |
158 | alignof(ExtraInfo))) |
159 | ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol, |
160 | HasHeapAllocMarker); |
161 | |
162 | // Copy the actual data into the trailing objects. |
163 | std::copy(MMOs.begin(), MMOs.end(), |
164 | Result->getTrailingObjects<MachineMemOperand *>()); |
165 | |
166 | if (HasPreInstrSymbol) |
167 | Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol; |
168 | if (HasPostInstrSymbol) |
169 | Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] = |
170 | PostInstrSymbol; |
171 | if (HasHeapAllocMarker) |
172 | Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker; |
173 | |
174 | return Result; |
175 | } |
176 | |
177 | ArrayRef<MachineMemOperand *> getMMOs() const { |
178 | return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs); |
179 | } |
180 | |
181 | MCSymbol *getPreInstrSymbol() const { |
182 | return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr; |
183 | } |
184 | |
185 | MCSymbol *getPostInstrSymbol() const { |
186 | return HasPostInstrSymbol |
187 | ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] |
188 | : nullptr; |
189 | } |
190 | |
191 | MDNode *getHeapAllocMarker() const { |
192 | return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr; |
193 | } |
194 | |
195 | private: |
196 | friend TrailingObjects; |
197 | |
198 | // Description of the extra info, used to interpret the actual optional |
199 | // data appended. |
200 | // |
201 | // Note that this is not terribly space optimized. This leaves a great deal |
202 | // of flexibility to fit more in here later. |
203 | const int NumMMOs; |
204 | const bool HasPreInstrSymbol; |
205 | const bool HasPostInstrSymbol; |
206 | const bool HasHeapAllocMarker; |
207 | |
208 | // Implement the `TrailingObjects` internal API. |
209 | size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const { |
210 | return NumMMOs; |
211 | } |
212 | size_t numTrailingObjects(OverloadToken<MCSymbol *>) const { |
213 | return HasPreInstrSymbol + HasPostInstrSymbol; |
214 | } |
215 | size_t numTrailingObjects(OverloadToken<MDNode *>) const { |
216 | return HasHeapAllocMarker; |
217 | } |
218 | |
219 | // Just a boring constructor to allow us to initialize the sizes. Always use |
220 | // the `create` routine above. |
221 | ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol, |
222 | bool HasHeapAllocMarker) |
223 | : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol), |
224 | HasPostInstrSymbol(HasPostInstrSymbol), |
225 | HasHeapAllocMarker(HasHeapAllocMarker) {} |
226 | }; |
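
  An editorial note on the layout (derived from the TrailingObjects
  declaration above; sizes illustrative): for an instruction with two MMOs
  and only a post-instruction symbol, create() makes one allocation shaped
  roughly as

    [ExtraInfo][MachineMemOperand*, MachineMemOperand*][MCSymbol* (post)]

  and getPostInstrSymbol() then reads the symbol array at index
  HasPreInstrSymbol, i.e. index 0 in this case.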
227 | |
228 | /// Enumeration of the kinds of inline extra info available. It is important |
229 | /// that the `MachineMemOperand` inline kind has a tag value of zero to make |
230 | /// it accessible as an `ArrayRef`. |
231 | enum ExtraInfoInlineKinds { |
232 | EIIK_MMO = 0, |
233 | EIIK_PreInstrSymbol, |
234 | EIIK_PostInstrSymbol, |
235 | EIIK_OutOfLine |
236 | }; |
237 | |
238 | // We store extra information about the instruction here. The common case is |
239 | // expected to be nothing or a single pointer (typically a MMO or a symbol). |
240 | // We work to optimize this common case by storing it inline here rather than |
241 | // requiring a separate allocation, but we fall back to an allocation when |
242 | // multiple pointers are needed. |
243 | PointerSumType<ExtraInfoInlineKinds, |
244 | PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>, |
245 | PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>, |
246 | PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>, |
247 | PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>> |
248 | Info; |
249 | |
250 | DebugLoc debugLoc; // Source line information. |
251 | |
252 | /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values |
253 | /// defined by this instruction. |
254 | unsigned DebugInstrNum; |
255 | |
256 | // Intrusive list support |
257 | friend struct ilist_traits<MachineInstr>; |
258 | friend struct ilist_callback_traits<MachineBasicBlock>; |
259 | void setParent(MachineBasicBlock *P) { Parent = P; } |
260 | |
261 | /// This constructor creates a copy of the given |
262 | /// MachineInstr in the given MachineFunction. |
263 | MachineInstr(MachineFunction &, const MachineInstr &); |
264 | |
265 | /// This constructor creates a MachineInstr and adds the implicit operands.
266 | /// It reserves space for number of operands specified by |
267 | /// MCInstrDesc. An explicit DebugLoc is supplied. |
268 | MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl, |
269 | bool NoImp = false); |
270 | |
271 | // MachineInstrs are pool-allocated and owned by MachineFunction. |
272 | friend class MachineFunction; |
273 | |
274 | void |
275 | dumprImpl(const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth, |
276 | SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const; |
277 | |
278 | public: |
279 | MachineInstr(const MachineInstr &) = delete; |
280 | MachineInstr &operator=(const MachineInstr &) = delete; |
281 | // Use MachineFunction::DeleteMachineInstr() instead. |
282 | ~MachineInstr() = delete; |
283 | |
284 | const MachineBasicBlock* getParent() const { return Parent; } |
285 | MachineBasicBlock* getParent() { return Parent; } |
286 | |
287 | /// Move the instruction before \p MovePos. |
288 | void moveBefore(MachineInstr *MovePos); |
289 | |
290 | /// Return the function that contains the basic block that this instruction |
291 | /// belongs to. |
292 | /// |
293 | /// Note: this is undefined behaviour if the instruction does not have a |
294 | /// parent. |
295 | const MachineFunction *getMF() const; |
296 | MachineFunction *getMF() { |
297 | return const_cast<MachineFunction *>( |
298 | static_cast<const MachineInstr *>(this)->getMF()); |
299 | } |
300 | |
301 | /// Return the asm printer flags bitvector. |
302 | uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; } |
303 | |
304 | /// Clear the AsmPrinter bitvector. |
305 | void clearAsmPrinterFlags() { AsmPrinterFlags = 0; } |
306 | |
307 | /// Return whether an AsmPrinter flag is set. |
308 | bool getAsmPrinterFlag(CommentFlag Flag) const { |
309 | return AsmPrinterFlags & Flag; |
310 | } |
311 | |
312 | /// Set a flag for the AsmPrinter. |
313 | void setAsmPrinterFlag(uint8_t Flag) { |
314 | AsmPrinterFlags |= Flag; |
315 | } |
316 | |
317 | /// Clear specific AsmPrinter flags. |
318 | void clearAsmPrinterFlag(CommentFlag Flag) { |
319 | AsmPrinterFlags &= ~Flag; |
320 | } |
321 | |
322 | /// Return the MI flags bitvector. |
323 | uint16_t getFlags() const { |
324 | return Flags; |
325 | } |
326 | |
327 | /// Return whether an MI flag is set. |
328 | bool getFlag(MIFlag Flag) const { |
329 | return Flags & Flag; |
330 | } |
331 | |
332 | /// Set a MI flag. |
333 | void setFlag(MIFlag Flag) { |
334 | Flags |= (uint16_t)Flag; |
335 | } |
336 | |
337 | void setFlags(unsigned flags) { |
338 | // Filter out the automatically maintained flags. |
339 | unsigned Mask = BundledPred | BundledSucc; |
340 | Flags = (Flags & Mask) | (flags & ~Mask); |
341 | } |
342 | |
343 | /// clearFlag - Clear a MI flag. |
344 | void clearFlag(MIFlag Flag) { |
345 | Flags &= ~((uint16_t)Flag); |
346 | } |
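
// Example (editor's sketch, not part of this header): exercising the flag
// accessors above. FrameSetup is one of the MIFlag values; the helper name
// is illustrative only.
static void toggleFrameSetup(MachineInstr &MI) {
  MI.setFlag(MachineInstr::FrameSetup);      // set a semantic flag
  if (MI.getFlag(MachineInstr::FrameSetup))  // query it back
    MI.clearFlag(MachineInstr::FrameSetup);  // and clear it again
}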
347 | |
348 | /// Return true if MI is in a bundle (but not the first MI in a bundle). |
349 | /// |
350 | /// A bundle looks like this before it's finalized: |
351 | /// ---------------- |
352 | /// | MI | |
353 | /// ---------------- |
354 | /// | |
355 | /// ---------------- |
356 | /// | MI * | |
357 | /// ---------------- |
358 | /// | |
359 | /// ---------------- |
360 | /// | MI * | |
361 | /// ---------------- |
362 | /// In this case, the first MI starts a bundle but is not inside a bundle, the |
363 | /// next 2 MIs are considered "inside" the bundle. |
364 | /// |
365 | /// After a bundle is finalized, it looks like this: |
366 | /// ---------------- |
367 | /// | Bundle | |
368 | /// ---------------- |
369 | /// | |
370 | /// ---------------- |
371 | /// | MI * | |
372 | /// ---------------- |
373 | /// | |
374 | /// ---------------- |
375 | /// | MI * | |
376 | /// ---------------- |
377 | /// | |
378 | /// ---------------- |
379 | /// | MI * | |
380 | /// ---------------- |
381 | /// The first instruction has the special opcode "BUNDLE". It's not "inside" |
382 | /// a bundle, but the next three MIs are. |
383 | bool isInsideBundle() const { |
384 | return getFlag(BundledPred); |
385 | } |
386 | |
387 | /// Return true if this instruction is part of a bundle. This is true if |
388 | /// either this instruction or its following instruction is marked "InsideBundle". |
389 | bool isBundled() const { |
390 | return isBundledWithPred() || isBundledWithSucc(); |
391 | } |
392 | |
393 | /// Return true if this instruction is part of a bundle, and it is not the |
394 | /// first instruction in the bundle. |
395 | bool isBundledWithPred() const { return getFlag(BundledPred); } |
396 | |
397 | /// Return true if this instruction is part of a bundle, and it is not the |
398 | /// last instruction in the bundle. |
399 | bool isBundledWithSucc() const { return getFlag(BundledSucc); } |
400 | |
401 | /// Bundle this instruction with its predecessor. This can be an unbundled |
402 | /// instruction, or it can be the first instruction in a bundle. |
403 | void bundleWithPred(); |
404 | |
405 | /// Bundle this instruction with its successor. This can be an unbundled |
406 | /// instruction, or it can be the last instruction in a bundle. |
407 | void bundleWithSucc(); |
408 | |
409 | /// Break bundle above this instruction. |
410 | void unbundleFromPred(); |
411 | |
412 | /// Break bundle below this instruction. |
413 | void unbundleFromSucc(); |
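
// Example (editor's sketch): counting the instructions in a finalized bundle
// by following the BundledSucc flag, mirroring what getBundleSize() reports.
// Assumes `Header` is the BUNDLE header; getNextNode() comes from the
// intrusive-list base class.
static unsigned bundleLength(MachineInstr &Header) {
  unsigned N = 0;
  for (MachineInstr *MI = &Header; MI->isBundledWithSucc();
       MI = MI->getNextNode())
    ++N;
  return N; // number of instructions after the header
}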
414 | |
415 | /// Returns the debug location of this MachineInstr. |
416 | const DebugLoc &getDebugLoc() const { return debugLoc; } |
417 | |
418 | /// Return the operand containing the offset to be used if this DBG_VALUE |
419 | /// instruction is indirect; will be an invalid register if this value is |
420 | /// not indirect, and an immediate with value 0 otherwise. |
421 | const MachineOperand &getDebugOffset() const { |
422 | assert(isDebugValue() && "not a DBG_VALUE"); |
423 | return getOperand(1); |
424 | } |
425 | MachineOperand &getDebugOffset() { |
426 | assert(isDebugValue() && "not a DBG_VALUE"); |
427 | return getOperand(1); |
428 | } |
429 | |
430 | /// Return the operand for the debug variable referenced by |
431 | /// this DBG_VALUE instruction. |
432 | const MachineOperand &getDebugVariableOp() const; |
433 | MachineOperand &getDebugVariableOp(); |
434 | |
435 | /// Return the debug variable referenced by |
436 | /// this DBG_VALUE instruction. |
437 | const DILocalVariable *getDebugVariable() const; |
438 | |
439 | /// Return the operand for the complex address expression referenced by |
440 | /// this DBG_VALUE instruction. |
441 | MachineOperand &getDebugExpressionOp(); |
442 | |
443 | /// Return the complex address expression referenced by |
444 | /// this DBG_VALUE instruction. |
445 | const DIExpression *getDebugExpression() const; |
446 | |
447 | /// Return the debug label referenced by |
448 | /// this DBG_LABEL instruction. |
449 | const DILabel *getDebugLabel() const; |
450 | |
451 | /// Fetch the instruction number of this MachineInstr. If it does not have |
452 | /// one already, a new and unique number will be assigned. |
453 | unsigned getDebugInstrNum(); |
454 | |
455 | /// Examine the instruction number of this MachineInstr. May be zero if |
456 | /// it hasn't been assigned a number yet. |
457 | unsigned peekDebugInstrNum() const { return DebugInstrNum; } |
458 | |
459 | /// Set instruction number of this MachineInstr. Avoid using unless you're |
460 | /// deserializing this information. |
461 | void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; } |
462 | |
463 | /// Emit an error referring to the source location of this instruction. |
464 | /// This should only be used for inline assembly that is somehow |
465 | /// impossible to compile. Other errors should have been handled much |
466 | /// earlier. |
467 | /// |
468 | /// If this method returns, the caller should try to recover from the error. |
469 | void emitError(StringRef Msg) const; |
470 | |
471 | /// Returns the target instruction descriptor of this MachineInstr. |
472 | const MCInstrDesc &getDesc() const { return *MCID; } |
473 | |
474 | /// Returns the opcode of this MachineInstr. |
475 | unsigned getOpcode() const { return MCID->Opcode; } |
476 | |
477 | /// Returns the total number of operands. |
478 | unsigned getNumOperands() const { return NumOperands; } |
479 | |
480 | /// Returns the total number of operands which are debug locations. |
481 | unsigned getNumDebugOperands() const { |
482 | return std::distance(debug_operands().begin(), debug_operands().end()); |
483 | } |
484 | |
485 | const MachineOperand& getOperand(unsigned i) const { |
486 | assert(i < getNumOperands() && "getOperand() out of range!"); |
487 | return Operands[i]; |
488 | } |
489 | MachineOperand& getOperand(unsigned i) { |
490 | assert(i < getNumOperands() && "getOperand() out of range!"); |
491 | return Operands[i]; |
492 | } |
493 | |
494 | MachineOperand &getDebugOperand(unsigned Index) { |
495 | assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!"); |
496 | return *(debug_operands().begin() + Index); |
497 | } |
498 | const MachineOperand &getDebugOperand(unsigned Index) const { |
499 | assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!"); |
500 | return *(debug_operands().begin() + Index); |
501 | } |
502 | |
503 | /// Returns a pointer to the operand corresponding to a debug use of Reg, or |
504 | /// nullptr if Reg is not used in any debug operand. |
505 | const MachineOperand *getDebugOperandForReg(Register Reg) const { |
506 | const MachineOperand *RegOp = |
507 | find_if(debug_operands(), [Reg](const MachineOperand &Op) { |
508 | return Op.isReg() && Op.getReg() == Reg; |
509 | }); |
510 | return RegOp == adl_end(debug_operands()) ? nullptr : RegOp; |
511 | } |
512 | MachineOperand *getDebugOperandForReg(Register Reg) { |
513 | MachineOperand *RegOp = |
514 | find_if(debug_operands(), [Reg](const MachineOperand &Op) { |
515 | return Op.isReg() && Op.getReg() == Reg; |
516 | }); |
517 | return RegOp == adl_end(debug_operands()) ? nullptr : RegOp; |
518 | } |
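
// Example (editor's sketch): retargeting a DBG_VALUE from OldReg to NewReg
// using getDebugOperandForReg(); a no-op if OldReg is not referenced.
static void retargetDebugValue(MachineInstr &MI, Register OldReg,
                               Register NewReg) {
  if (MachineOperand *MO = MI.getDebugOperandForReg(OldReg))
    MO->setReg(NewReg);
}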
519 | |
520 | unsigned getDebugOperandIndex(const MachineOperand *Op) const { |
521 | assert(Op >= adl_begin(debug_operands()) && |
522 | Op <= adl_end(debug_operands()) && "Expected a debug operand."); |
523 | return std::distance(adl_begin(debug_operands()), Op); |
524 | } |
525 | |
526 | /// Returns the total number of definitions. |
527 | unsigned getNumDefs() const { |
528 | return getNumExplicitDefs() + MCID->getNumImplicitDefs(); |
529 | } |
530 | |
531 | /// Returns true if the instruction has at least one implicit definition. |
532 | bool hasImplicitDef() const { |
533 | for (unsigned I = getNumExplicitOperands(), E = getNumOperands(); |
534 | I != E; ++I) { |
535 | const MachineOperand &MO = getOperand(I); |
536 | if (MO.isDef() && MO.isImplicit()) |
537 | return true; |
538 | } |
539 | return false; |
540 | } |
541 | |
542 | /// Returns the number of implicit operands. |
543 | unsigned getNumImplicitOperands() const { |
544 | return getNumOperands() - getNumExplicitOperands(); |
545 | } |
546 | |
547 | /// Return true if operand \p OpIdx is a subregister index. |
548 | bool isOperandSubregIdx(unsigned OpIdx) const { |
549 | assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate && |
550 | "Expected MO_Immediate operand type."); |
551 | if (isExtractSubreg() && OpIdx == 2) |
552 | return true; |
553 | if (isInsertSubreg() && OpIdx == 3) |
554 | return true; |
555 | if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0) |
556 | return true; |
557 | if (isSubregToReg() && OpIdx == 3) |
558 | return true; |
559 | return false; |
560 | } |
561 | |
562 | /// Returns the number of non-implicit operands. |
563 | unsigned getNumExplicitOperands() const; |
564 | |
565 | /// Returns the number of non-implicit definitions. |
566 | unsigned getNumExplicitDefs() const; |
567 | |
568 | /// iterator/begin/end - Iterate over all operands of a machine instruction. |
569 | using mop_iterator = MachineOperand *; |
570 | using const_mop_iterator = const MachineOperand *; |
571 | |
572 | mop_iterator operands_begin() { return Operands; } |
573 | mop_iterator operands_end() { return Operands + NumOperands; } |
574 | |
575 | const_mop_iterator operands_begin() const { return Operands; } |
576 | const_mop_iterator operands_end() const { return Operands + NumOperands; } |
577 | |
578 | iterator_range<mop_iterator> operands() { |
579 | return make_range(operands_begin(), operands_end()); |
580 | } |
581 | iterator_range<const_mop_iterator> operands() const { |
582 | return make_range(operands_begin(), operands_end()); |
583 | } |
584 | iterator_range<mop_iterator> explicit_operands() { |
585 | return make_range(operands_begin(), |
586 | operands_begin() + getNumExplicitOperands()); |
587 | } |
588 | iterator_range<const_mop_iterator> explicit_operands() const { |
589 | return make_range(operands_begin(), |
590 | operands_begin() + getNumExplicitOperands()); |
591 | } |
592 | iterator_range<mop_iterator> implicit_operands() { |
593 | return make_range(explicit_operands().end(), operands_end()); |
594 | } |
595 | iterator_range<const_mop_iterator> implicit_operands() const { |
596 | return make_range(explicit_operands().end(), operands_end()); |
597 | } |
598 | /// Returns a range over all operands that are used to determine the variable |
599 | /// location for this DBG_VALUE instruction. |
600 | iterator_range<mop_iterator> debug_operands() { |
601 | assert(isDebugValue() && "Must be a debug value instruction."); |
602 | return make_range(operands_begin(), operands_begin() + 1); |
603 | } |
604 | /// \copydoc debug_operands() |
605 | iterator_range<const_mop_iterator> debug_operands() const { |
606 | assert(isDebugValue() && "Must be a debug value instruction."); |
607 | return make_range(operands_begin(), operands_begin() + 1); |
608 | } |
609 | /// Returns a range over all explicit operands that are register definitions. |
610 | /// Implicit definitions are not included! |
611 | iterator_range<mop_iterator> defs() { |
612 | return make_range(operands_begin(), |
613 | operands_begin() + getNumExplicitDefs()); |
614 | } |
615 | /// \copydoc defs() |
616 | iterator_range<const_mop_iterator> defs() const { |
617 | return make_range(operands_begin(), |
618 | operands_begin() + getNumExplicitDefs()); |
619 | } |
620 | /// Returns a range that includes all operands that are register uses. |
621 | /// This may include unrelated operands which are not register uses. |
622 | iterator_range<mop_iterator> uses() { |
623 | return make_range(operands_begin() + getNumExplicitDefs(), operands_end()); |
624 | } |
625 | /// \copydoc uses() |
626 | iterator_range<const_mop_iterator> uses() const { |
627 | return make_range(operands_begin() + getNumExplicitDefs(), operands_end()); |
628 | } |
629 | iterator_range<mop_iterator> explicit_uses() { |
630 | return make_range(operands_begin() + getNumExplicitDefs(), |
631 | operands_begin() + getNumExplicitOperands()); |
632 | } |
633 | iterator_range<const_mop_iterator> explicit_uses() const { |
634 | return make_range(operands_begin() + getNumExplicitDefs(), |
635 | operands_begin() + getNumExplicitOperands()); |
636 | } |
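
// Example (editor's sketch): collecting the registers explicitly defined by
// an instruction via the defs() range declared above.
static SmallVector<Register, 2> explicitDefRegs(const MachineInstr &MI) {
  SmallVector<Register, 2> Regs;
  for (const MachineOperand &MO : MI.defs())
    if (MO.isReg())
      Regs.push_back(MO.getReg());
  return Regs;
}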
637 | |
638 | /// Returns the number of the operand iterator \p I points to. |
639 | unsigned getOperandNo(const_mop_iterator I) const { |
640 | return I - operands_begin(); |
641 | } |
642 | |
643 | /// Access to memory operands of the instruction. If there are none, that does |
644 | /// not imply anything about whether the instruction accesses memory. Instead, |
645 | /// the caller must behave conservatively. |
646 | ArrayRef<MachineMemOperand *> memoperands() const { |
647 | if (!Info) |
648 | return {}; |
649 | |
650 | if (Info.is<EIIK_MMO>()) |
651 | return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1); |
652 | |
653 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
654 | return EI->getMMOs(); |
655 | |
656 | return {}; |
657 | } |
658 | |
659 | /// Access to memory operands of the instruction. |
660 | /// |
661 | /// If `memoperands_begin() == memoperands_end()`, that does not imply |
662 | /// anything about whether the instruction accesses memory. Instead, the caller |
663 | /// must behave conservatively. |
664 | mmo_iterator memoperands_begin() const { return memoperands().begin(); } |
665 | |
666 | /// Access to memory operands of the instruction. |
667 | /// |
668 | /// If `memoperands_begin() == memoperands_end()`, that does not imply |
669 | /// anything about whether the instruction accesses memory. Instead, the caller |
670 | /// must behave conservatively. |
671 | mmo_iterator memoperands_end() const { return memoperands().end(); } |
672 | |
673 | /// Return true if we don't have any memory operands which describe the |
674 | /// memory access done by this instruction. If this is true, calling code |
675 | /// must be conservative. |
676 | bool memoperands_empty() const { return memoperands().empty(); } |
677 | |
678 | /// Return true if this instruction has exactly one MachineMemOperand. |
679 | bool hasOneMemOperand() const { return memoperands().size() == 1; } |
680 | |
681 | /// Return the number of memory operands. |
682 | unsigned getNumMemOperands() const { return memoperands().size(); } |
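
// Example (editor's sketch of the conservative pattern described above): an
// instruction that may access memory but carries no memory operands must be
// treated as if it could touch anything.
static bool mustTreatAsUnknownMemAccess(const MachineInstr &MI) {
  if (!MI.mayLoadOrStore())
    return false;                // no memory access at all
  return MI.memoperands_empty(); // accesses memory, but nothing is known
}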
683 | |
684 | /// Helper to extract a pre-instruction symbol if one has been added. |
685 | MCSymbol *getPreInstrSymbol() const { |
686 | if (!Info) |
687 | return nullptr; |
688 | if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>()) |
689 | return S; |
690 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
691 | return EI->getPreInstrSymbol(); |
692 | |
693 | return nullptr; |
694 | } |
695 | |
696 | /// Helper to extract a post-instruction symbol if one has been added. |
697 | MCSymbol *getPostInstrSymbol() const { |
698 | if (!Info) |
699 | return nullptr; |
700 | if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>()) |
701 | return S; |
702 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
703 | return EI->getPostInstrSymbol(); |
704 | |
705 | return nullptr; |
706 | } |
707 | |
708 | /// Helper to extract a heap alloc marker if one has been added. |
709 | MDNode *getHeapAllocMarker() const { |
710 | if (!Info) |
711 | return nullptr; |
712 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
713 | return EI->getHeapAllocMarker(); |
714 | |
715 | return nullptr; |
716 | } |
717 | |
718 | /// API for querying MachineInstr properties. They are the same as MCInstrDesc |
719 | /// queries but they are bundle aware. |
720 | |
721 | enum QueryType { |
722 | IgnoreBundle, // Ignore bundles |
723 | AnyInBundle, // Return true if any instruction in bundle has property |
724 | AllInBundle // Return true if all instructions in bundle have property |
725 | }; |
726 | |
727 | /// Return true if the instruction (or in the case of a bundle, |
728 | /// the instructions inside the bundle) has the specified property. |
729 | /// The first argument is the property being queried. |
730 | /// The second argument indicates whether the query should look inside |
731 | /// instruction bundles. |
732 | bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const { |
733 | assert(MCFlag < 64 && |
734 | "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle."); |
735 | // Inline the fast path for unbundled or bundle-internal instructions. |
736 | if (Type == IgnoreBundle || !isBundled() || isBundledWithPred()) |
737 | return getDesc().getFlags() & (1ULL << MCFlag); |
738 | |
739 | // If this is the first instruction in a bundle, take the slow path. |
740 | return hasPropertyInBundle(1ULL << MCFlag, Type); |
741 | } |
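
// Example (editor's sketch): the same kind of bundle-aware query the
// predicates below are built from; `MI` is assumed to be a bundle header.
static bool anyBundledStore(const MachineInstr &MI) {
  // AnyInBundle: true if any instruction in the bundle may store.
  return MI.hasProperty(MCID::MayStore, MachineInstr::AnyInBundle);
}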
742 | |
743 | /// Return true if this is an instruction that should go through the usual |
744 | /// legalization steps. |
745 | bool isPreISelOpcode(QueryType Type = IgnoreBundle) const { |
746 | return hasProperty(MCID::PreISelOpcode, Type); |
747 | } |
748 | |
749 | /// Return true if this instruction can have a variable number of operands. |
750 | /// In this case, the variable operands will be after the normal |
751 | /// operands but before the implicit definitions and uses (if any are |
752 | /// present). |
753 | bool isVariadic(QueryType Type = IgnoreBundle) const { |
754 | return hasProperty(MCID::Variadic, Type); |
755 | } |
756 | |
757 | /// Set if this instruction has an optional definition, e.g. |
758 | /// ARM instructions which can set condition code if 's' bit is set. |
759 | bool hasOptionalDef(QueryType Type = IgnoreBundle) const { |
760 | return hasProperty(MCID::HasOptionalDef, Type); |
761 | } |
762 | |
763 | /// Return true if this is a pseudo instruction that doesn't |
764 | /// correspond to a real machine instruction. |
765 | bool isPseudo(QueryType Type = IgnoreBundle) const { |
766 | return hasProperty(MCID::Pseudo, Type); |
767 | } |
768 | |
769 | bool isReturn(QueryType Type = AnyInBundle) const { |
770 | return hasProperty(MCID::Return, Type); |
771 | } |
772 | |
773 | /// Return true if this is an instruction that marks the end of an EH scope, |
774 | /// i.e., a catchpad or a cleanuppad instruction. |
775 | bool isEHScopeReturn(QueryType Type = AnyInBundle) const { |
776 | return hasProperty(MCID::EHScopeReturn, Type); |
777 | } |
778 | |
779 | bool isCall(QueryType Type = AnyInBundle) const { |
780 | return hasProperty(MCID::Call, Type); |
781 | } |
782 | |
783 | /// Return true if this is a call instruction that may have an associated |
784 | /// call site entry in the debug info. |
785 | bool isCandidateForCallSiteEntry(QueryType Type = IgnoreBundle) const; |
786 | /// Return true if copying, moving, or erasing this instruction requires |
787 | /// updating Call Site Info (see \ref copyCallSiteInfo, \ref moveCallSiteInfo, |
788 | /// \ref eraseCallSiteInfo). |
789 | bool shouldUpdateCallSiteInfo() const; |
790 | |
791 | /// Returns true if the specified instruction stops control flow |
792 | /// from executing the instruction immediately following it. Examples include |
793 | /// unconditional branches and return instructions. |
794 | bool isBarrier(QueryType Type = AnyInBundle) const { |
795 | return hasProperty(MCID::Barrier, Type); |
796 | } |
797 | |
798 | /// Returns true if this instruction is part of the terminator for a basic |
799 | /// block. Typically this includes return and branch instructions. |
800 | /// |
801 | /// Various passes use this to insert code into the bottom of a basic block, |
802 | /// but before control flow occurs. |
803 | bool isTerminator(QueryType Type = AnyInBundle) const { |
804 | return hasProperty(MCID::Terminator, Type); |
805 | } |
806 | |
807 | /// Returns true if this is a conditional, unconditional, or indirect branch. |
808 | /// Predicates below can be used to discriminate between |
809 | /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to |
810 | /// get more information. |
811 | bool isBranch(QueryType Type = AnyInBundle) const { |
812 | return hasProperty(MCID::Branch, Type); |
813 | } |
814 | |
815 | /// Return true if this is an indirect branch, such as a |
816 | /// branch through a register. |
817 | bool isIndirectBranch(QueryType Type = AnyInBundle) const { |
818 | return hasProperty(MCID::IndirectBranch, Type); |
819 | } |
820 | |
821 | /// Return true if this is a branch which may fall |
822 | /// through to the next instruction or may transfer control flow to some other |
823 | /// block. The TargetInstrInfo::analyzeBranch method can be used to get more |
824 | /// information about this branch. |
825 | bool isConditionalBranch(QueryType Type = AnyInBundle) const { |
826 | return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type); |
827 | } |
828 | |
829 | /// Return true if this is a branch which always |
830 | /// transfers control flow to some other block. The |
831 | /// TargetInstrInfo::analyzeBranch method can be used to get more information |
832 | /// about this branch. |
833 | bool isUnconditionalBranch(QueryType Type = AnyInBundle) const { |
834 | return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type); |
835 | } |
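
// Example (editor's sketch): classifying a terminator with the predicates
// above; the returned strings are illustrative only.
static StringRef classifyBranch(const MachineInstr &MI) {
  if (!MI.isBranch())
    return "not a branch";
  if (MI.isIndirectBranch())
    return "indirect";
  return MI.isConditionalBranch() ? "conditional" : "unconditional";
}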
836 | |
837 | /// Return true if this instruction has a predicate operand that |
838 | /// controls execution. It may be set to 'always', or may be set to other |
839 | /// values. There are various methods in TargetInstrInfo that can be used to |
840 | /// control and modify the predicate in this instruction. |
841 | bool isPredicable(QueryType Type = AllInBundle) const { |
842 | // If it's a bundle then all bundled instructions must be predicable for this |
843 | // to return true. |
844 | return hasProperty(MCID::Predicable, Type); |
845 | } |
846 | |
847 | /// Return true if this instruction is a comparison. |
848 | bool isCompare(QueryType Type = IgnoreBundle) const { |
849 | return hasProperty(MCID::Compare, Type); |
850 | } |
851 | |
852 | /// Return true if this instruction is a move immediate |
853 | /// (including conditional moves) instruction. |
854 | bool isMoveImmediate(QueryType Type = IgnoreBundle) const { |
855 | return hasProperty(MCID::MoveImm, Type); |
856 | } |
857 | |
858 | /// Return true if this instruction is a register move. |
859 | /// (including moving values from subreg to reg) |
860 | bool isMoveReg(QueryType Type = IgnoreBundle) const { |
861 | return hasProperty(MCID::MoveReg, Type); |
862 | } |
863 | |
864 | /// Return true if this instruction is a bitcast instruction. |
865 | bool isBitcast(QueryType Type = IgnoreBundle) const { |
866 | return hasProperty(MCID::Bitcast, Type); |
867 | } |
868 | |
869 | /// Return true if this instruction is a select instruction. |
870 | bool isSelect(QueryType Type = IgnoreBundle) const { |
871 | return hasProperty(MCID::Select, Type); |
872 | } |
873 | |
874 | /// Return true if this instruction cannot be safely duplicated. |
875 | /// For example, if the instruction has unique labels attached |
876 | /// to it, duplicating it would cause multiple definition errors. |
877 | bool isNotDuplicable(QueryType Type = AnyInBundle) const { |
878 | return hasProperty(MCID::NotDuplicable, Type); |
879 | } |
880 | |
881 | /// Return true if this instruction is convergent. |
882 | /// Convergent instructions cannot be made control-dependent on any |
883 | /// additional values. |
884 | bool isConvergent(QueryType Type = AnyInBundle) const { |
885 | if (isInlineAsm()) { |
886 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
887 | if (ExtraInfo & InlineAsm::Extra_IsConvergent) |
888 | return true; |
889 | } |
890 | return hasProperty(MCID::Convergent, Type); |
891 | } |
892 | |
893 | /// Returns true if the specified instruction has a delay slot |
894 | /// which must be filled by the code generator. |
895 | bool hasDelaySlot(QueryType Type = AnyInBundle) const { |
896 | return hasProperty(MCID::DelaySlot, Type); |
897 | } |
898 | |
899 | /// Return true for instructions that can be folded as |
900 | /// memory operands in other instructions. The most common use for this |
901 | /// is instructions that are simple loads from memory that don't modify |
902 | /// the loaded value in any way, but it can also be used for instructions |
903 | /// that can be expressed as constant-pool loads, such as V_SETALLONES |
904 | /// on x86, to allow them to be folded when it is beneficial. |
905 | /// This should only be set on instructions that return a value in their |
906 | /// only virtual register definition. |
907 | bool canFoldAsLoad(QueryType Type = IgnoreBundle) const { |
908 | return hasProperty(MCID::FoldableAsLoad, Type); |
909 | } |
910 | |
911 | /// Return true if this instruction behaves |
912 | /// the same way as the generic REG_SEQUENCE instructions. |
913 | /// E.g., on ARM, |
914 | /// dX VMOVDRR rY, rZ |
915 | /// is equivalent to |
916 | /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1. |
917 | /// |
918 | /// Note that for the optimizers to be able to take advantage of |
919 | /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be |
920 | /// overridden accordingly. |
921 | bool isRegSequenceLike(QueryType Type = IgnoreBundle) const { |
922 | return hasProperty(MCID::RegSequence, Type); |
923 | } |
924 | |
925 | /// Return true if this instruction behaves |
926 | /// the same way as the generic EXTRACT_SUBREG instructions. |
927 | /// E.g., on ARM, |
928 | /// rX, rY VMOVRRD dZ |
929 | /// is equivalent to two EXTRACT_SUBREG: |
930 | /// rX = EXTRACT_SUBREG dZ, ssub_0 |
931 | /// rY = EXTRACT_SUBREG dZ, ssub_1 |
932 | /// |
933 | /// Note that for the optimizers to be able to take advantage of |
934 | /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be |
935 | /// overridden accordingly. |
936 | bool isExtractSubregLike(QueryType Type = IgnoreBundle) const { |
937 | return hasProperty(MCID::ExtractSubreg, Type); |
938 | } |
939 | |
940 | /// Return true if this instruction behaves |
941 | /// the same way as the generic INSERT_SUBREG instructions. |
942 | /// E.g., on ARM, |
943 | /// dX = VSETLNi32 dY, rZ, Imm |
944 | /// is equivalent to an INSERT_SUBREG: |
945 | /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm) |
946 | /// |
947 | /// Note that for the optimizers to be able to take advantage of |
948 | /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be |
949 | /// overridden accordingly. |
950 | bool isInsertSubregLike(QueryType Type = IgnoreBundle) const { |
951 | return hasProperty(MCID::InsertSubreg, Type); |
952 | } |
953 | |
954 | //===--------------------------------------------------------------------===// |
955 | // Side Effect Analysis |
956 | //===--------------------------------------------------------------------===// |
957 | |
958 | /// Return true if this instruction could possibly read memory. |
959 | /// Instructions with this flag set are not necessarily simple load |
960 | /// instructions; they may load a value and modify it, for example. |
961 | bool mayLoad(QueryType Type = AnyInBundle) const { |
962 | if (isInlineAsm()) { |
963 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
964 | if (ExtraInfo & InlineAsm::Extra_MayLoad) |
965 | return true; |
966 | } |
967 | return hasProperty(MCID::MayLoad, Type); |
968 | } |
969 | |
970 | /// Return true if this instruction could possibly modify memory. |
971 | /// Instructions with this flag set are not necessarily simple store |
972 | /// instructions; they may store a modified value based on their operands, or |
973 | /// may not actually modify anything, for example. |
974 | bool mayStore(QueryType Type = AnyInBundle) const { |
975 | if (isInlineAsm()) { |
976 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
977 | if (ExtraInfo & InlineAsm::Extra_MayStore) |
978 | return true; |
979 | } |
980 | return hasProperty(MCID::MayStore, Type); |
981 | } |
982 | |
983 | /// Return true if this instruction could possibly read or modify memory. |
984 | bool mayLoadOrStore(QueryType Type = AnyInBundle) const { |
985 | return mayLoad(Type) || mayStore(Type); |
986 | } |
987 | |
988 | /// Return true if this instruction could possibly raise a floating-point |
989 | /// exception. This is the case if the instruction is a floating-point |
990 | /// instruction that can in principle raise an exception, as indicated |
991 | /// by the MCID::MayRaiseFPException property, *and* at the same time, |
992 | /// the instruction is used in a context where we expect floating-point |
993 | /// exceptions are not disabled, as indicated by the NoFPExcept MI flag. |
994 | bool mayRaiseFPException() const { |
995 | return hasProperty(MCID::MayRaiseFPException) && |
996 | !getFlag(MachineInstr::MIFlag::NoFPExcept); |
997 | } |
998 | |
999 | //===--------------------------------------------------------------------===// |
1000 | // Flags that indicate whether an instruction can be modified by a method. |
1001 | //===--------------------------------------------------------------------===// |
1002 | |
1003 | /// Return true if this may be a 2- or 3-address |
1004 | /// instruction (of the form "X = op Y, Z, ..."), which produces the same |
1005 | /// result if Y and Z are exchanged. If this flag is set, then the |
1006 | /// TargetInstrInfo::commuteInstruction method may be used to hack on the |
1007 | /// instruction. |
1008 | /// |
1009 | /// Note that this flag may be set on instructions that are only commutable |
1010 | /// sometimes. In these cases, the call to commuteInstruction will fail. |
1011 | /// Also note that some instructions require non-trivial modification to |
1012 | /// commute them. |
1013 | bool isCommutable(QueryType Type = IgnoreBundle) const { |
1014 | return hasProperty(MCID::Commutable, Type); |
1015 | } |
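
// Example (editor's sketch): isCommutable() only means commuting *may*
// succeed; TargetInstrInfo::commuteInstruction is allowed to fail and
// return nullptr for a specific instruction.
static MachineInstr *tryCommute(MachineInstr &MI, const TargetInstrInfo &TII) {
  if (!MI.isCommutable())
    return nullptr;
  return TII.commuteInstruction(MI, /*NewMI=*/false);
}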
1016 | |
1017 | /// Return true if this is a 2-address instruction |
1018 | /// which can be changed into a 3-address instruction if needed. Doing this |
1019 | /// transformation can be profitable in the register allocator, because it |
1020 | /// means that the instruction can use a 2-address form if possible, but |
1021 | /// degrade into a less efficient form if the source and dest register cannot |
1022 | /// be assigned to the same register. For example, this allows the x86 |
1023 | /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which |
1024 | /// is the same speed as the shift but has bigger code size. |
1025 | /// |
1026 | /// If this returns true, then the target must implement the |
1027 | /// TargetInstrInfo::convertToThreeAddress method for this instruction, which |
1028 | /// is allowed to fail if the transformation isn't valid for this specific |
1029 | /// instruction (e.g. shl reg, 4 on x86). |
1030 | /// |
1031 | bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const { |
1032 | return hasProperty(MCID::ConvertibleTo3Addr, Type); |
1033 | } |
1034 | |
1035 | /// Return true if this instruction requires |
1036 | /// custom insertion support when the DAG scheduler is inserting it into a |
1037 | /// machine basic block. If this is true for the instruction, it basically |
1038 | /// means that it is a pseudo instruction used at SelectionDAG time that is |
1039 | /// expanded out into magic code by the target when MachineInstrs are formed. |
1040 | /// |
1041 | /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method |
1042 | /// is used to insert this into the MachineBasicBlock. |
1043 | bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const { |
1044 | return hasProperty(MCID::UsesCustomInserter, Type); |
1045 | } |
1046 | |
1047 | /// Return true if this instruction requires *adjustment* |
1048 | /// after instruction selection by calling a target hook. For example, this |
1049 | /// can be used to fill in ARM 's' optional operand depending on whether |
1050 | /// the conditional flag register is used. |
1051 | bool hasPostISelHook(QueryType Type = IgnoreBundle) const { |
1052 | return hasProperty(MCID::HasPostISelHook, Type); |
1053 | } |
1054 | |
1055 | /// Returns true if this instruction is a candidate for remat. |
1056 | /// This flag is deprecated; please don't use it anymore. If this |
1057 | /// flag is set, the isReallyTriviallyReMaterializable() method is called to |
1058 | /// verify the instruction is really rematerializable. |
1059 | bool isRematerializable(QueryType Type = AllInBundle) const { |
1060 | // It's only possible to re-mat a bundle if all bundled instructions are |
1061 | // re-materializable. |
1062 | return hasProperty(MCID::Rematerializable, Type); |
1063 | } |
1064 | |
1065 | /// Returns true if this instruction has the same cost (or less) than a move |
1066 | /// instruction. This is useful during certain types of optimizations |
1067 | /// (e.g., remat during two-address conversion or machine licm) |
1068 | /// where we would like to remat or hoist the instruction, but not if it costs |
1069 | /// more than moving the instruction into the appropriate register. Note, we |
1070 | /// are not marking copies from and to the same register class with this flag. |
1071 | bool isAsCheapAsAMove(QueryType Type = AllInBundle) const { |
1072 | // Only returns true for a bundle if all bundled instructions are cheap. |
1073 | return hasProperty(MCID::CheapAsAMove, Type); |
1074 | } |
1075 | |
1076 | /// Returns true if this instruction's source operands |
1077 | /// have special register allocation requirements that are not captured by the |
1078 | /// operand register classes. e.g. ARM::STRD's two source registers must be an |
1079 | /// even / odd pair, ARM::STM registers have to be in ascending order. |
1080 | /// Post-register allocation passes should not attempt to change allocations |
1081 | /// for sources of instructions with this flag. |
1082 | bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const { |
1083 | return hasProperty(MCID::ExtraSrcRegAllocReq, Type); |
1084 | } |
1085 | |
1086 | /// Returns true if this instruction's def operands |
1087 | /// have special register allocation requirements that are not captured by the |
1088 | /// operand register classes. e.g. ARM::LDRD's two def registers must be an |
1089 | /// even / odd pair, ARM::LDM registers have to be in ascending order. |
1090 | /// Post-register allocation passes should not attempt to change allocations |
1091 | /// for definitions of instructions with this flag. |
1092 | bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const { |
1093 | return hasProperty(MCID::ExtraDefRegAllocReq, Type); |
1094 | } |
1095 | |
1096 | enum MICheckType { |
1097 | CheckDefs, // Check all operands for equality |
1098 | CheckKillDead, // Check all operands including kill / dead markers |
1099 | IgnoreDefs, // Ignore all definitions |
1100 | IgnoreVRegDefs // Ignore virtual register definitions |
1101 | }; |
1102 | |
1103 | /// Return true if this instruction is identical to \p Other. |
1104 | /// Two instructions are identical if they have the same opcode and all their |
1105 | /// operands are identical (with respect to MachineOperand::isIdenticalTo()). |
1106 | /// Note that this means liveness related flags (dead, undef, kill) do not |
1107 | /// affect the notion of identical. |
1108 | bool isIdenticalTo(const MachineInstr &Other, |
1109 | MICheckType Check = CheckDefs) const; |
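
// Example (editor's sketch): a trivial redundancy check against the
// previous instruction, ignoring liveness flags as documented above.
// getPrevNode() comes from the intrusive-list base and returns nullptr at
// the start of a block.
static bool identicalToPrev(const MachineInstr &MI) {
  const MachineInstr *Prev = MI.getPrevNode();
  return Prev && MI.isIdenticalTo(*Prev);
}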
1110 | |
1111 | /// Unlink 'this' from the containing basic block, and return it without |
1112 | /// deleting it. |
1113 | /// |
1114 | /// This function cannot be used on bundled instructions; use |
1115 | /// removeFromBundle() to remove individual instructions from a bundle. |
1116 | MachineInstr *removeFromParent(); |
1117 | |
1118 | /// Unlink this instruction from its basic block and return it without |
1119 | /// deleting it. |
1120 | /// |
1121 | /// If the instruction is part of a bundle, the other instructions in the |
1122 | /// bundle remain bundled. |
1123 | MachineInstr *removeFromBundle(); |
1124 | |
1125 | /// Unlink 'this' from the containing basic block and delete it. |
1126 | /// |
1127 | /// If this instruction is the header of a bundle, the whole bundle is erased. |
1128 | /// This function cannot be used for instructions inside a bundle; use |
1129 | /// eraseFromBundle() to erase individual bundled instructions. |
1130 | void eraseFromParent(); |
1131 | |
1132 | /// Unlink 'this' from the containing basic block and delete it. |
1133 | /// |
1134 | /// For all definitions mark their uses in DBG_VALUE nodes |
1135 | /// as undefined. Otherwise like eraseFromParent(). |
1136 | void eraseFromParentAndMarkDBGValuesForRemoval(); |
1137 | |
1138 | /// Unlink 'this' from its basic block and delete it. |
1139 | /// |
1140 | /// If the instruction is part of a bundle, the other instructions in the |
1141 | /// bundle remain bundled. |
1142 | void eraseFromBundle(); |
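
// Example (editor's sketch): erasing while iterating. make_early_inc_range
// (from llvm/ADT/STLExtras.h) advances the iterator before the loop body
// runs, so eraseFromParent() cannot invalidate it.
static void eraseIdentityCopies(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : llvm::make_early_inc_range(MBB))
    if (MI.isIdentityCopy())
      MI.eraseFromParent();
}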
1143 | |
1144 | bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; } |
1145 | bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; } |
1146 | bool isAnnotationLabel() const { |
1147 | return getOpcode() == TargetOpcode::ANNOTATION_LABEL; |
1148 | } |
1149 | |
1150 | /// Returns true if the MachineInstr represents a label. |
1151 | bool isLabel() const { |
1152 | return isEHLabel() || isGCLabel() || isAnnotationLabel(); |
1153 | } |
1154 | |
1155 | bool isCFIInstruction() const { |
1156 | return getOpcode() == TargetOpcode::CFI_INSTRUCTION; |
1157 | } |
1158 | |
1159 | // True if the instruction represents a position in the function. |
1160 | bool isPosition() const { return isLabel() || isCFIInstruction(); } |
1161 | |
1162 | bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; } |
1163 | bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; } |
1164 | bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; } |
1165 | bool isDebugInstr() const { |
1166 | return isDebugValue() || isDebugLabel() || isDebugRef(); |
1167 | } |
1168 | |
1169 | bool isDebugOffsetImm() const { return getDebugOffset().isImm(); } |
1170 | |
1171 | /// A DBG_VALUE is indirect iff the location operand is a register and |
1172 | /// the offset operand is an immediate. |
1173 | bool isIndirectDebugValue() const { |
1174 | return isDebugValue() && getDebugOperand(0).isReg() && isDebugOffsetImm(); |
1175 | } |
1176 | |
1177 | /// A DBG_VALUE is an entry value iff its debug expression contains the |
1178 | /// DW_OP_LLVM_entry_value operation. |
1179 | bool isDebugEntryValue() const; |
1180 | |
1181 | /// Return true if the instruction is a debug value which describes a part of |
1182 | /// a variable as unavailable. |
1183 | bool isUndefDebugValue() const { |
1184 | return isDebugValue() && getDebugOperand(0).isReg() && |
1185 | !getDebugOperand(0).getReg().isValid(); |
1186 | } |
1187 | |
1188 | bool isPHI() const { |
1189 | return getOpcode() == TargetOpcode::PHI || |
1190 | getOpcode() == TargetOpcode::G_PHI; |
1191 | } |
1192 | bool isKill() const { return getOpcode() == TargetOpcode::KILL; } |
1193 | bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; } |
1194 | bool isInlineAsm() const { |
1195 | return getOpcode() == TargetOpcode::INLINEASM || |
1196 | getOpcode() == TargetOpcode::INLINEASM_BR; |
1197 | } |
1198 | |
1199 | /// FIXME: Seems like a layering violation that the AsmDialect, which is X86 |
1200 | /// specific, be attached to a generic MachineInstr. |
1201 | bool isMSInlineAsm() const { |
1202 | return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel; |
1203 | } |
1204 | |
1205 | bool isStackAligningInlineAsm() const; |
1206 | InlineAsm::AsmDialect getInlineAsmDialect() const; |
1207 | |
1208 | bool isInsertSubreg() const { |
1209 | return getOpcode() == TargetOpcode::INSERT_SUBREG; |
1210 | } |
1211 | |
1212 | bool isSubregToReg() const { |
1213 | return getOpcode() == TargetOpcode::SUBREG_TO_REG; |
1214 | } |
1215 | |
1216 | bool isRegSequence() const { |
1217 | return getOpcode() == TargetOpcode::REG_SEQUENCE; |
1218 | } |
1219 | |
1220 | bool isBundle() const { |
1221 | return getOpcode() == TargetOpcode::BUNDLE; |
1222 | } |
1223 | |
1224 | bool isCopy() const { |
1225 | return getOpcode() == TargetOpcode::COPY; |
1226 | } |
1227 | |
1228 | bool isFullCopy() const { |
1229 | return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg(); |
1230 | } |
1231 | |
1232 | bool isExtractSubreg() const { |
1233 | return getOpcode() == TargetOpcode::EXTRACT_SUBREG; |
1234 | } |
1235 | |
1236 | /// Return true if the instruction behaves like a copy. |
1237 | /// This does not include native copy instructions. |
1238 | bool isCopyLike() const { |
1239 | return isCopy() || isSubregToReg(); |
1240 | } |
1241 | |
1242 | /// Return true if the instruction is an identity copy. |
1243 | bool isIdentityCopy() const { |
1244 | return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() && |
1245 | getOperand(0).getSubReg() == getOperand(1).getSubReg(); |
1246 | } |
1247 | |
1248 | /// Return true if this instruction doesn't produce any output in the form of |
1249 | /// executable instructions. |
1250 | bool isMetaInstruction() const { |
1251 | switch (getOpcode()) { |
1252 | default: |
1253 | return false; |
1254 | case TargetOpcode::IMPLICIT_DEF: |
1255 | case TargetOpcode::KILL: |
1256 | case TargetOpcode::CFI_INSTRUCTION: |
1257 | case TargetOpcode::EH_LABEL: |
1258 | case TargetOpcode::GC_LABEL: |
1259 | case TargetOpcode::DBG_VALUE: |
1260 | case TargetOpcode::DBG_INSTR_REF: |
1261 | case TargetOpcode::DBG_LABEL: |
1262 | case TargetOpcode::LIFETIME_START: |
1263 | case TargetOpcode::LIFETIME_END: |
1264 | case TargetOpcode::PSEUDO_PROBE: |
1265 | return true; |
1266 | } |
1267 | } |
1268 | |
1269 | /// Return true if this is a transient instruction that is either very likely |
1270 | /// to be eliminated during register allocation (such as copy-like |
1271 | /// instructions), or if this instruction doesn't have an execution-time cost. |
1272 | bool isTransient() const { |
1273 | switch (getOpcode()) { |
1274 | default: |
1275 | return isMetaInstruction(); |
1276 | // Copy-like instructions are usually eliminated during register allocation. |
1277 | case TargetOpcode::PHI: |
1278 | case TargetOpcode::G_PHI: |
1279 | case TargetOpcode::COPY: |
1280 | case TargetOpcode::INSERT_SUBREG: |
1281 | case TargetOpcode::SUBREG_TO_REG: |
1282 | case TargetOpcode::REG_SEQUENCE: |
1283 | return true; |
1284 | } |
1285 | } |
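
// Example (editor's sketch): approximating a block's real instruction count
// by skipping transient instructions, as the comment above suggests.
static unsigned countNonTransient(const MachineBasicBlock &MBB) {
  unsigned N = 0;
  for (const MachineInstr &MI : MBB)
    if (!MI.isTransient())
      ++N;
  return N;
}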
1286 | |
1287 | /// Return the number of instructions inside the MI bundle, excluding the |
1288 | /// bundle header. |
1289 | /// |
1290 | /// This is the number of instructions that MachineBasicBlock::iterator |
1291 | /// skips, 0 for unbundled instructions. |
1292 | unsigned getBundleSize() const; |
1293 | |
1294 | /// Return true if the MachineInstr reads the specified register. |
1295 | /// If TargetRegisterInfo is passed, then it also checks if there |
1296 | /// is a read of a super-register. |
1297 | /// This does not count partial redefines of virtual registers as reads: |
1298 | /// %reg1024:6 = OP. |
1299 | bool readsRegister(Register Reg, |
1300 | const TargetRegisterInfo *TRI = nullptr) const { |
1301 | return findRegisterUseOperandIdx(Reg, false, TRI) != -1; |
1302 | } |
1303 | |
1304 | /// Return true if the MachineInstr reads the specified virtual register. |
1305 | /// Take into account that a partial define is a |
1306 | /// read-modify-write operation. |
1307 | bool readsVirtualRegister(Register Reg) const { |
1308 | return readsWritesVirtualRegister(Reg).first; |
1309 | } |
1310 | |
1311 | /// Return a pair of bools (reads, writes) indicating if this instruction |
1312 | /// reads or writes Reg. This also considers partial defines. |
1313 | /// If Ops is not null, all operand indices for Reg are added. |
1314 | std::pair<bool,bool> readsWritesVirtualRegister(Register Reg, |
1315 | SmallVectorImpl<unsigned> *Ops = nullptr) const; |
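
// Example (editor's sketch): a partial redefinition of a virtual register
// shows up as both a read and a write in the returned pair.
static bool isReadModifyWrite(const MachineInstr &MI, Register Reg) {
  std::pair<bool, bool> RW = MI.readsWritesVirtualRegister(Reg);
  return RW.first && RW.second;
}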
1316 | |
1317 | /// Return true if the MachineInstr kills the specified register. |
1318 | /// If TargetRegisterInfo is passed, then it also checks if there is |
1319 | /// a kill of a super-register. |
1320 | bool killsRegister(Register Reg, |
1321 | const TargetRegisterInfo *TRI = nullptr) const { |
1322 | return findRegisterUseOperandIdx(Reg, true, TRI) != -1; |
1323 | } |
1324 | |
1325 | /// Return true if the MachineInstr fully defines the specified register. |
1326 | /// If TargetRegisterInfo is passed, then it also checks |
1327 | /// if there is a def of a super-register. |
1328 | /// NOTE: This ignores subreg indices on virtual registers. |
1329 | bool definesRegister(Register Reg, |
1330 | const TargetRegisterInfo *TRI = nullptr) const { |
1331 | return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1; |
1332 | } |
1333 | |
1334 | /// Return true if the MachineInstr modifies (fully define or partially |
1335 | /// define) the specified register. |
1336 | /// NOTE: This ignores subreg indices on virtual registers. |
1337 | bool modifiesRegister(Register Reg, |
1338 | const TargetRegisterInfo *TRI = nullptr) const { |
1339 | return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1; |
1340 | } |
1341 | |
1342 | /// Returns true if the register is dead in this machine instruction. |
1343 | /// If TargetRegisterInfo is passed, then it also checks |
1344 | /// if there is a dead def of a super-register. |
1345 | bool registerDefIsDead(Register Reg, |
1346 | const TargetRegisterInfo *TRI = nullptr) const { |
1347 | return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1; |
1348 | } |
1349 | |
1350 | /// Returns true if the MachineInstr has an implicit-use operand of exactly |
1351 | /// the given register (not considering sub/super-registers). |
1352 | bool hasRegisterImplicitUseOperand(Register Reg) const; |
1353 | |
1354 | /// Returns the operand index that is a use of the specific register or -1 |
1355 | /// if it is not found. It further tightens the search criteria to a use |
1356 | /// that kills the register if isKill is true. |
1357 | int findRegisterUseOperandIdx(Register Reg, bool isKill = false, |
1358 | const TargetRegisterInfo *TRI = nullptr) const; |
1359 | |
1360 | /// Wrapper for findRegisterUseOperandIdx, it returns |
1361 | /// a pointer to the MachineOperand rather than an index. |
1362 | MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false, |
1363 | const TargetRegisterInfo *TRI = nullptr) { |
1364 | int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI); |
1365 | return (Idx == -1) ? nullptr : &getOperand(Idx); |
1366 | } |
1367 | |
1368 | const MachineOperand *findRegisterUseOperand( |
1369 | Register Reg, bool isKill = false, |
1370 | const TargetRegisterInfo *TRI = nullptr) const { |
1371 | return const_cast<MachineInstr *>(this)-> |
1372 | findRegisterUseOperand(Reg, isKill, TRI); |
1373 | } |
1374 | |
1375 | /// Returns the operand index that is a def of the specified register or |
1376 | /// -1 if it is not found. If isDead is true, defs that are not dead are |
1377 | /// skipped. If Overlap is true, then it also looks for defs that merely |
1378 | /// overlap the specified register. If TargetRegisterInfo is non-null, |
1379 | /// then it also checks if there is a def of a super-register. |
1380 | /// This may also return a register mask operand when Overlap is true. |
1381 | int findRegisterDefOperandIdx(Register Reg, |
1382 | bool isDead = false, bool Overlap = false, |
1383 | const TargetRegisterInfo *TRI = nullptr) const; |
1384 | |
1385 | /// Wrapper for findRegisterDefOperandIdx, it returns |
1386 | /// a pointer to the MachineOperand rather than an index. |
1387 | MachineOperand * |
1388 | findRegisterDefOperand(Register Reg, bool isDead = false, |
1389 | bool Overlap = false, |
1390 | const TargetRegisterInfo *TRI = nullptr) { |
1391 | int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI); |
1392 | return (Idx == -1) ? nullptr : &getOperand(Idx); |
1393 | } |
1394 | |
1395 | const MachineOperand * |
1396 | findRegisterDefOperand(Register Reg, bool isDead = false, |
1397 | bool Overlap = false, |
1398 | const TargetRegisterInfo *TRI = nullptr) const { |
1399 | return const_cast<MachineInstr *>(this)->findRegisterDefOperand( |
1400 | Reg, isDead, Overlap, TRI); |
1401 | } |
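
// Example (editor's sketch): marking a def of Reg dead if one exists.
// Overlap is left false, so only full defs (or super-register defs when TRI
// is given) are matched.
static bool markDefDead(MachineInstr &MI, Register Reg,
                        const TargetRegisterInfo *TRI) {
  if (MachineOperand *MO = MI.findRegisterDefOperand(Reg, false, false, TRI)) {
    MO->setIsDead();
    return true;
  }
  return false;
}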
1402 | |
1403 | /// Find the index of the first operand in the |
1404 | /// operand list that is used to represent the predicate. It returns -1 if |
1405 | /// none is found. |
1406 | int findFirstPredOperandIdx() const; |
1407 | |
1408 | /// Find the index of the flag word operand that |
1409 | /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if |
1410 | /// getOperand(OpIdx) does not belong to an inline asm operand group. |
1411 | /// |
1412 | /// If GroupNo is not NULL, it will receive the number of the operand group |
1413 | /// containing OpIdx. |
1414 | /// |
1415 | /// The flag operand is an immediate that can be decoded with methods like |
1416 | /// InlineAsm::hasRegClassConstraint(). |
1417 | int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const; |
1418 | |
1419 | /// Compute the static register class constraint for operand OpIdx. |
1420 | /// For normal instructions, this is derived from the MCInstrDesc. |
1421 | /// For inline assembly it is derived from the flag words. |
1422 | /// |
1423 | /// Returns NULL if the static register class constraint cannot be |
1424 | /// determined. |
1425 | const TargetRegisterClass* |
1426 | getRegClassConstraint(unsigned OpIdx, |
1427 | const TargetInstrInfo *TII, |
1428 | const TargetRegisterInfo *TRI) const; |
1429 | |
1430 | /// Applies the constraints (def/use) implied by this MI on \p Reg to |
1431 | /// the given \p CurRC. |
1432 | /// If \p ExploreBundle is set and MI is part of a bundle, all the |
1433 | /// instructions inside the bundle will be taken into account. In other words, |
1434 | /// this method accumulates all the constraints of the operand of this MI and |
1435 | /// the related bundle if MI is a bundle or inside a bundle. |
1436 | /// |
1437 | /// Returns the register class that satisfies both \p CurRC and the |
1438 | /// constraints set by MI. Returns NULL if such a register class does not |
1439 | /// exist. |
1440 | /// |
1441 | /// \pre CurRC must not be NULL. |
1442 | const TargetRegisterClass *getRegClassConstraintEffectForVReg( |
1443 | Register Reg, const TargetRegisterClass *CurRC, |
1444 | const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, |
1445 | bool ExploreBundle = false) const; |
1446 | |
1447 | /// Applies the constraints (def/use) implied by the \p OpIdx operand |
1448 | /// to the given \p CurRC. |
1449 | /// |
1450 | /// Returns the register class that satisfies both \p CurRC and the |
1451 | /// constraints set by operand \p OpIdx of MI. Returns NULL if such a register class
1452 | /// does not exist. |
1453 | /// |
1454 | /// \pre CurRC must not be NULL. |
1455 | /// \pre The operand at \p OpIdx must be a register. |
1456 | const TargetRegisterClass * |
1457 | getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, |
1458 | const TargetInstrInfo *TII, |
1459 | const TargetRegisterInfo *TRI) const; |
1460 | |
1461 | /// Add a tie between the register operands at DefIdx and UseIdx. |
1462 | /// The tie will cause the register allocator to ensure that the two |
1463 | /// operands are assigned the same physical register. |
1464 | /// |
1465 | /// Tied operands are managed automatically for explicit operands in the |
1466 | /// MCInstrDesc. This method is for exceptional cases like inline asm. |
1467 | void tieOperands(unsigned DefIdx, unsigned UseIdx); |
1468 | |
1469 | /// Given the index of a tied register operand, find the |
1470 | /// operand it is tied to. Defs are tied to uses and vice versa. Returns the |
1471 | /// index of the tied operand which must exist. |
1472 | unsigned findTiedOperandIdx(unsigned OpIdx) const; |
1473 | |
1474 | /// Given the index of a register def operand, |
1475 | /// check if the register def is tied to a source operand, due to either |
1476 | /// two-address elimination or inline assembly constraints. Returns the |
1477 | /// first tied use operand index by reference if UseOpIdx is not null. |
1478 | bool isRegTiedToUseOperand(unsigned DefOpIdx, |
1479 | unsigned *UseOpIdx = nullptr) const { |
1480 | const MachineOperand &MO = getOperand(DefOpIdx); |
1481 | if (!MO.isReg() || !MO.isDef() || !MO.isTied()) |
1482 | return false; |
1483 | if (UseOpIdx) |
1484 | *UseOpIdx = findTiedOperandIdx(DefOpIdx); |
1485 | return true; |
1486 | } |
1487 | |
1488 | /// Return true if the use operand of the specified index is tied to a def |
1489 | /// operand. It also returns the def operand index by reference if DefOpIdx |
1490 | /// is not null. |
1491 | bool isRegTiedToDefOperand(unsigned UseOpIdx, |
1492 | unsigned *DefOpIdx = nullptr) const { |
1493 | const MachineOperand &MO = getOperand(UseOpIdx); |
1494 | if (!MO.isReg() || !MO.isUse() || !MO.isTied()) |
1495 | return false; |
1496 | if (DefOpIdx) |
1497 | *DefOpIdx = findTiedOperandIdx(UseOpIdx); |
1498 | return true; |
1499 | } |
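// A minimal sketch of the two tied-operand queries above, assuming `MI` and
// a def operand index `DefIdx` supplied by the caller (both hypothetical):
//
//   unsigned UseIdx;
//   if (MI.isRegTiedToUseOperand(DefIdx, &UseIdx))
//     // The def at DefIdx and the use at UseIdx must receive the same
//     // physical register (two-address or inline-asm constraint).
//     assert(MI.getOperand(UseIdx).isUse());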
1500 | |
1501 | /// Clears kill flags on all operands. |
1502 | void clearKillInfo(); |
1503 | |
1504 | /// Replace all occurrences of FromReg with ToReg:SubIdx, |
1505 | /// properly composing subreg indices where necessary. |
1506 | void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, |
1507 | const TargetRegisterInfo &RegInfo); |
1508 | |
1509 | /// We have determined MI kills a register. Look for the |
1510 | /// operand that uses it and mark it as IsKill. If AddIfNotFound is true, |
1511 | /// add an implicit operand if it's not found. Returns true if the operand
1512 | /// exists / is added. |
1513 | bool addRegisterKilled(Register IncomingReg, |
1514 | const TargetRegisterInfo *RegInfo, |
1515 | bool AddIfNotFound = false); |
1516 | |
1517 | /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes |
1518 | /// all aliasing registers. |
1519 | void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo); |
1520 | |
1521 | /// We have determined MI defined a register without a use. |
1522 | /// Look for the operand that defines it and mark it as IsDead. If |
1523 | /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
1524 | /// true if the operand exists / is added. |
1525 | bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, |
1526 | bool AddIfNotFound = false); |
1527 | |
1528 | /// Clear all dead flags on operands defining register @p Reg. |
1529 | void clearRegisterDeads(Register Reg); |
1530 | |
1531 | /// Mark all subregister defs of register @p Reg with the undef flag. |
1532 | /// This function is used when we have determined that there is a subregister
1533 | /// def in an otherwise undefined super register.
1534 | void setRegisterDefReadUndef(Register Reg, bool IsUndef = true); |
1535 | |
1536 | /// We have determined MI defines a register. Make sure there is an operand |
1537 | /// defining Reg. |
1538 | void addRegisterDefined(Register Reg, |
1539 | const TargetRegisterInfo *RegInfo = nullptr); |
1540 | |
1541 | /// Mark every physreg used by this instruction as |
1542 | /// dead except those in the UsedRegs list. |
1543 | /// |
1544 | /// On instructions with register mask operands, also add implicit-def |
1545 | /// operands for all registers in UsedRegs. |
1546 | void setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs, |
1547 | const TargetRegisterInfo &TRI); |
1548 | |
1549 | /// Return true if it is safe to move this instruction. If |
1550 | /// SawStore is set to true, it means that there is a store (or call) between |
1551 | /// the instruction's location and its intended destination. |
1552 | bool isSafeToMove(AAResults *AA, bool &SawStore) const; |
1553 | |
1554 | /// Returns true if this instruction's memory access aliases the memory |
1555 | /// access of Other. |
1556 | ///
1557 | /// Assumes any physical registers used to compute addresses |
1558 | /// have the same value for both instructions. Returns false if neither |
1559 | /// instruction writes to memory. |
1560 | /// |
1561 | /// @param AA Optional alias analysis, used to compare memory operands. |
1562 | /// @param Other MachineInstr to check aliasing against. |
1563 | /// @param UseTBAA Whether to pass TBAA information to alias analysis. |
1564 | bool mayAlias(AAResults *AA, const MachineInstr &Other, bool UseTBAA) const; |
1565 | |
1566 | /// Return true if this instruction may have an ordered |
1567 | /// or volatile memory reference, or if the information describing the memory |
1568 | /// reference is not available. Return false if it is known to have no |
1569 | /// ordered or volatile memory references. |
1570 | bool hasOrderedMemoryRef() const; |
1571 | |
1572 | /// Return true if this load instruction never traps and points to a memory |
1573 | /// location whose value doesn't change during the execution of this function. |
1574 | /// |
1575 | /// Examples include loading a value from the constant pool or from the |
1576 | /// argument area of a function (if it does not change). If the instruction |
1577 | /// does multiple loads, this returns true only if all of the loads are |
1578 | /// dereferenceable and invariant. |
1579 | bool isDereferenceableInvariantLoad(AAResults *AA) const; |
1580 | |
1581 | /// If the specified instruction is a PHI that always merges together the |
1582 | /// same virtual register, return the register, otherwise return 0. |
1583 | unsigned isConstantValuePHI() const; |
1584 | |
1585 | /// Return true if this instruction has side effects that are not modeled |
1586 | /// by mayLoad / mayStore, etc. |
1587 | /// For all instructions, the property is encoded in MCInstrDesc::Flags |
1588 | /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
1589 | /// INLINEASM instruction, in which case the side effect property is encoded
1590 | /// in one of its operands (see InlineAsm::Extra_HasSideEffect). |
1591 | /// |
1592 | bool hasUnmodeledSideEffects() const; |
1593 | |
1594 | /// Returns true if it is illegal to fold a load across this instruction. |
1595 | bool isLoadFoldBarrier() const; |
1596 | |
1597 | /// Return true if all the defs of this instruction are dead. |
1598 | bool allDefsAreDead() const; |
1599 | |
1600 | /// Return a valid size if the instruction is a spill instruction. |
1601 | Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const; |
1602 | |
1603 | /// Return a valid size if the instruction is a folded spill instruction. |
1604 | Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const; |
1605 | |
1606 | /// Return a valid size if the instruction is a restore instruction. |
1607 | Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const; |
1608 | |
1609 | /// Return a valid size if the instruction is a folded restore instruction. |
1610 | Optional<unsigned> |
1611 | getFoldedRestoreSize(const TargetInstrInfo *TII) const; |
1612 | |
1613 | /// Copy implicit register operands from the specified
1614 | /// instruction to this instruction. |
1615 | void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI); |
1616 | |
1617 | /// Debugging support |
1618 | /// @{ |
1619 | /// Determine the generic type to be printed (if needed) on uses and defs. |
1620 | LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, |
1621 | const MachineRegisterInfo &MRI) const; |
1622 | |
1623 | /// Return true when an instruction has a tied register that can't be determined
1624 | /// by the instruction's descriptor. This is useful for MIR printing, to |
1625 | /// determine whether we need to print the ties or not. |
1626 | bool hasComplexRegisterTies() const; |
1627 | |
1628 | /// Print this MI to \p OS. |
1629 | /// Don't print information that can be inferred from other instructions if |
1630 | /// \p IsStandalone is false. It is usually true when only a fragment of the |
1631 | /// function is printed. |
1632 | /// If \p SkipOpers is true, only print the defs and the opcode.
1633 | /// Otherwise, also print the operands; and unless \p SkipDebugLoc is true,
1634 | /// also print the debug loc, with a terminating newline.
1635 | /// \p TII is used to print the opcode name. If it's not present, but the |
1636 | /// MI is in a function, the opcode will be printed using the function's TII. |
1637 | void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false, |
1638 | bool SkipDebugLoc = false, bool AddNewLine = true, |
1639 | const TargetInstrInfo *TII = nullptr) const; |
1640 | void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true, |
1641 | bool SkipOpers = false, bool SkipDebugLoc = false, |
1642 | bool AddNewLine = true, |
1643 | const TargetInstrInfo *TII = nullptr) const; |
1644 | void dump() const; |
1645 | /// Print on dbgs() the current instruction and the instructions defining its |
1646 | /// operands and so on until we reach \p MaxDepth. |
1647 | void dumpr(const MachineRegisterInfo &MRI, |
1648 | unsigned MaxDepth = UINT_MAX) const;
1649 | /// @} |
1650 | |
1651 | //===--------------------------------------------------------------------===// |
1652 | // Accessors used to build up machine instructions. |
1653 | |
1654 | /// Add the specified operand to the instruction. If it is an implicit |
1655 | /// operand, it is added to the end of the operand list. If it is an |
1656 | /// explicit operand it is added at the end of the explicit operand list |
1657 | /// (before the first implicit operand). |
1658 | /// |
1659 | /// MF must be the machine function that was used to allocate this |
1660 | /// instruction. |
1661 | /// |
1662 | /// MachineInstrBuilder provides a more convenient interface for creating |
1663 | /// instructions and adding operands. |
1664 | void addOperand(MachineFunction &MF, const MachineOperand &Op); |
1665 | |
1666 | /// Add an operand without providing an MF reference. This only works for |
1667 | /// instructions that are inserted in a basic block. |
1668 | /// |
1669 | /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be |
1670 | /// preferred. |
1671 | void addOperand(const MachineOperand &Op); |
1672 | |
1673 | /// Replace the instruction descriptor (thus opcode) of |
1674 | /// the current instruction with a new one. |
1675 | void setDesc(const MCInstrDesc &tid) { MCID = &tid; } |
1676 | |
1677 | /// Replace the current debug location with \p dl.
1678 | /// Avoid using this; the constructor argument is preferable.
1679 | void setDebugLoc(DebugLoc dl) { |
1680 | debugLoc = std::move(dl); |
1681 | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1682 | } |
1683 | |
1684 | /// Erase an operand from an instruction, leaving it with one |
1685 | /// fewer operand than it started with. |
1686 | void RemoveOperand(unsigned OpNo); |
1687 | |
1688 | /// Clear this MachineInstr's memory reference descriptor list. This resets |
1689 | /// the memrefs to their most conservative state. This should be used only |
1690 | /// as a last resort since it greatly pessimizes our knowledge of the memory |
1691 | /// access performed by the instruction. |
1692 | void dropMemRefs(MachineFunction &MF); |
1693 | |
1694 | /// Assign this MachineInstr's memory reference descriptor list. |
1695 | /// |
1696 | /// Unlike other methods, this *will* allocate them into a new array |
1697 | /// associated with the provided `MachineFunction`. |
1698 | void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs); |
1699 | |
1700 | /// Add a MachineMemOperand to the machine instruction. |
1701 | /// This function should be used only occasionally. The setMemRefs function |
1702 | /// is the primary method for setting up a MachineInstr's MemRefs list. |
1703 | void addMemOperand(MachineFunction &MF, MachineMemOperand *MO); |
1704 | |
1705 | /// Clone another MachineInstr's memory reference descriptor list and replace |
1706 | /// ours with it. |
1707 | /// |
1708 | /// Note that `*this` may be the incoming MI! |
1709 | /// |
1710 | /// Prefer this API whenever possible as it can avoid allocations in common |
1711 | /// cases. |
1712 | void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI); |
1713 | |
1714 | /// Clone the merge of multiple MachineInstrs' memory reference descriptors |
1715 | /// list and replace ours with it. |
1716 | /// |
1717 | /// Note that `*this` may be one of the incoming MIs! |
1718 | /// |
1719 | /// Prefer this API whenever possible as it can avoid allocations in common |
1720 | /// cases. |
1721 | void cloneMergedMemRefs(MachineFunction &MF, |
1722 | ArrayRef<const MachineInstr *> MIs); |
1723 | |
1724 | /// Set a symbol that will be emitted just prior to the instruction itself. |
1725 | /// |
1726 | /// Setting this to a null pointer will remove any such symbol. |
1727 | /// |
1728 | /// FIXME: This is not fully implemented yet. |
1729 | void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol); |
1730 | |
1731 | /// Set a symbol that will be emitted just after the instruction itself. |
1732 | /// |
1733 | /// Setting this to a null pointer will remove any such symbol. |
1734 | /// |
1735 | /// FIXME: This is not fully implemented yet. |
1736 | void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol); |
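// A minimal sketch, assuming `MF` and `MI` come from the caller: attach a
// temporary label that will be emitted immediately before MI.
//
//   MCSymbol *Sym = MF.getContext().createTempSymbol();
//   MI.setPreInstrSymbol(MF, Sym);
//   // Passing nullptr later removes the symbol again:
//   MI.setPreInstrSymbol(MF, nullptr);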
1737 | |
1738 | /// Clone another MachineInstr's pre- and post- instruction symbols and |
1739 | /// replace ours with it. |
1740 | void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI); |
1741 | |
1742 | /// Set a marker on instructions that denotes where we should create and emit |
1743 | /// heap alloc site labels. This waits until after instruction selection and |
1744 | /// optimizations to create the label, so it should still work if the |
1745 | /// instruction is removed or duplicated. |
1746 | void setHeapAllocMarker(MachineFunction &MF, MDNode *MD); |
1747 | |
1748 | /// Return the MIFlags which represent both MachineInstrs. This |
1749 | /// should be used when merging two MachineInstrs into one. This routine does |
1750 | /// not modify the MIFlags of this MachineInstr. |
1751 | uint16_t mergeFlagsWith(const MachineInstr& Other) const; |
1752 | |
1753 | static uint16_t copyFlagsFromInstruction(const Instruction &I); |
1754 | |
1755 | /// Copy all flags to MachineInstr MIFlags
1756 | void copyIRFlags(const Instruction &I); |
1757 | |
1758 | /// Break any tie involving OpIdx. |
1759 | void untieRegOperand(unsigned OpIdx) { |
1760 | MachineOperand &MO = getOperand(OpIdx); |
1761 | if (MO.isReg() && MO.isTied()) { |
1762 | getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0; |
1763 | MO.TiedTo = 0; |
1764 | } |
1765 | } |
1766 | |
1767 | /// Add all implicit def and use operands to this instruction. |
1768 | void addImplicitDefUseOperands(MachineFunction &MF); |
1769 | |
1770 | /// Scan instructions immediately following MI and collect any matching |
1771 | /// DBG_VALUEs. |
1772 | void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues); |
1773 | |
1774 | /// Find all DBG_VALUEs that point to the register def in this instruction |
1775 | /// and point them to \p Reg instead. |
1776 | void changeDebugValuesDefReg(Register Reg); |
1777 | |
1778 | /// Returns the Intrinsic::ID for this instruction. |
1779 | /// \pre Must have an intrinsic ID operand. |
1780 | unsigned getIntrinsicID() const { |
1781 | return getOperand(getNumExplicitDefs()).getIntrinsicID(); |
1782 | } |
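// A minimal sketch, assuming a GlobalISel context where `MI` is known to be
// a G_INTRINSIC-style instruction, so the intrinsic ID operand immediately
// follows the explicit defs as this accessor expects:
//
//   if (MI.getOpcode() == TargetOpcode::G_INTRINSIC) {
//     unsigned IID = MI.getIntrinsicID();
//     ...; // dispatch on IID against Intrinsic::* values
//   }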
1783 | |
1784 | /// Sets all register debug operands in this debug value instruction to be |
1785 | /// undef. |
1786 | void setDebugValueUndef() { |
1787 | assert(isDebugValue() && "Must be a debug value instruction.");
1788 | for (MachineOperand &MO : debug_operands()) { |
1789 | if (MO.isReg()) { |
1790 | MO.setReg(0); |
1791 | MO.setSubReg(0); |
1792 | } |
1793 | } |
1794 | } |
1795 | |
1796 | private: |
1797 | /// If this instruction is embedded into a MachineFunction, return the |
1798 | /// MachineRegisterInfo object for the current function, otherwise |
1799 | /// return null. |
1800 | MachineRegisterInfo *getRegInfo(); |
1801 | |
1802 | /// Unlink all of the register operands in this instruction from their |
1803 | /// respective use lists. This requires that the operands already be on their |
1804 | /// use lists. |
1805 | void RemoveRegOperandsFromUseLists(MachineRegisterInfo&); |
1806 | |
1807 | /// Add all of the register operands in this instruction to their
1808 | /// respective use lists. This requires that the operands not be on their |
1809 | /// use lists yet. |
1810 | void AddRegOperandsToUseLists(MachineRegisterInfo&); |
1811 | |
1812 | /// Slow path for hasProperty when we're dealing with a bundle. |
1813 | bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const; |
1814 | |
1815 | /// Implements the logic of getRegClassConstraintEffectForVReg for
1816 | /// this MI and the given operand index \p OpIdx.
1817 | /// If the related operand does not constrain Reg, this returns CurRC.
1818 | const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl( |
1819 | unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC, |
1820 | const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; |
1821 | |
1822 | /// Stores extra instruction information inline or allocates as ExtraInfo |
1823 | /// based on the number of pointers. |
1824 | void setExtraInfo(MachineFunction &MF, ArrayRef<MachineMemOperand *> MMOs, |
1825 | MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, |
1826 | MDNode *HeapAllocMarker); |
1827 | }; |
1828 | |
1829 | /// Special DenseMapInfo traits to compare MachineInstr* by *value* of the |
1830 | /// instruction rather than by pointer value. |
1831 | /// The hashing and equality testing functions ignore definitions so this is |
1832 | /// useful for CSE, etc. |
1833 | struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> { |
1834 | static inline MachineInstr *getEmptyKey() { |
1835 | return nullptr; |
1836 | } |
1837 | |
1838 | static inline MachineInstr *getTombstoneKey() { |
1839 | return reinterpret_cast<MachineInstr*>(-1); |
1840 | } |
1841 | |
1842 | static unsigned getHashValue(const MachineInstr* const &MI); |
1843 | |
1844 | static bool isEqual(const MachineInstr* const &LHS, |
1845 | const MachineInstr* const &RHS) { |
1846 | if (RHS == getEmptyKey() || RHS == getTombstoneKey() || |
1847 | LHS == getEmptyKey() || LHS == getTombstoneKey()) |
1848 | return LHS == RHS; |
1849 | return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs); |
1850 | } |
1851 | }; |
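// A sketch of the intended use: a value-keyed map for CSE-style lookups. The
// map and counter names are hypothetical. Hashing and equality go through the
// trait above, so two distinct MachineInstr pointers computing the same
// expression (ignoring their defs) deliberately collide.
//
//   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> CSEMap;
//   auto It = CSEMap.find(MI);
//   if (It != CSEMap.end())
//     ...; // an identical instruction was seen before; reuse its result
//   else
//     CSEMap[MI] = NextValueNumber++;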
1852 | |
1853 | //===----------------------------------------------------------------------===// |
1854 | // Debugging Support |
1855 | |
1856 | inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) { |
1857 | MI.print(OS); |
1858 | return OS; |
1859 | } |
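// Typical debugging usage enabled by this operator (a sketch; DEBUG_TYPE must
// be defined in the translation unit for LLVM_DEBUG to fire):
//
//   LLVM_DEBUG(dbgs() << "Considering: " << MI);
//   // print() appends a newline by default, so none is needed here.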
1860 | |
1861 | } // end namespace llvm |
1862 | |
1863 | #endif // LLVM_CODEGEN_MACHINEINSTR_H |
1 | //===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #ifndef LLVM_CODEGEN_REGISTER_H |
10 | #define LLVM_CODEGEN_REGISTER_H |
11 | |
12 | #include "llvm/MC/MCRegister.h" |
13 | #include <cassert> |
14 | |
15 | namespace llvm { |
16 | |
17 | /// Wrapper class representing virtual and physical registers. Should be passed |
18 | /// by value. |
19 | class Register { |
20 | unsigned Reg; |
21 | |
22 | public: |
23 | constexpr Register(unsigned Val = 0): Reg(Val) {} |
24 | constexpr Register(MCRegister Val): Reg(Val) {} |
25 | |
26 | // Register numbers can represent physical registers, virtual registers, and |
27 | // sometimes stack slots. The unsigned values are divided into these ranges: |
28 | // |
29 | // 0 Not a register, can be used as a sentinel. |
30 | // [1;2^30) Physical registers assigned by TableGen. |
31 | // [2^30;2^31) Stack slots. (Rarely used.) |
32 | // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo. |
33 | // |
34 | // Further sentinels can be allocated from the small negative integers. |
35 | // DenseMapInfo<unsigned> uses -1u and -2u. |
36 | static_assert(std::numeric_limits<decltype(Reg)>::max() >= 0xFFFFFFFF, |
37 | "Reg isn't large enough to hold full range."); |
38 | |
39 | /// isStackSlot - Sometimes it is useful to be able to store a non-negative
40 | /// frame index in a variable that normally holds a register. isStackSlot() |
41 | /// returns true if Reg is in the range used for stack slots. |
42 | /// |
43 | /// FIXME: remove in favor of member. |
44 | static bool isStackSlot(unsigned Reg) { |
45 | return MCRegister::isStackSlot(Reg); |
46 | } |
47 | |
48 | /// Return true if this is a stack slot. |
49 | bool isStack() const { return MCRegister::isStackSlot(Reg); } |
50 | |
51 | /// Compute the frame index from a register value representing a stack slot. |
52 | static int stackSlot2Index(Register Reg) { |
53 | assert(Reg.isStack() && "Not a stack slot");
54 | return int(Reg - MCRegister::FirstStackSlot); |
55 | } |
56 | |
57 | /// Convert a non-negative frame index to a stack slot register value. |
58 | static Register index2StackSlot(int FI) { |
59 | assert(FI >= 0 && "Cannot hold a negative frame index.");
60 | return Register(FI + MCRegister::FirstStackSlot); |
61 | } |
62 | |
63 | /// Return true if the specified register number is in |
64 | /// the physical register namespace. |
65 | static bool isPhysicalRegister(unsigned Reg) { |
66 | return MCRegister::isPhysicalRegister(Reg); |
67 | } |
68 | |
69 | /// Return true if the specified register number is in |
70 | /// the virtual register namespace. |
71 | static bool isVirtualRegister(unsigned Reg) { |
72 | return Reg & MCRegister::VirtualRegFlag && !isStackSlot(Reg); |
73 | } |
74 | |
75 | /// Convert a virtual register number to a 0-based index. |
76 | /// The first virtual register in a function will get the index 0. |
77 | static unsigned virtReg2Index(Register Reg) { |
78 | assert(isVirtualRegister(Reg) && "Not a virtual register");
79 | return Reg & ~MCRegister::VirtualRegFlag; |
80 | } |
81 | |
82 | /// Convert a 0-based index to a virtual register number. |
83 | /// This is the inverse operation of VirtReg2IndexFunctor below. |
84 | static Register index2VirtReg(unsigned Index) { |
85 | assert(Index < (1u << 31) && "Index too large for virtual register range.");
86 | return Index | MCRegister::VirtualRegFlag; |
87 | } |
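// A small round-trip sketch of the index mapping:
//
//   Register V = Register::index2VirtReg(0); // the vreg printed as %0 in MIR
//   assert(V.isVirtual() && Register::virtReg2Index(V) == 0);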
88 | |
89 | /// Return true if the specified register number is in the virtual register |
90 | /// namespace. |
91 | bool isVirtual() const { |
92 | return isVirtualRegister(Reg); |
93 | } |
94 | |
95 | /// Return true if the specified register number is in the physical register |
96 | /// namespace. |
97 | bool isPhysical() const { |
98 | return isPhysicalRegister(Reg); |
99 | } |
100 | |
101 | /// Convert a virtual register number to a 0-based index. The first virtual |
102 | /// register in a function will get the index 0. |
103 | unsigned virtRegIndex() const { |
104 | return virtReg2Index(Reg); |
105 | } |
106 | |
107 | constexpr operator unsigned() const { |
108 | return Reg; |
109 | } |
110 | |
111 | unsigned id() const { return Reg; } |
112 | |
113 | operator MCRegister() const { |
114 | return MCRegister(Reg); |
115 | } |
116 | |
117 | /// Utility to check-convert this value to an MCRegister. The caller is
118 | /// expected to have already validated that this Register is, indeed, |
119 | /// physical. |
120 | MCRegister asMCReg() const { |
121 | assert(Reg == MCRegister::NoRegister ||
122 |        MCRegister::isPhysicalRegister(Reg));
123 | return MCRegister(Reg); |
124 | } |
125 | |
126 | bool isValid() const { return Reg != MCRegister::NoRegister; } |
127 | |
128 | /// Comparisons between register objects |
129 | bool operator==(const Register &Other) const { return Reg == Other.Reg; } |
130 | bool operator!=(const Register &Other) const { return Reg != Other.Reg; } |
131 | bool operator==(const MCRegister &Other) const { return Reg == Other.id(); } |
132 | bool operator!=(const MCRegister &Other) const { return Reg != Other.id(); } |
133 | |
134 | /// Comparisons against register constants. E.g. |
135 | /// * R == AArch64::WZR |
136 | /// * R == 0 |
137 | /// * R == VirtRegMap::NO_PHYS_REG |
138 | bool operator==(unsigned Other) const { return Reg == Other; } |
139 | bool operator!=(unsigned Other) const { return Reg != Other; } |
140 | bool operator==(int Other) const { return Reg == unsigned(Other); } |
141 | bool operator!=(int Other) const { return Reg != unsigned(Other); } |
142 | // MSVC requires that we explicitly declare these two as well. |
143 | bool operator==(MCPhysReg Other) const { return Reg == unsigned(Other); } |
144 | bool operator!=(MCPhysReg Other) const { return Reg != unsigned(Other); } |
145 | }; |
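// A sketch of the sentinel and comparison behavior above, with hypothetical
// values:
//
//   Register R;                      // default-constructed: NoRegister
//   assert(!R.isValid() && R == 0);  // the 0 sentinel compares equal
//   if (R != MCRegister::NoRegister)
//     ...; // only reached once R holds a real register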
146 | |
147 | // Provide DenseMapInfo for Register |
148 | template<> struct DenseMapInfo<Register> { |
149 | static inline unsigned getEmptyKey() { |
150 | return DenseMapInfo<unsigned>::getEmptyKey(); |
151 | } |
152 | static inline unsigned getTombstoneKey() { |
153 | return DenseMapInfo<unsigned>::getTombstoneKey(); |
154 | } |
155 | static unsigned getHashValue(const Register &Val) { |
156 | return DenseMapInfo<unsigned>::getHashValue(Val.id()); |
157 | } |
158 | static bool isEqual(const Register &LHS, const Register &RHS) { |
159 | return DenseMapInfo<unsigned>::isEqual(LHS.id(), RHS.id()); |
160 | } |
161 | }; |
162 | |
163 | } // end namespace llvm
164 | |
165 | #endif // ifndef LLVM_CODEGEN_REGISTER_H |
1 | //===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file describes the target machine instruction set to the code generator. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_TARGET_TARGETINSTRINFO_H |
14 | #define LLVM_TARGET_TARGETINSTRINFO_H |
15 | |
16 | #include "llvm/ADT/ArrayRef.h" |
17 | #include "llvm/ADT/DenseMap.h" |
18 | #include "llvm/ADT/DenseMapInfo.h" |
19 | #include "llvm/ADT/None.h" |
20 | #include "llvm/CodeGen/MIRFormatter.h" |
21 | #include "llvm/CodeGen/MachineBasicBlock.h" |
22 | #include "llvm/CodeGen/MachineCombinerPattern.h" |
23 | #include "llvm/CodeGen/MachineFunction.h" |
24 | #include "llvm/CodeGen/MachineInstr.h" |
25 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
26 | #include "llvm/CodeGen/MachineOperand.h" |
27 | #include "llvm/CodeGen/MachineOutliner.h" |
28 | #include "llvm/CodeGen/RegisterClassInfo.h" |
29 | #include "llvm/CodeGen/VirtRegMap.h" |
30 | #include "llvm/MC/MCInstrInfo.h" |
31 | #include "llvm/Support/BranchProbability.h" |
32 | #include "llvm/Support/ErrorHandling.h" |
33 | #include <cassert> |
34 | #include <cstddef> |
35 | #include <cstdint> |
36 | #include <utility> |
37 | #include <vector> |
38 | |
39 | namespace llvm { |
40 | |
41 | class AAResults; |
42 | class DFAPacketizer; |
43 | class InstrItineraryData; |
44 | class LiveIntervals; |
45 | class LiveVariables; |
46 | class MachineLoop; |
47 | class MachineMemOperand; |
48 | class MachineRegisterInfo; |
49 | class MCAsmInfo; |
50 | class MCInst; |
51 | struct MCSchedModel; |
52 | class Module; |
53 | class ScheduleDAG; |
54 | class ScheduleDAGMI; |
55 | class ScheduleHazardRecognizer; |
56 | class SDNode; |
57 | class SelectionDAG; |
58 | class RegScavenger; |
59 | class TargetRegisterClass; |
60 | class TargetRegisterInfo; |
61 | class TargetSchedModel; |
62 | class TargetSubtargetInfo; |
63 | |
64 | template <class T> class SmallVectorImpl; |
65 | |
66 | using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>; |
67 | |
68 | struct DestSourcePair { |
69 | const MachineOperand *Destination; |
70 | const MachineOperand *Source; |
71 | |
72 | DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src) |
73 | : Destination(&Dest), Source(&Src) {} |
74 | }; |
75 | |
76 | /// Used to describe a register and immediate addition. |
77 | struct RegImmPair { |
78 | Register Reg; |
79 | int64_t Imm; |
80 | |
81 | RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {} |
82 | }; |
83 | |
84 | /// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare. |
85 | /// It holds the register values, the scale value and the displacement. |
86 | struct ExtAddrMode { |
87 | Register BaseReg; |
88 | Register ScaledReg; |
89 | int64_t Scale; |
90 | int64_t Displacement; |
91 | }; |
92 | |
93 | //--------------------------------------------------------------------------- |
94 | /// |
95 | /// TargetInstrInfo - Interface to description of machine instruction set |
96 | /// |
97 | class TargetInstrInfo : public MCInstrInfo { |
98 | public: |
99 | TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u, |
100 | unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u) |
101 | : CallFrameSetupOpcode(CFSetupOpcode), |
102 | CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode), |
103 | ReturnOpcode(ReturnOpcode) {} |
104 | TargetInstrInfo(const TargetInstrInfo &) = delete; |
105 | TargetInstrInfo &operator=(const TargetInstrInfo &) = delete; |
106 | virtual ~TargetInstrInfo(); |
107 | |
108 | static bool isGenericOpcode(unsigned Opc) { |
109 | return Opc <= TargetOpcode::GENERIC_OP_END; |
110 | } |
111 | |
112 | /// Given a machine instruction descriptor, returns the register |
113 | /// class constraint for OpNum, or NULL. |
114 | virtual |
115 | const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum, |
116 | const TargetRegisterInfo *TRI, |
117 | const MachineFunction &MF) const; |
118 | |
119 | /// Return true if the instruction is trivially rematerializable, meaning it |
120 | /// has no side effects and requires no operands that aren't always available. |
121 | /// This means the only allowed uses are constants and unallocatable physical |
122 | /// registers so that the instruction's result is independent of the place
123 | /// in the function. |
124 | bool isTriviallyReMaterializable(const MachineInstr &MI, |
125 | AAResults *AA = nullptr) const { |
126 | return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF || |
127 | (MI.getDesc().isRematerializable() && |
128 | (isReallyTriviallyReMaterializable(MI, AA) || |
129 | isReallyTriviallyReMaterializableGeneric(MI, AA))); |
130 | } |
131 | |
132 | protected: |
133 | /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is |
134 | /// set, this hook lets the target specify whether the instruction is actually |
135 | /// trivially rematerializable, taking into consideration its operands. This |
136 | /// predicate must return false if the instruction has any side effects other |
137 | /// than producing a value, or if it requires any address registers that are
138 | /// not always available. |
139 | /// Requirements must be checked as stated in isTriviallyReMaterializable().
140 | virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI, |
141 | AAResults *AA) const { |
142 | return false; |
143 | } |
144 | |
145 | /// This method commutes the operands of the given machine instruction MI. |
146 | /// The operands to be commuted are specified by their indices OpIdx1 and |
147 | /// OpIdx2. |
148 | /// |
149 | /// If a target has any instructions that are commutable but require |
150 | /// converting to different instructions or making non-trivial changes |
151 | /// to commute them, this method can be overloaded to do that. |
152 | /// The default implementation simply swaps the commutable operands. |
153 | /// |
154 | /// If NewMI is false, MI is modified in place and returned; otherwise, a |
155 | /// new machine instruction is created and returned. |
156 | /// |
157 | /// Do not call this method for a non-commutable instruction. |
158 | /// Even though the instruction is commutable, the method may still |
159 | /// fail to commute the operands; a null pointer is returned in such cases.
160 | virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI, |
161 | unsigned OpIdx1, |
162 | unsigned OpIdx2) const; |
163 | |
164 | /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable |
165 | /// operand indices to (ResultIdx1, ResultIdx2). |
166 | /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be |
167 | /// predefined to some indices or be undefined (designated by the special |
168 | /// value 'CommuteAnyOperandIndex'). |
169 | /// The predefined result indices cannot be re-defined. |
170 | /// The function returns true iff after the result pair redefinition |
171 | /// the fixed result pair is equal to or equivalent to the source pair of |
172 | /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that |
173 | /// the pairs (x,y) and (y,x) are equivalent. |
174 | static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, |
175 | unsigned CommutableOpIdx1, |
176 | unsigned CommutableOpIdx2); |
177 | |
178 | private: |
179 | /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is |
180 | /// set and the target hook isReallyTriviallyReMaterializable returns false, |
181 | /// this function does target-independent tests to determine if the |
182 | /// instruction is really trivially rematerializable. |
183 | bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI, |
184 | AAResults *AA) const; |
185 | |
186 | public: |
187 | /// These methods return the opcode of the frame setup/destroy instructions |
188 | /// if they exist (-1 otherwise). Some targets use pseudo instructions in |
189 | /// order to abstract away the difference between operating with a frame |
190 | /// pointer and operating without, through the use of these two instructions. |
191 | /// |
192 | unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } |
193 | unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } |
194 | |
195 | /// Returns true if the argument is a frame pseudo instruction. |
196 | bool isFrameInstr(const MachineInstr &I) const { |
197 | return I.getOpcode() == getCallFrameSetupOpcode() || |
198 | I.getOpcode() == getCallFrameDestroyOpcode(); |
199 | } |
200 | |
201 | /// Returns true if the argument is a frame setup pseudo instruction. |
202 | bool isFrameSetup(const MachineInstr &I) const { |
203 | return I.getOpcode() == getCallFrameSetupOpcode(); |
204 | } |
205 | |
206 | /// Returns the size of the frame associated with the given frame instruction.
207 | /// For a frame setup instruction this is the size of the frame that is set up
208 | /// after the instruction. For a frame destroy instruction this is the frame
209 | /// freed by the caller. |
210 | /// Note, in some cases a call frame (or a part of it) may be prepared prior |
211 | /// to the frame setup instruction. It occurs in the calls that involve |
212 | /// inalloca arguments. This function reports only the size of the frame part |
213 | /// that is set up between the frame setup and destroy pseudo instructions. |
214 | int64_t getFrameSize(const MachineInstr &I) const { |
215 | assert(isFrameInstr(I) && "Not a frame instruction");
216 | assert(I.getOperand(0).getImm() >= 0);
217 | return I.getOperand(0).getImm(); |
218 | } |
219 | |
220 | /// Returns the total frame size, which is made up of the space set up inside |
221 | /// the pair of frame start-stop instructions and the space that is set up |
222 | /// prior to the pair. |
223 | int64_t getFrameTotalSize(const MachineInstr &I) const { |
224 | if (isFrameSetup(I)) { |
225 | assert(I.getOperand(1).getImm() >= 0 &&
226 |        "Frame size must not be negative");
227 | return getFrameSize(I) + I.getOperand(1).getImm(); |
228 | } |
229 | return getFrameSize(I); |
230 | } |
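// A minimal sketch of querying one call sequence, assuming `TII` and an
// instruction `I` known to be a frame setup pseudo (names hypothetical):
//
//   assert(TII.isFrameSetup(I));
//   int64_t Inside = TII.getFrameSize(I);      // space set up between the pair
//   int64_t Whole  = TII.getFrameTotalSize(I); // plus any part set up before
//   assert(Whole >= Inside);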
231 | |
232 | unsigned getCatchReturnOpcode() const { return CatchRetOpcode; } |
233 | unsigned getReturnOpcode() const { return ReturnOpcode; } |
234 | |
235 | /// Returns the actual stack pointer adjustment made by an instruction |
236 | /// as part of a call sequence. By default, only call frame setup/destroy |
237 | /// instructions adjust the stack, but targets may want to override this |
238 | /// to enable more fine-grained adjustment, or adjust by a different value. |
239 | virtual int getSPAdjust(const MachineInstr &MI) const; |
240 | |
241 | /// Return true if the instruction is a "coalescable" extension instruction. |
242 | /// That is, it's like a copy where it's legal for the source to overlap the |
243 | /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's |
244 | /// expected the pre-extension value is available as a subreg of the result |
245 | /// register. This also returns the sub-register index in SubIdx. |
246 | virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, |
247 | Register &DstReg, unsigned &SubIdx) const { |
248 | return false; |
249 | } |
250 | |
251 | /// If the specified machine instruction is a direct |
252 | /// load from a stack slot, return the virtual or physical register number of |
253 | /// the destination along with the FrameIndex of the loaded stack slot. If |
254 | /// not, return 0. This predicate must return 0 if the instruction has |
255 | /// any side effects other than loading from the stack slot. |
256 | virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, |
257 | int &FrameIndex) const { |
258 | return 0; |
259 | } |
260 | |
261 | /// Optional extension of isLoadFromStackSlot that returns the number of |
262 | /// bytes loaded from the stack. This must be implemented if a backend |
263 | /// supports partial stack slot spills/loads to further disambiguate |
264 | /// what the load does. |
265 | virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, |
266 | int &FrameIndex, |
267 | unsigned &MemBytes) const { |
268 | MemBytes = 0; |
269 | return isLoadFromStackSlot(MI, FrameIndex); |
270 | } |
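// A usage sketch, assuming `TII` and `MI` from the caller. With the default
// implementation above, MemBytes comes back 0 when the target does not
// implement the size-reporting overload:
//
//   int FI;
//   unsigned MemBytes;
//   if (unsigned Reg = TII.isLoadFromStackSlot(MI, FI, MemBytes))
//     ...; // MI reloads Reg from frame index FI (MemBytes bytes, or an
//          // unknown amount when MemBytes == 0)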
271 | |
272 | /// Check for post-frame ptr elimination stack locations as well. |
273 | /// This uses a heuristic so it isn't reliable for correctness. |
274 | virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, |
275 | int &FrameIndex) const { |
276 | return 0; |
277 | } |
278 | |
279 | /// If the specified machine instruction has a load from a stack slot, |
280 | /// return true along with the FrameIndices of the loaded stack slot and the |
281 | /// machine mem operands containing the reference. |
282 | /// If not, return false. Unlike isLoadFromStackSlot, this returns true for |
283 | /// any instruction that loads from the stack. This is just a hint, as some
284 | /// cases may be missed. |
285 | virtual bool hasLoadFromStackSlot( |
286 | const MachineInstr &MI, |
287 | SmallVectorImpl<const MachineMemOperand *> &Accesses) const; |
288 | |
289 | /// If the specified machine instruction is a direct |
290 | /// store to a stack slot, return the virtual or physical register number of |
291 | /// the source reg along with the FrameIndex of the stack slot stored to. If
292 | /// not, return 0. This predicate must return 0 if the instruction has |
293 | /// any side effects other than storing to the stack slot. |
294 | virtual unsigned isStoreToStackSlot(const MachineInstr &MI, |
295 | int &FrameIndex) const { |
296 | return 0; |
297 | } |
298 | |
299 | /// Optional extension of isStoreToStackSlot that returns the number of |
300 | /// bytes stored to the stack. This must be implemented if a backend |
301 | /// supports partial stack slot spills/loads to further disambiguate |
302 | /// what the store does. |
303 | virtual unsigned isStoreToStackSlot(const MachineInstr &MI, |
304 | int &FrameIndex, |
305 | unsigned &MemBytes) const { |
306 | MemBytes = 0; |
307 | return isStoreToStackSlot(MI, FrameIndex); |
308 | } |
309 | |
310 | /// Check for post-frame ptr elimination stack locations as well. |
311 | /// This uses a heuristic, so it isn't reliable for correctness. |
312 | virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, |
313 | int &FrameIndex) const { |
314 | return 0; |
315 | } |
316 | |
317 | /// If the specified machine instruction has a store to a stack slot, |
318 | /// return true along with the FrameIndices of the stored-to stack slot and the
319 | /// machine mem operands containing the reference. |
320 | /// If not, return false. Unlike isStoreToStackSlot, |
321 | /// this returns true for any instruction that stores to the
322 | /// stack. This is just a hint, as some cases may be missed. |
323 | virtual bool hasStoreToStackSlot( |
324 | const MachineInstr &MI, |
325 | SmallVectorImpl<const MachineMemOperand *> &Accesses) const; |
326 | |
327 | /// Return true if the specified machine instruction |
328 | /// is a copy of one stack slot to another and has no other effect. |
329 | /// Provide the identity of the two frame indices. |
330 | virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, |
331 | int &SrcFrameIndex) const { |
332 | return false; |
333 | } |
334 | |
335 | /// Compute the size in bytes and offset within a stack slot of a spilled |
336 | /// register or subregister. |
337 | /// |
338 | /// \param [out] Size in bytes of the spilled value. |
339 | /// \param [out] Offset in bytes within the stack slot. |
340 | /// \returns true if both Size and Offset are successfully computed. |
341 | /// |
342 | /// Not all subregisters have computable spill slots. For example, |
343 | /// subregisters may not be byte-sized, and a pair of discontiguous
344 | /// subregisters has no single offset. |
345 | /// |
346 | /// Targets with nontrivial big-endian implementations may need to override
347 | /// this, particularly to support spilled vector registers. |
348 | virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, |
349 | unsigned &Size, unsigned &Offset, |
350 | const MachineFunction &MF) const; |
351 | |
352 | /// Return true if the given instruction is a terminator that is unspillable,
353 | /// according to isUnspillableTerminatorImpl. |
354 | bool isUnspillableTerminator(const MachineInstr *MI) const { |
355 | return MI->isTerminator() && isUnspillableTerminatorImpl(MI); |
356 | } |
357 | |
358 | /// Returns the size in bytes of the specified MachineInstr, or ~0U |
359 | /// when this function is not implemented by a target. |
360 | virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const { |
361 | return ~0U; |
362 | } |
363 | |
364 | /// Return true if the instruction is as cheap as a move instruction. |
365 | /// |
366 | /// Targets for different archs need to override this, and different |
367 | /// micro-architectures can also be finely tuned inside. |
368 | virtual bool isAsCheapAsAMove(const MachineInstr &MI) const { |
369 | return MI.isAsCheapAsAMove(); |
370 | } |
371 | |
372 | /// Return true if the instruction should be sunk by MachineSink. |
373 | /// |
374 | /// MachineSink determines on its own whether the instruction is safe to sink; |
375 | /// this gives the target a hook to override the default behavior with regard
376 | /// to which instructions should be sunk. |
377 | virtual bool shouldSink(const MachineInstr &MI) const { return true; } |
378 | |
379 | /// Re-issue the specified 'original' instruction at the |
380 | /// specific location targeting a new destination register. |
381 | /// The register in Orig->getOperand(0).getReg() will be substituted by |
382 | /// DestReg:SubIdx. Any existing subreg index is preserved or composed with |
383 | /// SubIdx. |
384 | virtual void reMaterialize(MachineBasicBlock &MBB, |
385 | MachineBasicBlock::iterator MI, Register DestReg, |
386 | unsigned SubIdx, const MachineInstr &Orig, |
387 | const TargetRegisterInfo &TRI) const; |
388 | |
389 | /// Clones instruction or the whole instruction bundle \p Orig and |
390 | /// insert into \p MBB before \p InsertBefore. The target may update operands |
391 | /// that are required to be unique. |
392 | /// |
393 | /// \p Orig must not return true for MachineInstr::isNotDuplicable(). |
394 | virtual MachineInstr &duplicate(MachineBasicBlock &MBB, |
395 | MachineBasicBlock::iterator InsertBefore, |
396 | const MachineInstr &Orig) const; |
397 | |
398 | /// This method must be implemented by targets that |
399 | /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target |
400 | /// may be able to convert a two-address instruction into one or more true |
401 | /// three-address instructions on demand. This allows the X86 target (for |
402 | /// example) to convert ADD and SHL instructions into LEA instructions if they |
403 | /// would require register copies due to two-addressness. |
404 | /// |
405 | /// This method returns a null pointer if the transformation cannot be |
406 | /// performed, otherwise it returns the last new instruction. |
407 | /// |
408 | virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI, |
409 | MachineInstr &MI, |
410 | LiveVariables *LV) const { |
411 | return nullptr; |
412 | } |
413 | |
414 | // This constant can be used as an input value of operand index passed to |
415 | // the method findCommutedOpIndices() to tell the method that the |
416 | // corresponding operand index is not pre-defined and that the method |
417 | // can pick any commutable operand. |
418 | static const unsigned CommuteAnyOperandIndex = ~0U; |
419 | |
420 | /// This method commutes the operands of the given machine instruction MI. |
421 | /// |
422 | /// The operands to be commuted are specified by their indices OpIdx1 and |
423 | /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value |
424 | /// 'CommuteAnyOperandIndex', which means that the method is free to choose |
425 | /// any commutable operand. If both arguments are set to
426 | /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable |
427 | /// operands, then commutes them if such operands can be found.
428 | /// |
429 | /// If NewMI is false, MI is modified in place and returned; otherwise, a |
430 | /// new machine instruction is created and returned. |
431 | /// |
432 | /// Do not call this method for a non-commutable instruction or |
433 | /// for non-commutable operands.
434 | /// Even though the instruction is commutable, the method may still |
435 | /// fail to commute the operands; a null pointer is returned in such cases.
436 | MachineInstr * |
437 | commuteInstruction(MachineInstr &MI, bool NewMI = false, |
438 | unsigned OpIdx1 = CommuteAnyOperandIndex, |
439 | unsigned OpIdx2 = CommuteAnyOperandIndex) const; |
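// A minimal sketch, assuming `TII` and `MI` from the caller: try to commute
// in place and tolerate failure, since even a commutable instruction may
// refuse to commute particular operands.
//
//   if (MachineInstr *Commuted = TII.commuteInstruction(MI))
//     assert(Commuted == &MI); // NewMI defaults to false: modified in place
//   else
//     ...; // commuting was not possible; MI is unchanged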
440 | |
441 | /// Returns true iff the routine could find two commutable operands in the |
442 | /// given machine instruction. |
443 | /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. |
444 | /// If any of the INPUT values is set to the special value |
445 | /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable |
446 | /// operand, then returns its index in the corresponding argument. |
447 | /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
448 | /// looks for 2 commutable operands. |
449 | /// If INPUT values refer to some operands of MI, then the method simply |
450 | /// returns true if the corresponding operands are commutable and returns |
451 | /// false otherwise. |
452 | /// |
453 | /// For example, calling this method this way: |
454 | /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex; |
455 | /// findCommutedOpIndices(MI, Op1, Op2); |
456 | /// can be interpreted as a query asking to find an operand that would be |
457 | /// commutable with the operand#1. |
458 | virtual bool findCommutedOpIndices(const MachineInstr &MI, |
459 | unsigned &SrcOpIdx1, |
460 | unsigned &SrcOpIdx2) const; |
461 | |
462 | /// A pair composed of a register and a sub-register index. |
463 | /// Used to give some type checking when modeling Reg:SubReg. |
464 | struct RegSubRegPair { |
465 | Register Reg; |
466 | unsigned SubReg; |
467 | |
468 | RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0) |
469 | : Reg(Reg), SubReg(SubReg) {} |
470 | |
471 | bool operator==(const RegSubRegPair& P) const { |
472 | return Reg == P.Reg && SubReg == P.SubReg; |
473 | } |
474 | bool operator!=(const RegSubRegPair& P) const { |
475 | return !(*this == P); |
476 | } |
477 | }; |
478 | |
479 | /// A pair composed of a pair of a register and a sub-register index, |
480 | /// and another sub-register index. |
481 | /// Used to give some type checking when modeling Reg:SubReg1, SubReg2. |
482 | struct RegSubRegPairAndIdx : RegSubRegPair { |
483 | unsigned SubIdx; |
484 | |
485 | RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0, |
486 | unsigned SubIdx = 0) |
487 | : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {} |
488 | }; |
489 | |
490 | /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI |
491 | /// and \p DefIdx. |
492 | /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of |
493 | /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef |
494 | /// flag are not added to this list. |
495 | /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce |
496 | /// two elements: |
497 | /// - %1:sub1, sub0 |
498 | /// - %2<:0>, sub1 |
499 | /// |
500 | /// \returns true if it is possible to build such an input sequence |
501 | /// with the pair \p MI, \p DefIdx. False otherwise. |
502 | /// |
503 | /// \pre MI.isRegSequence() or MI.isRegSequenceLike(). |
504 | /// |
505 | /// \note The generic implementation does not provide any support for |
506 | /// MI.isRegSequenceLike(). In other words, one has to override |
507 | /// getRegSequenceLikeInputs for target specific instructions. |
508 | bool |
509 | getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, |
510 | SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const; |
511 | |
512 | /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI |
513 | /// and \p DefIdx. |
514 | /// \p [out] InputReg of the equivalent EXTRACT_SUBREG. |
515 | /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce: |
516 | /// - %1:sub1, sub0 |
517 | /// |
518 | /// \returns true if it is possible to build such an input sequence |
519 | /// with the pair \p MI, \p DefIdx and the operand has no undef flag set. |
520 | /// False otherwise. |
521 | /// |
522 | /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike(). |
523 | /// |
524 | /// \note The generic implementation does not provide any support for |
525 | /// MI.isExtractSubregLike(). In other words, one has to override |
526 | /// getExtractSubregLikeInputs for target specific instructions. |
527 | bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, |
528 | RegSubRegPairAndIdx &InputReg) const; |
529 | |
530 | /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI |
531 | /// and \p DefIdx. |
532 | /// \p [out] BaseReg and \p [out] InsertedReg contain |
533 | /// the equivalent inputs of INSERT_SUBREG. |
534 | /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce: |
535 | /// - BaseReg: %0:sub0 |
536 | /// - InsertedReg: %1:sub1, sub3 |
537 | /// |
538 | /// \returns true if it is possible to build such an input sequence |
539 | /// with the pair \p MI, \p DefIdx and the operand has no undef flag set. |
540 | /// False otherwise. |
541 | /// |
542 | /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike(). |
543 | /// |
544 | /// \note The generic implementation does not provide any support for |
545 | /// MI.isInsertSubregLike(). In other words, one has to override |
546 | /// getInsertSubregLikeInputs for target specific instructions. |
547 | bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, |
548 | RegSubRegPair &BaseReg, |
549 | RegSubRegPairAndIdx &InsertedReg) const; |
550 | |
551 | /// Return true if two machine instructions would produce identical values. |
552 | /// By default, this is only true when the two instructions |
553 | /// are deemed identical except for defs. If this function is called when the |
554 | /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for |
555 | /// aggressive checks. |
556 | virtual bool produceSameValue(const MachineInstr &MI0, |
557 | const MachineInstr &MI1, |
558 | const MachineRegisterInfo *MRI = nullptr) const; |
559 | |
560 | /// \returns true if a branch from an instruction with opcode \p BranchOpc |
561 | /// is capable of jumping to a position \p BrOffset bytes away. |
562 | virtual bool isBranchOffsetInRange(unsigned BranchOpc, |
563 | int64_t BrOffset) const { |
564 |   llvm_unreachable("target did not implement"); |
565 | } |
566 | |
567 | /// \returns The block that branch instruction \p MI jumps to. |
568 | virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const { |
569 |   llvm_unreachable("target did not implement"); |
570 | } |
571 | |
572 | /// Insert an unconditional indirect branch at the end of \p MBB to \p |
573 | /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to |
574 | /// the offset of the position to insert the new branch. |
575 | /// |
576 | /// \returns The number of bytes added to the block. |
577 | virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB, |
578 | MachineBasicBlock &NewDestBB, |
579 | const DebugLoc &DL, |
580 | int64_t BrOffset = 0, |
581 | RegScavenger *RS = nullptr) const { |
582 |   llvm_unreachable("target did not implement"); |
583 | } |
584 | |
585 | /// Analyze the branching code at the end of MBB, returning |
586 | /// true if it cannot be understood (e.g. it's a switch dispatch or isn't |
587 | /// implemented for a target). Upon success, this returns false, along with |
588 | /// the following information in various cases: |
589 | /// |
590 | /// 1. If this block ends with no branches (it just falls through to its succ) |
591 | /// just return false, leaving TBB/FBB null. |
592 | /// 2. If this block ends with only an unconditional branch, it sets TBB to be |
593 | /// the destination block. |
594 | /// 3. If this block ends with a conditional branch and it falls through to a |
595 | /// successor block, it sets TBB to be the branch destination block and fills |
596 | /// Cond with a list of operands that evaluate the condition. These operands can be |
597 | /// passed to other TargetInstrInfo methods to create new branches. |
598 | /// 4. If this block ends with a conditional branch followed by an |
599 | /// unconditional branch, it returns the 'true' destination in TBB, the |
600 | /// 'false' destination in FBB, and a list of operands that evaluate the |
601 | /// condition. These operands can be passed to other TargetInstrInfo |
602 | /// methods to create new branches. |
603 | /// |
604 | /// Note that removeBranch and insertBranch must be implemented to support |
605 | /// cases where this method returns success. |
606 | /// |
607 | /// If AllowModify is true, then this routine is allowed to modify the basic |
608 | /// block (e.g. delete instructions after the unconditional branch). |
609 | /// |
610 | /// The CFG information in MBB.Predecessors and MBB.Successors must be valid |
611 | /// before calling this function. |
612 | virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, |
613 | MachineBasicBlock *&FBB, |
614 | SmallVectorImpl<MachineOperand> &Cond, |
615 | bool AllowModify = false) const { |
616 | return true; |
617 | } |
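     | /// A minimal caller-side sketch (illustrative, not part of the original |
     | /// header) of how the four cases above are usually distinguished. Assumes |
     | /// `TII` (a const TargetInstrInfo *) and a MachineBasicBlock `MBB` in scope. |
     | /// \code |
     | ///   MachineBasicBlock *TBB = nullptr, *FBB = nullptr; |
     | ///   SmallVector<MachineOperand, 4> Cond; |
     | ///   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) { |
     | ///     // Case 1: !TBB                 -> block falls through. |
     | ///     // Case 2: TBB && Cond.empty()  -> unconditional branch to TBB. |
     | ///     // Case 3: TBB && !FBB          -> conditional branch, falls through. |
     | ///     // Case 4: TBB && FBB           -> conditional to TBB, else FBB. |
     | ///   } |
     | /// \endcode |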
618 | |
619 | /// Represents a predicate at the MachineFunction level. The control flow a |
620 | /// MachineBranchPredicate represents is: |
621 | /// |
622 | /// Reg = LHS `Predicate` RHS == ConditionDef |
623 | /// if Reg then goto TrueDest else goto FalseDest |
624 | /// |
625 | struct MachineBranchPredicate { |
626 | enum ComparePredicate { |
627 | PRED_EQ, // True if two values are equal |
628 | PRED_NE, // True if two values are not equal |
629 | PRED_INVALID // Sentinel value |
630 | }; |
631 | |
632 | ComparePredicate Predicate = PRED_INVALID; |
633 | MachineOperand LHS = MachineOperand::CreateImm(0); |
634 | MachineOperand RHS = MachineOperand::CreateImm(0); |
635 | MachineBasicBlock *TrueDest = nullptr; |
636 | MachineBasicBlock *FalseDest = nullptr; |
637 | MachineInstr *ConditionDef = nullptr; |
638 | |
639 | /// SingleUseCondition is true if ConditionDef is dead except for the |
640 | /// branch(es) at the end of the basic block. |
641 | /// |
642 | bool SingleUseCondition = false; |
643 | |
644 | explicit MachineBranchPredicate() = default; |
645 | }; |
646 | |
647 | /// Analyze the branching code at the end of MBB and parse it into the |
648 | /// MachineBranchPredicate structure if possible. Returns false on success |
649 | /// and true on failure. |
650 | /// |
651 | /// If AllowModify is true, then this routine is allowed to modify the basic |
652 | /// block (e.g. delete instructions after the unconditional branch). |
653 | /// |
654 | virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, |
655 | MachineBranchPredicate &MBP, |
656 | bool AllowModify = false) const { |
657 | return true; |
658 | } |
659 | |
660 | /// Remove the branching code at the end of the specific MBB. |
661 | /// This is only invoked in cases where analyzeBranch returns success. It |
662 | /// returns the number of instructions that were removed. |
663 | /// If \p BytesRemoved is non-null, report the change in code size from the |
664 | /// removed instructions. |
665 | virtual unsigned removeBranch(MachineBasicBlock &MBB, |
666 | int *BytesRemoved = nullptr) const { |
667 |   llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!"); |
668 | } |
669 | |
670 | /// Insert branch code into the end of the specified MachineBasicBlock. The |
671 | /// operands to this method are the same as those returned by analyzeBranch. |
672 | /// This is only invoked in cases where analyzeBranch returns success. It |
673 | /// returns the number of instructions inserted. If \p BytesAdded is non-null, |
674 | /// report the change in code size from the added instructions. |
675 | /// |
676 | /// It is also invoked by tail merging to add unconditional branches in |
677 | /// cases where analyzeBranch doesn't apply because there was no original |
678 | /// branch to analyze. At least this much must be implemented, else tail |
679 | /// merging needs to be disabled. |
680 | /// |
681 | /// The CFG information in MBB.Predecessors and MBB.Successors must be valid |
682 | /// before calling this function. |
683 | virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, |
684 | MachineBasicBlock *FBB, |
685 | ArrayRef<MachineOperand> Cond, |
686 | const DebugLoc &DL, |
687 | int *BytesAdded = nullptr) const { |
688 |   llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!"); |
689 | } |
690 | |
691 | unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, |
692 | MachineBasicBlock *DestBB, |
693 | const DebugLoc &DL, |
694 | int *BytesAdded = nullptr) const { |
695 | return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL, |
696 | BytesAdded); |
697 | } |
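     | /// A hedged sketch of the common analyzeBranch/removeBranch/insertBranch |
     | /// round trip, here inverting a conditional branch. `TII`, `MBB`, and a |
     | /// DebugLoc `DL` are assumed in scope; reverseBranchCondition is declared |
     | /// further below and returns false on success. |
     | /// \code |
     | ///   MachineBasicBlock *TBB = nullptr, *FBB = nullptr; |
     | ///   SmallVector<MachineOperand, 4> Cond; |
     | ///   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond) && FBB && |
     | ///       !TII->reverseBranchCondition(Cond)) { |
     | ///     TII->removeBranch(MBB); |
     | ///     TII->insertBranch(MBB, FBB, TBB, Cond, DL); // Destinations swapped. |
     | ///   } |
     | /// \endcode |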
698 | |
699 | /// Object returned by analyzeLoopForPipelining. Allows software pipelining |
700 | /// implementations to query attributes of the loop being pipelined and to |
701 | /// apply target-specific updates to the loop once pipelining is complete. |
702 | class PipelinerLoopInfo { |
703 | public: |
704 | virtual ~PipelinerLoopInfo(); |
705 | /// Return true if the given instruction should not be pipelined and should |
706 | /// be ignored. An example could be a loop comparison, or induction variable |
707 | /// update with no users being pipelined. |
708 | virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0; |
709 | |
710 | /// Create a condition to determine if the trip count of the loop is greater |
711 | /// than TC. |
712 | /// |
713 | /// If the trip count is statically known to be greater than TC, return |
714 | /// true. If the trip count is statically known to be not greater than TC, |
715 | /// return false. Otherwise return nullopt and fill out Cond with the test |
716 | /// condition. |
717 | virtual Optional<bool> |
718 | createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, |
719 | SmallVectorImpl<MachineOperand> &Cond) = 0; |
720 | |
721 | /// Modify the loop such that the trip count is |
722 | /// OriginalTC + TripCountAdjust. |
723 | virtual void adjustTripCount(int TripCountAdjust) = 0; |
724 | |
725 | /// Called when the loop's preheader has been modified to NewPreheader. |
726 | virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0; |
727 | |
728 | /// Called when the loop is being removed. Any instructions in the preheader |
729 | /// should be removed. |
730 | /// |
731 | /// Once this function is called, no other functions on this object are |
732 | /// valid; the loop has been removed. |
733 | virtual void disposed() = 0; |
734 | }; |
735 | |
736 | /// Analyze the loop \p LoopBB, which must be a single-basic-block loop, and |
737 | /// if its conditions can be understood well enough, produce a PipelinerLoopInfo object. |
738 | virtual std::unique_ptr<PipelinerLoopInfo> |
739 | analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const { |
740 | return nullptr; |
741 | } |
742 | |
743 | /// Analyze the loop code, return true if it cannot be understood. Upon |
744 | /// success, this function returns false and returns information about the |
745 | /// induction variable and compare instruction used at the end. |
746 | virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, |
747 | MachineInstr *&CmpInst) const { |
748 | return true; |
749 | } |
750 | |
751 | /// Generate code to reduce the loop iteration by one and check if the loop |
752 | /// is finished. Return the value/register of the new loop count. We need |
753 | /// this function when peeling off one or more iterations of a loop. This |
754 | /// function assumes the nth iteration is peeled first. |
755 | virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, |
756 | MachineBasicBlock &PreHeader, |
757 | MachineInstr *IndVar, MachineInstr &Cmp, |
758 | SmallVectorImpl<MachineOperand> &Cond, |
759 | SmallVectorImpl<MachineInstr *> &PrevInsts, |
760 | unsigned Iter, unsigned MaxIter) const { |
761 |   llvm_unreachable("Target didn't implement ReduceLoopCount"); |
762 | } |
763 | |
764 | /// Delete the instruction OldInst and everything after it, replacing it with |
765 | /// an unconditional branch to NewDest. This is used by the tail merging pass. |
766 | virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, |
767 | MachineBasicBlock *NewDest) const; |
768 | |
769 | /// Return true if it's legal to split the given basic |
770 | /// block at the specified instruction (i.e. instruction would be the start |
771 | /// of a new basic block). |
772 | virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, |
773 | MachineBasicBlock::iterator MBBI) const { |
774 | return true; |
775 | } |
776 | |
777 | /// Return true if it's profitable to predicate |
778 | /// instructions with accumulated instruction latency of "NumCycles" |
779 | /// of the specified basic block, where the probability of the instructions |
780 | /// being executed is given by Probability, and Confidence is a measure |
781 | /// of our confidence that it will be properly predicted. |
782 | virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, |
783 | unsigned ExtraPredCycles, |
784 | BranchProbability Probability) const { |
785 | return false; |
786 | } |
787 | |
788 | /// Second variant of isProfitableToIfCvt. This one |
789 | /// checks for the case where two basic blocks, from the true and false paths |
790 | /// of an if-then-else (diamond), are predicated on mutually exclusive |
791 | /// predicates, where the probability of the true path being taken is given |
792 | /// by Probability, and Confidence is a measure of our confidence that it |
793 | /// will be properly predicted. |
794 | virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, |
795 | unsigned ExtraTCycles, |
796 | MachineBasicBlock &FMBB, unsigned NumFCycles, |
797 | unsigned ExtraFCycles, |
798 | BranchProbability Probability) const { |
799 | return false; |
800 | } |
801 | |
802 | /// Return true if it's profitable for the if-converter to duplicate instructions |
803 | /// of specified accumulated instruction latencies in the specified MBB to |
804 | /// enable if-conversion. |
805 | /// The probability of the instructions being executed is given by |
806 | /// Probability, and Confidence is a measure of our confidence that it |
807 | /// will be properly predicted. |
808 | virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, |
809 | unsigned NumCycles, |
810 | BranchProbability Probability) const { |
811 | return false; |
812 | } |
813 | |
814 | /// Return the increase in code size needed to predicate a contiguous run of |
815 | /// NumInsts instructions. |
816 | virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, |
817 | unsigned NumInsts) const { |
818 | return 0; |
819 | } |
820 | |
821 | /// Return an estimate for the code size reduction (in bytes) which will be |
822 | /// caused by removing the given branch instruction during if-conversion. |
823 | virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const { |
824 | return getInstSizeInBytes(MI); |
825 | } |
826 | |
827 | /// Return true if it's profitable to unpredicate |
828 | /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually |
829 | /// exclusive predicates. |
830 | /// e.g. |
831 | /// subeq r0, r1, #1 |
832 | /// addne r0, r1, #1 |
833 | /// => |
834 | /// sub r0, r1, #1 |
835 | /// addne r0, r1, #1 |
836 | /// |
837 | /// This may be profitable if conditional instructions are always executed. |
838 | virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, |
839 | MachineBasicBlock &FMBB) const { |
840 | return false; |
841 | } |
842 | |
843 | /// Return true if it is possible to insert a select |
844 | /// instruction that chooses between TrueReg and FalseReg based on the |
845 | /// condition code in Cond. |
846 | /// |
847 | /// When successful, also return the latency in cycles from TrueReg, |
848 | /// FalseReg, and Cond to the destination register. In most cases, a select |
849 | /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1 |
850 | /// |
851 | /// Some x86 implementations have 2-cycle cmov instructions. |
852 | /// |
853 | /// @param MBB Block where select instruction would be inserted. |
854 | /// @param Cond Condition returned by analyzeBranch. |
855 | /// @param DstReg Virtual dest register that the result should write to. |
856 | /// @param TrueReg Virtual register to select when Cond is true. |
857 | /// @param FalseReg Virtual register to select when Cond is false. |
858 | /// @param CondCycles Latency from Cond+Branch to select output. |
859 | /// @param TrueCycles Latency from TrueReg to select output. |
860 | /// @param FalseCycles Latency from FalseReg to select output. |
861 | virtual bool canInsertSelect(const MachineBasicBlock &MBB, |
862 | ArrayRef<MachineOperand> Cond, Register DstReg, |
863 | Register TrueReg, Register FalseReg, |
864 | int &CondCycles, int &TrueCycles, |
865 | int &FalseCycles) const { |
866 | return false; |
867 | } |
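     | /// Sketch of the intended pairing with insertSelect below (illustrative; |
     | /// `MBB`, `I`, `DL`, `Cond`, and the three virtual registers are assumed |
     | /// to be in scope). |
     | /// \code |
     | ///   int CondCycles, TrueCycles, FalseCycles; |
     | ///   if (TII->canInsertSelect(MBB, Cond, DstReg, TrueReg, FalseReg, |
     | ///                            CondCycles, TrueCycles, FalseCycles)) |
     | ///     // Legal, and the latencies were filled in; emit before iterator I. |
     | ///     TII->insertSelect(MBB, I, DL, DstReg, Cond, TrueReg, FalseReg); |
     | /// \endcode |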
868 | |
869 | /// Insert a select instruction into MBB before I that will copy TrueReg to |
870 | /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false. |
871 | /// |
872 | /// This function can only be called after canInsertSelect() returned true. |
873 | /// The condition in Cond comes from analyzeBranch, and it can be assumed |
874 | /// that the same flags or registers required by Cond are available at the |
875 | /// insertion point. |
876 | /// |
877 | /// @param MBB Block where select instruction should be inserted. |
878 | /// @param I Insertion point. |
879 | /// @param DL Source location for debugging. |
880 | /// @param DstReg Virtual register to be defined by select instruction. |
881 | /// @param Cond Condition as computed by analyzeBranch. |
882 | /// @param TrueReg Virtual register to copy when Cond is true. |
883 | /// @param FalseReg Virtual register to copy when Cond is false. |
884 | virtual void insertSelect(MachineBasicBlock &MBB, |
885 | MachineBasicBlock::iterator I, const DebugLoc &DL, |
886 | Register DstReg, ArrayRef<MachineOperand> Cond, |
887 | Register TrueReg, Register FalseReg) const { |
888 |   llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!"); |
889 | } |
890 | |
891 | /// Analyze the given select instruction, returning true if |
892 | /// it cannot be understood. It is assumed that MI->isSelect() is true. |
893 | /// |
894 | /// When successful, return the controlling condition and the operands that |
895 | /// determine the true and false result values. |
896 | /// |
897 | /// Result = SELECT Cond, TrueOp, FalseOp |
898 | /// |
899 | /// Some targets can optimize select instructions, for example by predicating |
900 | /// the instruction defining one of the operands. Such targets should set |
901 | /// Optimizable. |
902 | /// |
903 | /// @param MI Select instruction to analyze. |
904 | /// @param Cond Condition controlling the select. |
905 | /// @param TrueOp Operand number of the value selected when Cond is true. |
906 | /// @param FalseOp Operand number of the value selected when Cond is false. |
907 | /// @param Optimizable Returned as true if MI is optimizable. |
908 | /// @returns False on success. |
909 | virtual bool analyzeSelect(const MachineInstr &MI, |
910 | SmallVectorImpl<MachineOperand> &Cond, |
911 | unsigned &TrueOp, unsigned &FalseOp, |
912 | bool &Optimizable) const { |
913 |   assert(MI.getDesc().isSelect() && "MI must be a select instruction"); |
914 | return true; |
915 | } |
916 | |
917 | /// Given a select instruction that was understood by |
918 | /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by |
919 | /// merging it with one of its operands. Returns NULL on failure. |
920 | /// |
921 | /// When successful, returns the new select instruction. The client is |
922 | /// responsible for deleting MI. |
923 | /// |
924 | /// If both sides of the select can be optimized, PreferFalse is used to pick |
925 | /// a side. |
926 | /// |
927 | /// @param MI Optimizable select instruction. |
928 | /// @param NewMIs Set that records all MIs in the basic block up to \p |
929 | /// MI. Has to be updated with any newly created MI or deleted ones. |
930 | /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. |
931 | /// @returns Optimized instruction or NULL. |
932 | virtual MachineInstr *optimizeSelect(MachineInstr &MI, |
933 | SmallPtrSetImpl<MachineInstr *> &NewMIs, |
934 | bool PreferFalse = false) const { |
935 | // This function must be implemented if Optimizable is ever set. |
936 |   llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); |
937 | } |
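     | /// How a peephole client might chain analyzeSelect and optimizeSelect (a |
     | /// sketch; `MI` is assumed to be a select instruction and `NewMIs` the set |
     | /// described in the @param above). |
     | /// \code |
     | ///   SmallVector<MachineOperand, 4> Cond; |
     | ///   unsigned TrueOp, FalseOp; |
     | ///   bool Optimizable = false; |
     | ///   if (!TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable) && |
     | ///       Optimizable) |
     | ///     if (MachineInstr *NewMI = TII->optimizeSelect(MI, NewMIs)) |
     | ///       MI.eraseFromParent(); // Client must delete the old instruction. |
     | /// \endcode |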
938 | |
939 | /// Emit instructions to copy a pair of physical registers. |
940 | /// |
941 | /// This function should support copies within any legal register class as |
942 | /// well as any cross-class copies created during instruction selection. |
943 | /// |
944 | /// The source and destination registers may overlap, which may require a |
945 | /// careful implementation when multiple copy instructions are required for |
946 | /// large registers. See for example the ARM target. |
947 | virtual void copyPhysReg(MachineBasicBlock &MBB, |
948 | MachineBasicBlock::iterator MI, const DebugLoc &DL, |
949 | MCRegister DestReg, MCRegister SrcReg, |
950 | bool KillSrc) const { |
951 |   llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); |
952 | } |
953 | |
954 | protected: |
955 | /// Target-dependent implementation for isCopyInstr. |
956 | /// If the specified machine instruction is an instruction that moves/copies |
957 | /// a value from one register to another, return the destination and source |
958 | /// registers as machine operands. |
959 | virtual Optional<DestSourcePair> |
960 | isCopyInstrImpl(const MachineInstr &MI) const { |
961 | return None; |
962 | } |
963 | |
964 | /// Return true if the given terminator MI is not expected to spill. This |
965 | /// sets the live interval as not spillable and adjusts phi node lowering to |
966 | /// not introduce copies after the terminator. Use with care; these are |
967 | /// currently used for hardware loop intrinsics in very controlled situations, |
968 | /// created prior to register allocation in loops that only have a single phi |
969 | /// user of the terminator's value. They may run out of registers if not used |
970 | /// carefully. |
971 | virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const { |
972 | return false; |
973 | } |
974 | |
975 | public: |
976 | /// If the specified machine instruction is an instruction that moves/copies |
977 | /// a value from one register to another, return the destination and source |
978 | /// registers as machine operands. |
979 | /// For a COPY instruction the method naturally returns the destination and |
980 | /// source registers as machine operands; for all other instructions it calls |
981 | /// the target-dependent implementation. |
982 | Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const { |
983 | if (MI.isCopy()) { |
984 | return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; |
985 | } |
986 | return isCopyInstrImpl(MI); |
987 | } |
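     | /// Example query (illustrative): DestSourcePair exposes the two operands |
     | /// as `Destination` and `Source` MachineOperand pointers. |
     | /// \code |
     | ///   if (Optional<DestSourcePair> DstSrc = TII->isCopyInstr(MI)) { |
     | ///     Register Dst = DstSrc->Destination->getReg(); |
     | ///     Register Src = DstSrc->Source->getReg(); |
     | ///     // MI moves Src into Dst, whether it is a plain COPY or a |
     | ///     // target-specific move recognized by isCopyInstrImpl(). |
     | ///   } |
     | /// \endcode |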
988 | |
989 | /// If the specific machine instruction is an instruction that adds an |
990 | /// immediate value and a physical register, and stores the result in |
991 | /// the given physical register \c Reg, return a pair of the source |
992 | /// register and the offset which has been added. |
993 | virtual Optional<RegImmPair> isAddImmediate(const MachineInstr &MI, |
994 | Register Reg) const { |
995 | return None; |
996 | } |
997 | |
998 | /// Returns true if MI is an instruction that defines Reg to have a constant |
999 | /// value and the value is recorded in ImmVal. ImmVal is a result that |
1000 | /// should be interpreted modulo the size of Reg. |
1001 | virtual bool getConstValDefinedInReg(const MachineInstr &MI, |
1002 | const Register Reg, |
1003 | int64_t &ImmVal) const { |
1004 | return false; |
1005 | } |
1006 | |
1007 | /// Store the specified register of the given register class to the specified |
1008 | /// stack frame index. The store instruction is to be added to the given |
1009 | /// machine basic block before the specified machine instruction. If isKill |
1010 | /// is true, the register operand is the last use and must be marked kill. |
1011 | virtual void storeRegToStackSlot(MachineBasicBlock &MBB, |
1012 | MachineBasicBlock::iterator MI, |
1013 | Register SrcReg, bool isKill, int FrameIndex, |
1014 | const TargetRegisterClass *RC, |
1015 | const TargetRegisterInfo *TRI) const { |
1016 |   llvm_unreachable("Target didn't implement " |
1017 |                    "TargetInstrInfo::storeRegToStackSlot!"); |
1018 | } |
1019 | |
1020 | /// Load the specified register of the given register class from the specified |
1021 | /// stack frame index. The load instruction is to be added to the given |
1022 | /// machine basic block before the specified machine instruction. |
1023 | virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, |
1024 | MachineBasicBlock::iterator MI, |
1025 | Register DestReg, int FrameIndex, |
1026 | const TargetRegisterClass *RC, |
1027 | const TargetRegisterInfo *TRI) const { |
1028 |   llvm_unreachable("Target didn't implement " |
1029 |                    "TargetInstrInfo::loadRegFromStackSlot!"); |
1030 | } |
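     | /// A minimal spill/reload sketch using the two hooks above. Assumes a |
     | /// frame index `FI` created via MachineFrameInfo::CreateSpillStackObject |
     | /// and that `RC`/`TRI` describe the register being spilled. |
     | /// \code |
     | ///   // Spill SrcReg to the slot before MI ... |
     | ///   TII->storeRegToStackSlot(MBB, MI, SrcReg, /*isKill=*/true, FI, RC, TRI); |
     | ///   // ... and reload it before a later instruction UseMI. |
     | ///   TII->loadRegFromStackSlot(MBB, UseMI, SrcReg, FI, RC, TRI); |
     | /// \endcode |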
1031 | |
1032 | /// This function is called for all pseudo instructions |
1033 | /// that remain after register allocation. Many pseudo instructions are |
1034 | /// created to help register allocation. This is the place to convert them |
1035 | /// into real instructions. The target can edit MI in place, or it can insert |
1036 | /// new instructions and erase MI. The function should return true if |
1037 | /// anything was changed. |
1038 | virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; } |
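     | /// Sketch of a typical target override (hypothetical `MyTarget` opcodes): |
     | /// rewrite one pseudo in place and report whether anything changed. |
     | /// \code |
     | ///   bool MyTargetInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
     | ///     if (MI.getOpcode() != MyTarget::PSEUDO_RET) |
     | ///       return false; |
     | ///     MI.setDesc(get(MyTarget::RET)); // Re-brand the pseudo in place. |
     | ///     return true; |
     | ///   } |
     | /// \endcode |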
1039 | |
1040 | /// Check whether the target can fold a load that feeds a subreg operand |
1041 | /// (or a subreg operand that feeds a store). |
1042 | /// For example, X86 may want to return true if it can fold |
1043 | /// movl (%esp), %eax |
1044 | /// subb %al, ... |
1045 | /// Into: |
1046 | /// subb (%esp), ... |
1047 | /// |
1048 | /// Ideally, we'd like the target implementation of foldMemoryOperand() to |
1049 | /// reject subregs - but since this behavior used to be enforced in the |
1050 | /// target-independent code, moving this responsibility to the targets |
1051 | /// has the potential of causing nasty silent breakage in out-of-tree targets. |
1052 | virtual bool isSubregFoldable() const { return false; } |
1053 | |
1054 | /// Attempt to fold a load or store of the specified stack |
1055 | /// slot into the specified machine instruction for the specified operand(s). |
1056 | /// If this is possible, a new instruction is returned with the specified |
1057 | /// operand folded, otherwise NULL is returned. |
1058 | /// The new instruction is inserted before MI, and the client is responsible |
1059 | /// for removing the old instruction. |
1060 | /// If VRM is passed, the assigned physregs can be inspected by the target to |
1061 | /// decide on using an opcode (note that those assignments can still change). |
1062 | MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, |
1063 | int FI, |
1064 | LiveIntervals *LIS = nullptr, |
1065 | VirtRegMap *VRM = nullptr) const; |
1066 | |
1067 | /// Same as the previous version except it allows folding of any load and |
1068 | /// store from / to any address, not just from a specific stack slot. |
1069 | MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, |
1070 | MachineInstr &LoadMI, |
1071 | LiveIntervals *LIS = nullptr) const; |
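     | /// Usage sketch for the stack-slot variant (illustrative; `OpIdx` is a |
     | /// hypothetical operand index and `FI` a stack frame index): |
     | /// \code |
     | ///   if (MachineInstr *FoldedMI = |
     | ///           TII->foldMemoryOperand(MI, {OpIdx}, FI, LIS)) |
     | ///     MI.eraseFromParent(); // The folded copy was inserted before MI. |
     | /// \endcode |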
1072 | |
1073 | /// Return true when there is potentially a faster code sequence |
1074 | /// for an instruction chain ending in \p Root. All potential patterns are |
1075 | /// returned in the \p Patterns vector. Patterns should be sorted in priority |
1076 | /// order since the pattern evaluator stops checking as soon as it finds a |
1077 | /// faster sequence. |
1078 | /// \param Root - Instruction that could be combined with one of its operands |
1079 | /// \param Patterns - Vector of possible combination patterns |
1080 | virtual bool |
1081 | getMachineCombinerPatterns(MachineInstr &Root, |
1082 | SmallVectorImpl<MachineCombinerPattern> &Patterns, |
1083 | bool DoRegPressureReduce) const; |
1084 | |
1085 | /// Return true if target supports reassociation of instructions in machine |
1086 | /// combiner pass to reduce register pressure for a given BB. |
1087 | virtual bool |
1088 | shouldReduceRegisterPressure(MachineBasicBlock *MBB, |
1089 | RegisterClassInfo *RegClassInfo) const { |
1090 | return false; |
1091 | } |
1092 | |
1093 | /// Fix up the placeholder we may add in genAlternativeCodeSequence(). |
1094 | virtual void |
1095 | finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, |
1096 | SmallVectorImpl<MachineInstr *> &InsInstrs) const {} |
1097 | |
1098 | /// Return true when a code sequence can improve throughput. It |
1099 | /// should be called only for instructions in loops. |
1100 | /// \param Pattern - combiner pattern |
1101 | virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const; |
1102 | |
1103 | /// Return true if the input \P Inst is part of a chain of dependent ops |
1104 | /// that are suitable for reassociation, otherwise return false. |
1105 | /// If the instruction's operands must be commuted to have a previous |
1106 | /// instruction of the same type define the first source operand, \P Commuted |
1107 | /// will be set to true. |
1108 | bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const; |
1109 | |
1110 | /// Return true when \P Inst is both associative and commutative. |
1111 | virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const { |
1112 | return false; |
1113 | } |
1114 | |
1115 | /// Return true when \P Inst has reassociable operands in the same \P MBB. |
1116 | virtual bool hasReassociableOperands(const MachineInstr &Inst, |
1117 | const MachineBasicBlock *MBB) const; |
1118 | |
1119 | /// Return true when \P Inst has reassociable sibling. |
1120 | bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const; |
1121 | |
1122 | /// When getMachineCombinerPatterns() finds patterns, this function generates |
1123 | /// the instructions that could replace the original code sequence. The client |
1124 | /// has to decide whether the actual replacement is beneficial or not. |
1125 | /// \param Root - Instruction that could be combined with one of its operands |
1126 | /// \param Pattern - Combination pattern for Root |
1127 | /// \param InsInstrs - Vector of new instructions that implement P |
1128 | /// \param DelInstrs - Old instructions, including Root, that could be |
1129 | /// replaced by InsInstr |
1130 | /// \param InstIdxForVirtReg - map of virtual register to instruction in |
1131 | /// InsInstr that defines it |
1132 | virtual void genAlternativeCodeSequence( |
1133 | MachineInstr &Root, MachineCombinerPattern Pattern, |
1134 | SmallVectorImpl<MachineInstr *> &InsInstrs, |
1135 | SmallVectorImpl<MachineInstr *> &DelInstrs, |
1136 | DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const; |
1137 | |
1138 | /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to |
1139 | /// reduce critical path length. |
1140 | void reassociateOps(MachineInstr &Root, MachineInstr &Prev, |
1141 | MachineCombinerPattern Pattern, |
1142 | SmallVectorImpl<MachineInstr *> &InsInstrs, |
1143 | SmallVectorImpl<MachineInstr *> &DelInstrs, |
1144 | DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const; |
1145 | |
1146 | /// The limit on resource length extension we accept in MachineCombiner Pass. |
1147 | virtual int getExtendResourceLenLimit() const { return 0; } |
1148 | |
1149 | /// This is an architecture-specific helper function of reassociateOps. |
1150 | /// Set special operand attributes for new instructions after reassociation. |
1151 | virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, |
1152 | MachineInstr &NewMI1, |
1153 | MachineInstr &NewMI2) const {} |
1154 | |
1155 | virtual void setSpecialOperandAttr(MachineInstr &MI, uint16_t Flags) const {} |
1156 | |
1157 | /// Return true when a target supports MachineCombiner. |
1158 | virtual bool useMachineCombiner() const { return false; } |
1159 | |
1160 | /// Return true if the given SDNode can be copied during scheduling |
1161 | /// even if it has glue. |
1162 | virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; } |
1163 | |
1164 | protected: |
1165 | /// Target-dependent implementation for foldMemoryOperand. |
1166 | /// Target-independent code in foldMemoryOperand will |
1167 | /// take care of adding a MachineMemOperand to the newly created instruction. |
1168 | /// The instruction and any auxiliary instructions necessary will be inserted |
1169 | /// at InsertPt. |
1170 | virtual MachineInstr * |
1171 | foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, |
1172 | ArrayRef<unsigned> Ops, |
1173 | MachineBasicBlock::iterator InsertPt, int FrameIndex, |
1174 | LiveIntervals *LIS = nullptr, |
1175 | VirtRegMap *VRM = nullptr) const { |
1176 | return nullptr; |
1177 | } |
1178 | |
1179 | /// Target-dependent implementation for foldMemoryOperand. |
1180 | /// Target-independent code in foldMemoryOperand will |
1181 | /// take care of adding a MachineMemOperand to the newly created instruction. |
1182 | /// The instruction and any auxiliary instructions necessary will be inserted |
1183 | /// at InsertPt. |
1184 | virtual MachineInstr *foldMemoryOperandImpl( |
1185 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
1186 | MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, |
1187 | LiveIntervals *LIS = nullptr) const { |
1188 | return nullptr; |
1189 | } |
1190 | |
1191 | /// Target-dependent implementation of getRegSequenceInputs. |
1192 | /// |
1193 | /// \returns true if it is possible to build the equivalent |
1194 | /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise. |
1195 | /// |
1196 | /// \pre MI.isRegSequenceLike(). |
1197 | /// |
1198 | /// \see TargetInstrInfo::getRegSequenceInputs. |
1199 | virtual bool getRegSequenceLikeInputs( |
1200 | const MachineInstr &MI, unsigned DefIdx, |
1201 | SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const { |
1202 | return false; |
1203 | } |
1204 | |
1205 | /// Target-dependent implementation of getExtractSubregInputs. |
1206 | /// |
1207 | /// \returns true if it is possible to build the equivalent |
1208 | /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. |
1209 | /// |
1210 | /// \pre MI.isExtractSubregLike(). |
1211 | /// |
1212 | /// \see TargetInstrInfo::getExtractSubregInputs. |
1213 | virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, |
1214 | unsigned DefIdx, |
1215 | RegSubRegPairAndIdx &InputReg) const { |
1216 | return false; |
1217 | } |
1218 | |
1219 | /// Target-dependent implementation of getInsertSubregInputs. |
1220 | /// |
1221 | /// \returns true if it is possible to build the equivalent |
1222 | /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. |
1223 | /// |
1224 | /// \pre MI.isInsertSubregLike(). |
1225 | /// |
1226 | /// \see TargetInstrInfo::getInsertSubregInputs. |
1227 | virtual bool |
1228 | getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, |
1229 | RegSubRegPair &BaseReg, |
1230 | RegSubRegPairAndIdx &InsertedReg) const { |
1231 | return false; |
1232 | } |
1233 | |
1234 | public: |
1235 | /// getAddressSpaceForPseudoSourceKind - Given the kind of memory |
1236 | /// (e.g. stack) the target returns the corresponding address space. |
1237 | virtual unsigned |
1238 | getAddressSpaceForPseudoSourceKind(unsigned Kind) const { |
1239 | return 0; |
1240 | } |
1241 | |
1242 | /// unfoldMemoryOperand - Separate a single instruction which folded a load or |
1243 | /// a store or a load and a store into two or more instructions. If this is |
1244 | /// possible, returns true as well as the new instructions by reference. |
1245 | virtual bool |
1246 | unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, |
1247 | bool UnfoldLoad, bool UnfoldStore, |
1248 | SmallVectorImpl<MachineInstr *> &NewMIs) const { |
1249 | return false; |
1250 | } |
1251 | |
1252 | virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, |
1253 | SmallVectorImpl<SDNode *> &NewNodes) const { |
1254 | return false; |
1255 | } |
1256 | |
1257 | /// Returns the opcode of the would-be new |
1258 | /// instruction after a load / store is unfolded from an instruction of the |
1259 | /// specified opcode. It returns zero if the specified unfolding is not |
1260 | /// possible. If LoadRegIndex is non-null, it is filled in with the operand |
1261 | /// index of the operand which will hold the register holding the loaded |
1262 | /// value. |
1263 | virtual unsigned |
1264 | getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, |
1265 | unsigned *LoadRegIndex = nullptr) const { |
1266 | return 0; |
1267 | } |
1268 | |
1269 | /// This is used by the pre-regalloc scheduler to determine if two loads are |
1270 | /// loading from the same base address. It should only return true if the base |
1271 | /// pointers are the same and the only difference between the two addresses |
1272 | /// is the offset. It also returns the offsets by reference. |
1273 | virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, |
1274 | int64_t &Offset1, |
1275 | int64_t &Offset2) const { |
1276 | return false; |
1277 | } |
1278 | |
1279 | /// This is used by the pre-regalloc scheduler to determine (in conjunction |
1280 | /// with areLoadsFromSameBasePtr) if two loads should be scheduled together. |
1281 | /// On some targets if two loads are loading from |
1282 | /// addresses in the same cache line, it's better if they are scheduled |
1283 | /// together. This function takes two integers that represent the load offsets |
1284 | /// from the common base address. It returns true if it decides it's desirable |
1285 | /// to schedule the two loads together. "NumLoads" is the number of loads that |
1286 | /// have already been scheduled after Load1. |
1287 | virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, |
1288 | int64_t Offset1, int64_t Offset2, |
1289 | unsigned NumLoads) const { |
1290 | return false; |
1291 | } |
1292 | |
1293 | /// Get the base operand and byte offset of an instruction that reads/writes |
1294 | /// memory. This is a convenience function for callers that are only prepared |
1295 | /// to handle a single base operand. |
1296 | bool getMemOperandWithOffset(const MachineInstr &MI, |
1297 | const MachineOperand *&BaseOp, int64_t &Offset, |
1298 | bool &OffsetIsScalable, |
1299 | const TargetRegisterInfo *TRI) const; |
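     | /// Convenience-wrapper usage (sketch; `TRI` assumed in scope): |
     | /// \code |
     | ///   const MachineOperand *BaseOp = nullptr; |
     | ///   int64_t Offset = 0; |
     | ///   bool OffsetIsScalable = false; |
     | ///   if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, |
     | ///                                    OffsetIsScalable, TRI)) { |
     | ///     // MI accesses [BaseOp + Offset]; the offset is scaled by the |
     | ///     // runtime vscale when OffsetIsScalable is true. |
     | ///   } |
     | /// \endcode |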
1300 | |
1301 | /// Get zero or more base operands and the byte offset of an instruction that |
1302 | /// reads/writes memory. Note that there may be zero base operands if the |
1303 | /// instruction accesses a constant address. |
1304 | /// It returns false if MI does not read/write memory. |
1305 | /// It returns false if base operands and offset could not be determined. |
1306 | /// It is not guaranteed to always recognize base operands and offsets in all |
1307 | /// cases. |
1308 | virtual bool getMemOperandsWithOffsetWidth( |
1309 | const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps, |
1310 | int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, |
1311 | const TargetRegisterInfo *TRI) const { |
1312 | return false; |
1313 | } |
1314 | |
1315 | /// Return true if the instruction contains a base register and offset. If |
1316 | /// true, the function also sets the operand position in the instruction |
1317 | /// for the base register and offset. |
1318 | virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, |
1319 | unsigned &BasePos, |
1320 | unsigned &OffsetPos) const { |
1321 | return false; |
1322 | } |
1323 | |
1324 | /// Target-dependent implementation to get the values constituting the address |
1325 | /// of a MachineInstr that is accessing memory. These values are returned as a |
1326 | /// struct ExtAddrMode which contains all relevant information to make up the |
1327 | /// address. |
1328 | virtual Optional<ExtAddrMode> |
1329 | getAddrModeFromMemoryOp(const MachineInstr &MemI, |
1330 | const TargetRegisterInfo *TRI) const { |
1331 | return None; |
1332 | } |
1333 | |
1334 | /// Returns true if MI's Def is NullValueReg, and the MI |
1335 | /// does not change the zero value, i.e. cases such as rax = shr rax, X where |
1336 | /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this |
1337 | /// function can return true even if it becomes zero. Specifically, cases |
1338 | /// such as NullValueReg = shl NullValueReg, 63. |
1339 | virtual bool preservesZeroValueInReg(const MachineInstr *MI, |
1340 | const Register NullValueReg, |
1341 | const TargetRegisterInfo *TRI) const { |
1342 | return false; |
1343 | } |
1344 | |
1345 | /// If the instruction is an increment of a constant value, return the amount. |
1346 | virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const { |
1347 | return false; |
1348 | } |
1349 | |
1350 | /// Returns true if the two given memory operations should be scheduled |
1351 | /// adjacent. Note that you have to add: |
1352 | /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
1353 | /// or |
1354 | /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); |
1355 | /// to TargetPassConfig::createMachineScheduler() to have an effect. |
1356 | /// |
1357 | /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations. |
1358 | /// \p NumLoads is the number of loads that will be in the cluster if this |
1359 | /// hook returns true. |
1360 | /// \p NumBytes is the number of bytes that will be loaded from all the |
1361 | /// clustered loads if this hook returns true. |
1362 | virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, |
1363 | ArrayRef<const MachineOperand *> BaseOps2, |
1364 | unsigned NumLoads, unsigned NumBytes) const { |
1365 |   llvm_unreachable("target did not implement shouldClusterMemOps()"); |
1366 | } |
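     | /// As the comment above notes, this hook only fires once the clustering |
     | /// mutation is registered. A sketch of that registration (`MyPassConfig` |
     | /// is a hypothetical TargetPassConfig subclass): |
     | /// \code |
     | ///   ScheduleDAGInstrs * |
     | ///   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const { |
     | ///     ScheduleDAGMILive *DAG = createGenericSchedLive(C); |
     | ///     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
     | ///     return DAG; |
     | ///   } |
     | /// \endcode |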
1367 | |
1368 | /// Reverses the branch condition of the specified condition list, |
1369 | /// returning false on success and true if it cannot be reversed. |
1370 | virtual bool |
1371 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
1372 | return true; |
1373 | } |
1374 | |
1375 | /// Insert a noop into the instruction stream at the specified point. |
1376 | virtual void insertNoop(MachineBasicBlock &MBB, |
1377 | MachineBasicBlock::iterator MI) const; |
1378 | |
1379 | /// Insert noops into the instruction stream at the specified point. |
1380 | virtual void insertNoops(MachineBasicBlock &MBB, |
1381 | MachineBasicBlock::iterator MI, |
1382 | unsigned Quantity) const; |
1383 | |
1384 | /// Return the noop instruction to use for a noop. |
1385 | virtual void getNoop(MCInst &NopInst) const; |
1386 | |
1387 | /// Return true for post-incremented instructions. |
1388 | virtual bool isPostIncrement(const MachineInstr &MI) const { return false; } |
1389 | |
1390 | /// Returns true if the instruction is already predicated. |
1391 | virtual bool isPredicated(const MachineInstr &MI) const { return false; } |
1392 | |
1393 | // Returns a MIRPrinter comment for this machine operand. |
1394 | virtual std::string |
1395 | createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, |
1396 | unsigned OpIdx, const TargetRegisterInfo *TRI) const; |
1397 | |
1398 | /// Returns true if the instruction is a |
1399 | /// terminator instruction that has not been predicated. |
1400 | bool isUnpredicatedTerminator(const MachineInstr &MI) const; |
1401 | |
1402 | /// Returns true if MI is an unconditional tail call. |
1403 | virtual bool isUnconditionalTailCall(const MachineInstr &MI) const { |
1404 | return false; |
1405 | } |
1406 | |
1407 | /// Returns true if the tail call can be made conditional on BranchCond. |
1408 | virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond, |
1409 | const MachineInstr &TailCall) const { |
1410 | return false; |
1411 | } |
1412 | |
1413 | /// Replace the conditional branch in MBB with a conditional tail call. |
1414 | virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, |
1415 | SmallVectorImpl<MachineOperand> &Cond, |
1416 | const MachineInstr &TailCall) const { |
1417 |   llvm_unreachable("Target didn't implement replaceBranchWithTailCall!"); |
1418 | } |
1419 | |
1420 | /// Convert the instruction into a predicated instruction. |
1421 | /// It returns true if the operation was successful. |
1422 | virtual bool PredicateInstruction(MachineInstr &MI, |
1423 | ArrayRef<MachineOperand> Pred) const; |
1424 | |
1425 | /// Returns true if the first specified predicate |
1426 | /// subsumes the second, e.g. GE subsumes GT. |
1427 | virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1, |
1428 | ArrayRef<MachineOperand> Pred2) const { |
1429 | return false; |
1430 | } |
1431 | |
1432 | /// If the specified instruction defines any predicate |
1433 | /// or condition code register(s) used for predication, returns true as well |
1434 | /// as the definition predicate(s) by reference. |
1435 | /// SkipDead should be set to false at any point that dead |
1436 | /// predicate instructions should be considered as being defined. |
1437 | /// A dead predicate instruction is one that is guaranteed to be removed |
1438 | /// after a call to PredicateInstruction. |
1439 | virtual bool ClobbersPredicate(MachineInstr &MI, |
1440 | std::vector<MachineOperand> &Pred, |
1441 | bool SkipDead) const { |
1442 | return false; |
1443 | } |
1444 | |
1445 | /// Return true if the specified instruction can be predicated. |
1446 | /// By default, this returns true for every instruction with a |
1447 | /// PredicateOperand. |
1448 | virtual bool isPredicable(const MachineInstr &MI) const { |
1449 | return MI.getDesc().isPredicable(); |
1450 | } |
1451 | |
1452 | /// Return true if it's safe to move a machine |
1453 | /// instruction that defines the specified register class. |
1454 | virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { |
1455 | return true; |
1456 | } |
1457 | |
1458 | /// Test if the given instruction should be considered a scheduling boundary. |
1459 | /// This primarily includes labels and terminators. |
1460 | virtual bool isSchedulingBoundary(const MachineInstr &MI, |
1461 | const MachineBasicBlock *MBB, |
1462 | const MachineFunction &MF) const; |
1463 | |
1464 | /// Measure the specified inline asm to determine an approximation of its |
1465 | /// length. |
1466 | virtual unsigned getInlineAsmLength( |
1467 | const char *Str, const MCAsmInfo &MAI, |
1468 | const TargetSubtargetInfo *STI = nullptr) const; |
1469 | |
1470 | /// Allocate and return a hazard recognizer to use for this target when |
1471 | /// scheduling the machine instructions before register allocation. |
1472 | virtual ScheduleHazardRecognizer * |
1473 | CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, |
1474 | const ScheduleDAG *DAG) const; |
1475 | |
1476 | /// Allocate and return a hazard recognizer to use for this target when |
1477 | /// scheduling the machine instructions before register allocation. |
1478 | virtual ScheduleHazardRecognizer * |
1479 | CreateTargetMIHazardRecognizer(const InstrItineraryData *, |
1480 | const ScheduleDAGMI *DAG) const; |
1481 | |
1482 | /// Allocate and return a hazard recognizer to use for this target when |
1483 | /// scheduling the machine instructions after register allocation. |
1484 | virtual ScheduleHazardRecognizer * |
1485 | CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, |
1486 | const ScheduleDAG *DAG) const; |
1487 | |
1488 | /// Allocate and return a hazard recognizer to use by non-scheduling |
1489 | /// passes. |
1490 | virtual ScheduleHazardRecognizer * |
1491 | CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { |
1492 | return nullptr; |
1493 | } |
1494 | |
1495 | /// Provide a global flag for disabling the PreRA hazard recognizer that |
1496 | /// targets may choose to honor. |
1497 | bool usePreRAHazardRecognizer() const; |
1498 | |
1499 | /// For a comparison instruction, return the source registers |
1500 | /// in SrcReg and SrcReg2 if it has two register operands, and the value it |
1501 | /// compares against in CmpValue. Return true if the comparison instruction |
1502 | /// can be analyzed. |
1503 | virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
1504 | Register &SrcReg2, int &Mask, int &Value) const { |
1505 | return false; |
1506 | } |
1507 | |
1508 | /// See if the comparison instruction can be converted |
1509 | /// into something more efficient. E.g., on ARM most instructions can set the |
1510 | /// flags register, obviating the need for a separate CMP. |
1511 | virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, |
1512 | Register SrcReg2, int Mask, int Value, |
1513 | const MachineRegisterInfo *MRI) const { |
1514 | return false; |
1515 | } |
1516 | virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; } |
1517 | |
1518 | /// Try to remove the load by folding it to a register operand at the use. |
1519 | /// We fold the load instructions if and only if the |
1520 | /// def and use are in the same BB. We only look at one load and see |
1521 | /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register |
1522 | /// defined by the load we are trying to fold. DefMI is set to the machine |
1523 | /// instruction that defines FoldAsLoadDefReg, and the function returns |
1524 | /// the machine instruction generated due to folding. |
1525 | virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI, |
1526 | const MachineRegisterInfo *MRI, |
1527 | Register &FoldAsLoadDefReg, |
1528 | MachineInstr *&DefMI) const { |
1529 | return nullptr; |
1530 | } |
1531 | |
1532 | /// 'Reg' is known to be defined by a move immediate instruction, |
1533 | /// try to fold the immediate into the use instruction. |
1534 | /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true, |
1535 | /// then the caller may assume that DefMI has been erased from its parent |
1536 | /// block. The caller may assume that it will not be erased by this |
1537 | /// function otherwise. |
1538 | virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, |
1539 | Register Reg, MachineRegisterInfo *MRI) const { |
1540 | return false; |
1541 | } |
1542 | |
1543 | /// Return the number of micro-operations the given machine |
1544 | /// instruction will be decoded into on the target CPU. The itinerary's |
1545 | /// IssueWidth is the number of microops that can be dispatched each |
1546 | /// cycle. An instruction with zero microops takes no dispatch resources. |
1547 | virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, |
1548 | const MachineInstr &MI) const; |
1549 | |
1550 | /// Return true for pseudo instructions that don't consume any |
1551 | /// machine resources in their current form. These are common cases that the |
1552 | /// scheduler should consider free, rather than conservatively handling them |
1553 | /// as instructions with no itinerary. |
1554 | bool isZeroCost(unsigned Opcode) const { |
1555 | return Opcode <= TargetOpcode::COPY; |
1556 | } |
1557 | |
1558 | virtual int getOperandLatency(const InstrItineraryData *ItinData, |
1559 | SDNode *DefNode, unsigned DefIdx, |
1560 | SDNode *UseNode, unsigned UseIdx) const; |
1561 | |
1562 | /// Compute and return the use operand latency of a given pair of def and use. |
1563 | /// In most cases, the static scheduling itinerary is enough to determine the |
1564 | /// operand latency. But it may not be possible for instructions with a |
1565 | /// variable number of defs / uses. |
1566 | /// |
1567 | /// This is a raw interface to the itinerary that may be directly overridden |
1568 | /// by a target. Use computeOperandLatency to get the best estimate of |
1569 | /// latency. |
1570 | virtual int getOperandLatency(const InstrItineraryData *ItinData, |
1571 | const MachineInstr &DefMI, unsigned DefIdx, |
1572 | const MachineInstr &UseMI, |
1573 | unsigned UseIdx) const; |
1574 | |
1575 | /// Compute the instruction latency of a given instruction. |
1576 | /// If the instruction has higher cost when predicated, it's returned via |
1577 | /// PredCost. |
1578 | virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, |
1579 | const MachineInstr &MI, |
1580 | unsigned *PredCost = nullptr) const; |
1581 | |
1582 | virtual unsigned getPredicationCost(const MachineInstr &MI) const; |
1583 | |
1584 | virtual int getInstrLatency(const InstrItineraryData *ItinData, |
1585 | SDNode *Node) const; |
1586 | |
1587 | /// Return the default expected latency for a def based on its opcode. |
1588 | unsigned defaultDefLatency(const MCSchedModel &SchedModel, |
1589 | const MachineInstr &DefMI) const; |
1590 | |
1591 | int computeDefOperandLatency(const InstrItineraryData *ItinData, |
1592 | const MachineInstr &DefMI) const; |
1593 | |
1594 | /// Return true if this opcode has high latency to its result. |
1595 | virtual bool isHighLatencyDef(int opc) const { return false; } |
1596 | |
1597 | /// Compute operand latency between a def of 'Reg' |
1598 | /// and a use in the current loop. Return true if the target considered |
1599 | /// it 'high'. This is used by optimization passes such as machine LICM to |
1600 | /// determine whether it makes sense to hoist an instruction out even in a |
1601 | /// high register pressure situation. |
1602 | virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, |
1603 | const MachineRegisterInfo *MRI, |
1604 | const MachineInstr &DefMI, unsigned DefIdx, |
1605 | const MachineInstr &UseMI, |
1606 | unsigned UseIdx) const { |
1607 | return false; |
1608 | } |
1609 | |
1610 | /// Compute operand latency of a def of 'Reg'. Return true |
1611 | /// if the target considered it 'low'. |
1612 | virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, |
1613 | const MachineInstr &DefMI, |
1614 | unsigned DefIdx) const; |
1615 | |
1616 | /// Perform target-specific instruction verification. |
1617 | virtual bool verifyInstruction(const MachineInstr &MI, |
1618 | StringRef &ErrInfo) const { |
1619 | return true; |
1620 | } |
1621 | |
1622 | /// Return the current execution domain and bit mask of |
1623 | /// possible domains for instruction. |
1624 | /// |
1625 | /// Some micro-architectures have multiple execution domains, and multiple |
1626 | /// opcodes that perform the same operation in different domains. For |
1627 | /// example, the x86 architecture provides the por, orps, and orpd |
1628 | /// instructions that all do the same thing. There is a latency penalty if a |
1629 | /// register is written in one domain and read in another. |
1630 | /// |
1631 | /// This function returns a pair (domain, mask) containing the execution |
1632 | /// domain of MI, and a bit mask of possible domains. The setExecutionDomain |
1633 | /// function can be used to change the opcode to one of the domains in the |
1634 | /// bit mask. Instructions whose execution domain can't be changed should |
1635 | /// return a 0 mask. |
1636 | /// |
1637 | /// The execution domain numbers don't have any special meaning except domain |
1638 | /// 0 is used for instructions that are not associated with any interesting |
1639 | /// execution domain. |
1640 | /// |
1641 | virtual std::pair<uint16_t, uint16_t> |
1642 | getExecutionDomain(const MachineInstr &MI) const { |
1643 | return std::make_pair(0, 0); |
1644 | } |
1645 | |
1646 | /// Change the opcode of MI to execute in Domain. |
1647 | /// |
1648 | /// The bit (1 << Domain) must be set in the mask returned from |
1649 | /// getExecutionDomain(MI). |
1650 | virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} |
1651 | |
1652 | /// Returns the preferred minimum clearance |
1653 | /// before an instruction with an unwanted partial register update. |
1654 | /// |
1655 | /// Some instructions only write part of a register, and implicitly need to |
1656 | /// read the other parts of the register. This may cause unwanted stalls |
1657 | /// preventing otherwise unrelated instructions from executing in parallel in |
1658 | /// an out-of-order CPU. |
1659 | /// |
1660 | /// For example, the x86 instruction cvtsi2ss writes its result to bits |
1661 | /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so |
1662 | /// the instruction needs to wait for the old value of the register to become |
1663 | /// available: |
1664 | /// |
1665 | /// addps %xmm1, %xmm0 |
1666 | /// movaps %xmm0, (%rax) |
1667 | /// cvtsi2ss %rbx, %xmm0 |
1668 | /// |
1669 | /// In the code above, the cvtsi2ss instruction needs to wait for the addps |
1670 | /// instruction before it can issue, even though the high bits of %xmm0 |
1671 | /// probably aren't needed. |
1672 | /// |
1673 | /// This hook returns the preferred clearance before MI, measured in |
1674 | /// instructions. Other defs of MI's operand OpNum are avoided in the last N |
1675 | /// instructions before MI. It should only return a positive value for |
1676 | /// unwanted dependencies. If the old bits of the defined register have |
1677 | /// useful values, or if MI is determined to otherwise read the dependency, |
1678 | /// the hook should return 0. |
1679 | /// |
1680 | /// The unwanted dependency may be handled by: |
1681 | /// |
1682 | /// 1. Allocating the same register for an MI def and use. That makes the |
1683 | /// unwanted dependency identical to a required dependency. |
1684 | /// |
1685 | /// 2. Allocating a register for the def that has no defs in the previous N |
1686 | /// instructions. |
1687 | /// |
1688 | /// 3. Calling breakPartialRegDependency() with the same arguments. This |
1689 | /// allows the target to insert a dependency breaking instruction. |
1690 | /// |
1691 | virtual unsigned |
1692 | getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, |
1693 | const TargetRegisterInfo *TRI) const { |
1694 | // The default implementation returns 0 for no partial register dependency. |
1695 | return 0; |
1696 | } |
1697 | |
1698 | /// Return the minimum clearance before an instruction that reads an |
1699 | /// unused register. |
1700 | /// |
1701 | /// For example, AVX instructions may copy part of a register operand into |
1702 | /// the unused high bits of the destination register. |
1703 | /// |
1704 | /// vcvtsi2sdq %rax, undef %xmm0, %xmm14 |
1705 | /// |
1706 | /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a |
1707 | /// false dependence on any previous write to %xmm0. |
1708 | /// |
1709 |   /// This hook works similarly to getPartialRegUpdateClearance, except that |
1710 |   /// \p OpNum is the index of the operand that reads the undefined register, |
1711 |   /// not of the partially written def. |
1712 | virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, |
1713 | const TargetRegisterInfo *TRI) const { |
1714 | // The default implementation returns 0 for no undef register dependency. |
1715 | return 0; |
1716 | } |
1717 | |
1718 | /// Insert a dependency-breaking instruction |
1719 | /// before MI to eliminate an unwanted dependency on OpNum. |
1720 | /// |
1721 | /// If it wasn't possible to avoid a def in the last N instructions before MI |
1722 | /// (see getPartialRegUpdateClearance), this hook will be called to break the |
1723 | /// unwanted dependency. |
1724 | /// |
1725 | /// On x86, an xorps instruction can be used as a dependency breaker: |
1726 | /// |
1727 | /// addps %xmm1, %xmm0 |
1728 | /// movaps %xmm0, (%rax) |
1729 | /// xorps %xmm0, %xmm0 |
1730 | /// cvtsi2ss %rbx, %xmm0 |
1731 | /// |
1732 | /// An <imp-kill> operand should be added to MI if an instruction was |
1733 | /// inserted. This ties the instructions together in the post-ra scheduler. |
1734 | /// |
1735 | virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, |
1736 | const TargetRegisterInfo *TRI) const {} |
1737 | |
1738 | /// Create machine specific model for scheduling. |
1739 | virtual DFAPacketizer * |
1740 | CreateTargetScheduleState(const TargetSubtargetInfo &) const { |
1741 | return nullptr; |
1742 | } |
1743 | |
1744 | /// Sometimes, it is possible for the target |
1745 | /// to tell, even without aliasing information, that two MIs access different |
1746 |   /// memory addresses. This function returns true only if it can prove that |
1747 |   /// the two MIs access disjoint memory locations; false means "don't know". |
1748 | /// |
1749 | /// Assumes any physical registers used to compute addresses have the same |
1750 | /// value for both instructions. (This is the most useful assumption for |
1751 | /// post-RA scheduling.) |
1752 | /// |
1753 | /// See also MachineInstr::mayAlias, which is implemented on top of this |
1754 | /// function. |
1755 | virtual bool |
1756 | areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, |
1757 | const MachineInstr &MIb) const { |
1758 |     assert(MIa.mayLoadOrStore() && |
1759 |            "MIa must load from or modify a memory location"); |
1760 |     assert(MIb.mayLoadOrStore() && |
1761 |            "MIb must load from or modify a memory location"); |
1762 | return false; |
1763 | } |
1764 | |
1765 |   /// Return the value to use for MachineCSE's LookAheadLimit, a |
1766 |   /// heuristic used when CSE'ing phys reg defs. |
1767 | virtual unsigned getMachineCSELookAheadLimit() const { |
1768 | // The default lookahead is small to prevent unprofitable quadratic |
1769 | // behavior. |
1770 | return 5; |
1771 | } |
1772 | |
1773 |   /// Return the maximum number of alias checks on memory operands. For |
1774 |   /// instructions with more than one memory operand, the alias check on a |
1775 |   /// single MachineInstr pair has quadratic overhead and results in |
1776 |   /// unacceptable performance in the worst case. This limit clamps the number |
1777 |   /// of checks performed, which is usually the product of the memory operand |
1778 |   /// counts of the MachineInstr pair being checked. For instance, for two |
1779 |   /// MachineInstrs with 4 and 5 memory operands respectively, a total of 20 |
1780 |   /// checks would be required. With this limit set to 16, their alias check |
1781 |   /// is skipped. We limit the product rather than each individual instruction |
1782 |   /// because targets may have special MachineInstrs with a considerably high |
1783 |   /// number of memory operands, such as `ldm` on ARM. Setting the limit per |
1784 |   /// MachineInstr would result in either too high overhead or too rigid a |
1785 |   /// restriction. |
1786 | virtual unsigned getMemOperandAACheckLimit() const { return 16; } |
1787 | |
1788 | /// Return an array that contains the ids of the target indices (used for the |
1789 | /// TargetIndex machine operand) and their names. |
1790 | /// |
1791 | /// MIR Serialization is able to serialize only the target indices that are |
1792 | /// defined by this method. |
1793 | virtual ArrayRef<std::pair<int, const char *>> |
1794 | getSerializableTargetIndices() const { |
1795 | return None; |
1796 | } |
1797 | |
1798 |   /// Decompose the machine operand's target flags into two values - the direct |
1799 |   /// target flag value and any bit flags that are applied. |
1800 | virtual std::pair<unsigned, unsigned> |
1801 | decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const { |
1802 | return std::make_pair(0u, 0u); |
1803 | } |
1804 | |
1805 | /// Return an array that contains the direct target flag values and their |
1806 | /// names. |
1807 | /// |
1808 | /// MIR Serialization is able to serialize only the target flags that are |
1809 | /// defined by this method. |
1810 | virtual ArrayRef<std::pair<unsigned, const char *>> |
1811 | getSerializableDirectMachineOperandTargetFlags() const { |
1812 | return None; |
1813 | } |
1814 | |
1815 | /// Return an array that contains the bitmask target flag values and their |
1816 | /// names. |
1817 | /// |
1818 | /// MIR Serialization is able to serialize only the target flags that are |
1819 | /// defined by this method. |
1820 | virtual ArrayRef<std::pair<unsigned, const char *>> |
1821 | getSerializableBitmaskMachineOperandTargetFlags() const { |
1822 | return None; |
1823 | } |
1824 | |
1825 | /// Return an array that contains the MMO target flag values and their |
1826 | /// names. |
1827 | /// |
1828 | /// MIR Serialization is able to serialize only the MMO target flags that are |
1829 | /// defined by this method. |
1830 | virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> |
1831 | getSerializableMachineMemOperandTargetFlags() const { |
1832 | return None; |
1833 | } |
1834 | |
1835 | /// Determines whether \p Inst is a tail call instruction. Override this |
1836 | /// method on targets that do not properly set MCID::Return and MCID::Call on |
1837 |   /// tail call instructions. |
1838 | virtual bool isTailCall(const MachineInstr &Inst) const { |
1839 | return Inst.isReturn() && Inst.isCall(); |
1840 | } |
1841 | |
1842 | /// True if the instruction is bound to the top of its basic block and no |
1843 |   /// other instructions shall be inserted before it. This can be implemented to |
1844 |   /// prevent the register allocator from inserting spills before such instructions. |
1845 | virtual bool isBasicBlockPrologue(const MachineInstr &MI) const { |
1846 | return false; |
1847 | } |
1848 | |
1849 |   /// During PHI elimination, lets the target make necessary checks and |
1850 |   /// insert the copy to the PHI destination register in a target-specific |
1851 |   /// manner. |
1852 | virtual MachineInstr *createPHIDestinationCopy( |
1853 | MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, |
1854 | const DebugLoc &DL, Register Src, Register Dst) const { |
1855 | return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst) |
1856 | .addReg(Src); |
1857 | } |
1858 | |
1859 |   /// During PHI elimination, lets the target make necessary checks and |
1860 |   /// insert the copy of the PHI source register in a target-specific |
1861 |   /// manner. |
1862 | virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB, |
1863 | MachineBasicBlock::iterator InsPt, |
1864 | const DebugLoc &DL, Register Src, |
1865 | unsigned SrcSubReg, |
1866 | Register Dst) const { |
1867 | return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst) |
1868 | .addReg(Src, 0, SrcSubReg); |
1869 | } |
1870 | |
1871 | /// Returns a \p outliner::OutlinedFunction struct containing target-specific |
1872 | /// information for a set of outlining candidates. |
1873 | virtual outliner::OutlinedFunction getOutliningCandidateInfo( |
1874 | std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { |
1875 |     llvm_unreachable( |
1876 |         "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!"); |
1877 | } |
1878 | |
1879 | /// Returns how or if \p MI should be outlined. |
1880 | virtual outliner::InstrType |
1881 | getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const { |
1882 |     llvm_unreachable( |
1883 |         "Target didn't implement TargetInstrInfo::getOutliningType!"); |
1884 |