Bug Summary

File: llvm/lib/CodeGen/InlineSpiller.cpp
Warning: line 487, column 55
The left operand of '==' is a garbage value
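
In plain terms: the analyzer found a path through InlineSpiller::eliminateRedundantSpills (annotated below, around source line 487) on which the local 'int FI;' is read before anything writes to it. The default TargetInstrInfo::isStoreToStackSlot returns a null Register and leaves its FrameIndex out-parameter untouched, so on the path where that return value compares equal to Reg, the follow-up check 'FI == StackSlot' reads an indeterminate value. Below is a minimal standalone sketch of the flagged shape, using hypothetical names (maybeGetSlot stands in for the conditionally-assigning callee):

  #include <cstdio>

  // Stand-in for a callee that writes its out-parameter only on success,
  // like the default TargetInstrInfo::isStoreToStackSlot.
  static unsigned maybeGetSlot(bool IsStore, int &FrameIndex) {
    if (!IsStore)
      return 0; // FrameIndex is NOT written on this path.
    FrameIndex = 42;
    return 7;
  }

  int main() {
    int FI; // declared without an initial value, as in step 81 below
    unsigned Reg = maybeGetSlot(false, FI);
    if (Reg == 0 && FI == 42) // deliberately reproduces the warning:
      std::puts("matched");   // FI is indeterminate when Reg is null
    return 0;
  }

Whether the path is reachable in the real pass depends on whether Reg can ever compare equal to the null Register returned by the default implementation; the analyzer cannot rule that out from this translation unit alone.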

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name InlineSpiller.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/lib/CodeGen -I include -I /build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-25-232935-20746-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/lib/CodeGen/InlineSpiller.cpp

/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/lib/CodeGen/InlineSpiller.cpp

1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The inline spiller modifies the machine function directly instead of
10// inserting spills and restores in VirtRegMap.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SplitKit.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/MapVector.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/CodeGen/LiveInterval.h"
26#include "llvm/CodeGen/LiveIntervalCalc.h"
27#include "llvm/CodeGen/LiveIntervals.h"
28#include "llvm/CodeGen/LiveRangeEdit.h"
29#include "llvm/CodeGen/LiveStacks.h"
30#include "llvm/CodeGen/MachineBasicBlock.h"
31#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
32#include "llvm/CodeGen/MachineDominators.h"
33#include "llvm/CodeGen/MachineFunction.h"
34#include "llvm/CodeGen/MachineFunctionPass.h"
35#include "llvm/CodeGen/MachineInstr.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineInstrBundle.h"
38#include "llvm/CodeGen/MachineLoopInfo.h"
39#include "llvm/CodeGen/MachineOperand.h"
40#include "llvm/CodeGen/MachineRegisterInfo.h"
41#include "llvm/CodeGen/SlotIndexes.h"
42#include "llvm/CodeGen/Spiller.h"
43#include "llvm/CodeGen/StackMaps.h"
44#include "llvm/CodeGen/TargetInstrInfo.h"
45#include "llvm/CodeGen/TargetOpcodes.h"
46#include "llvm/CodeGen/TargetRegisterInfo.h"
47#include "llvm/CodeGen/TargetSubtargetInfo.h"
48#include "llvm/CodeGen/VirtRegMap.h"
49#include "llvm/Config/llvm-config.h"
50#include "llvm/Support/BlockFrequency.h"
51#include "llvm/Support/BranchProbability.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Compiler.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/ErrorHandling.h"
56#include "llvm/Support/raw_ostream.h"
57#include <cassert>
58#include <iterator>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65#define DEBUG_TYPE"regalloc" "regalloc"
66
67STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68STATISTIC(NumSnippets, "Number of spilled snippets");
69STATISTIC(NumSpills, "Number of spills inserted");
70STATISTIC(NumSpillsRemoved, "Number of spills removed");
71STATISTIC(NumReloads, "Number of reloads inserted");
72STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73STATISTIC(NumFolded, "Number of folded stack accesses");
74STATISTIC(NumFoldedLoads, "Number of folded loads");
75STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76
77static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78 cl::desc("Disable inline spill hoisting"));
79static cl::opt<bool>
80RestrictStatepointRemat("restrict-statepoint-remat",
81 cl::init(false), cl::Hidden,
82 cl::desc("Restrict remat for statepoint operands"));
83
84namespace {
85
86class HoistSpillHelper : private LiveRangeEdit::Delegate {
87 MachineFunction &MF;
88 LiveIntervals &LIS;
89 LiveStacks &LSS;
90 AliasAnalysis *AA;
91 MachineDominatorTree &MDT;
92 MachineLoopInfo &Loops;
93 VirtRegMap &VRM;
94 MachineRegisterInfo &MRI;
95 const TargetInstrInfo &TII;
96 const TargetRegisterInfo &TRI;
97 const MachineBlockFrequencyInfo &MBFI;
98
99 InsertPointAnalysis IPA;
100
101 // Map from StackSlot to the LiveInterval of the original register.
102 // Note the LiveInterval of the original register may have been deleted
103 // after it is spilled. We keep a copy here to track the range where
104 // spills can be moved.
105 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
106
107 // Map from a pair (StackSlot, Original VNI) to a set of spills which
108 // have the same stack slot and have equal values defined by Original VNI.
109 // These spills are mergeable and are candidates for hoisting.
110 using MergeableSpillsMap =
111 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
112 MergeableSpillsMap MergeableSpills;
113
114 /// This is the map from original register to a set containing all its
115 /// siblings. To hoist a spill to another BB, we need to find a live
116 /// sibling there and use it as the source of the new spill.
117 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
118
119 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
120 MachineBasicBlock &BB, Register &LiveReg);
121
122 void rmRedundantSpills(
123 SmallPtrSet<MachineInstr *, 16> &Spills,
124 SmallVectorImpl<MachineInstr *> &SpillsToRm,
125 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
126
127 void getVisitOrders(
128 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
129 SmallVectorImpl<MachineDomTreeNode *> &Orders,
130 SmallVectorImpl<MachineInstr *> &SpillsToRm,
131 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
132 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
133
134 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
135 SmallPtrSet<MachineInstr *, 16> &Spills,
136 SmallVectorImpl<MachineInstr *> &SpillsToRm,
137 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
138
139public:
140 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
141 VirtRegMap &vrm)
142 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
143 LSS(pass.getAnalysis<LiveStacks>()),
144 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
145 MDT(pass.getAnalysis<MachineDominatorTree>()),
146 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
147 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
148 TRI(*mf.getSubtarget().getRegisterInfo()),
149 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
150 IPA(LIS, mf.getNumBlockIDs()) {}
151
152 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
153 unsigned Original);
154 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
155 void hoistAllSpills();
156 void LRE_DidCloneVirtReg(Register, Register) override;
157};
158
159class InlineSpiller : public Spiller {
160 MachineFunction &MF;
161 LiveIntervals &LIS;
162 LiveStacks &LSS;
163 AliasAnalysis *AA;
164 MachineDominatorTree &MDT;
165 MachineLoopInfo &Loops;
166 VirtRegMap &VRM;
167 MachineRegisterInfo &MRI;
168 const TargetInstrInfo &TII;
169 const TargetRegisterInfo &TRI;
170 const MachineBlockFrequencyInfo &MBFI;
171
172 // Variables that are valid during spill(), but used by multiple methods.
173 LiveRangeEdit *Edit;
174 LiveInterval *StackInt;
175 int StackSlot;
176 Register Original;
177
178 // All registers to spill to StackSlot, including the main register.
179 SmallVector<Register, 8> RegsToSpill;
180
181 // All COPY instructions to/from snippets.
182 // They are ignored since both operands refer to the same stack slot.
183 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
184
185 // Values that failed to remat at some point.
186 SmallPtrSet<VNInfo*, 8> UsedValues;
187
188 // Dead defs generated during spilling.
189 SmallVector<MachineInstr*, 8> DeadDefs;
190
191 // Object records spills information and does the hoisting.
192 HoistSpillHelper HSpiller;
193
194 // Live range weight calculator.
195 VirtRegAuxInfo &VRAI;
196
197 ~InlineSpiller() override = default;
198
199public:
200 InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
201 VirtRegAuxInfo &VRAI)
202 : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
203 LSS(Pass.getAnalysis<LiveStacks>()),
204 AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
205 MDT(Pass.getAnalysis<MachineDominatorTree>()),
206 Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
207 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
208 TRI(*MF.getSubtarget().getRegisterInfo()),
209 MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
210 HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
211
212 void spill(LiveRangeEdit &) override;
213 void postOptimization() override;
214
215private:
216 bool isSnippet(const LiveInterval &SnipLI);
217 void collectRegsToSpill();
218
219 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
41. Calling 'is_contained<llvm::SmallVector<llvm::Register, 8> &, llvm::Register>'
44. Returning from 'is_contained<llvm::SmallVector<llvm::Register, 8> &, llvm::Register>'
45. Returning zero, which participates in a condition later
220
221 bool isSibling(Register Reg);
222 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
223 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
224
225 void markValueUsed(LiveInterval*, VNInfo*);
226 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
227 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
228 void reMaterializeAll();
229
230 bool coalesceStackAccess(MachineInstr *MI, Register Reg);
231 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
232 MachineInstr *LoadMI = nullptr);
233 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
234 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
235
236 void spillAroundUses(Register Reg);
237 void spillAll();
238};
239
240} // end anonymous namespace
241
242Spiller::~Spiller() = default;
243
244void Spiller::anchor() {}
245
246Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
247 MachineFunction &MF, VirtRegMap &VRM,
248 VirtRegAuxInfo &VRAI) {
249 return new InlineSpiller(Pass, MF, VRM, VRAI);
250}
251
252//===----------------------------------------------------------------------===//
253// Snippets
254//===----------------------------------------------------------------------===//
255
256// When spilling a virtual register, we also spill any snippets it is connected
257// to. The snippets are small live ranges that only have a single real use,
258// leftovers from live range splitting. Spilling them enables memory operand
259// folding or tightens the live range around the single use.
260//
261// This minimizes register pressure and maximizes the store-to-load distance for
262// spill slots which can be important in tight loops.
263
264/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
265/// otherwise return 0.
266static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
267 if (!MI.isFullCopy())
64. Assuming the condition is false
65. Taking false branch
268 return Register();
269 if (MI.getOperand(0).getReg() == Reg)
66. Calling 'Register::operator=='
69. Returning from 'Register::operator=='
70. Taking false branch
270 return MI.getOperand(1).getReg();
271 if (MI.getOperand(1).getReg() == Reg)
71. Calling 'Register::operator=='
74. Returning from 'Register::operator=='
75. Taking false branch
272 return MI.getOperand(0).getReg();
273 return Register();
274}
275
276static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
277 for (const MachineOperand &MO : MI.operands())
278 if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
279 LIS.getInterval(MO.getReg());
280}
281
282/// isSnippet - Identify if a live interval is a snippet that should be spilled.
283/// It is assumed that SnipLI is a virtual register with the same original as
284/// Edit->getReg().
285bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
286 Register Reg = Edit->getReg();
287
288 // A snippet is a tiny live range with only a single instruction using it
289 // besides copies to/from Reg or spills/fills. We accept:
290 //
291 // %snip = COPY %Reg / FILL fi#
292 // %snip = USE %snip
293 // %Reg = COPY %snip / SPILL %snip, fi#
294 //
295 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
296 return false;
297
298 MachineInstr *UseMI = nullptr;
299
300 // Check that all uses satisfy our criteria.
301 for (MachineRegisterInfo::reg_instr_nodbg_iterator
302 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
303 E = MRI.reg_instr_nodbg_end();
304 RI != E;) {
305 MachineInstr &MI = *RI++;
306
307 // Allow copies to/from Reg.
308 if (isFullCopyOf(MI, Reg))
309 continue;
310
311 // Allow stack slot loads.
312 int FI;
313 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
314 continue;
315
316 // Allow stack slot stores.
317 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
318 continue;
319
320 // Allow a single additional instruction.
321 if (UseMI && &MI != UseMI)
322 return false;
323 UseMI = &MI;
324 }
325 return true;
326}
327
328/// collectRegsToSpill - Collect live range snippets that only have a single
329/// real use.
330void InlineSpiller::collectRegsToSpill() {
331 Register Reg = Edit->getReg();
332
333 // Main register always spills.
334 RegsToSpill.assign(1, Reg);
335 SnippetCopies.clear();
336
337 // Snippets all have the same original, so there can't be any for an original
338 // register.
339 if (Original == Reg)
340 return;
341
342 for (MachineInstr &MI :
343 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
344 Register SnipReg = isFullCopyOf(MI, Reg);
345 if (!isSibling(SnipReg))
346 continue;
347 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
348 if (!isSnippet(SnipLI))
349 continue;
350 SnippetCopies.insert(&MI);
351 if (isRegToSpill(SnipReg))
352 continue;
353 RegsToSpill.push_back(SnipReg);
354 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
355 ++NumSnippets;
356 }
357}
358
359bool InlineSpiller::isSibling(Register Reg) {
360 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
361}
362
363/// It is beneficial to spill to an earlier place in the same BB in a case
364/// like the following:
365/// There is an alternative def earlier in the same MBB.
366/// Hoist the spill as far as possible in SpillMBB. This can ease
367/// register pressure:
368///
369/// x = def
370/// y = use x
371/// s = copy x
372///
373/// Hoisting the spill of s to immediately after the def removes the
374/// interference between x and y:
375///
376/// x = def
377/// spill x
378/// y = use killed x
379///
380/// This hoist only helps when the copy kills its source.
381///
382bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
383 MachineInstr &CopyMI) {
384 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
385#ifndef NDEBUG
386 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
387 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
388#endif
389
390 Register SrcReg = CopyMI.getOperand(1).getReg();
391 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
392 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
393 LiveQueryResult SrcQ = SrcLI.Query(Idx);
394 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
395 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
396 return false;
397
398 // Conservatively extend the stack slot range to the range of the original
399 // value. We may be able to do better with stack slot coloring by being more
400 // careful here.
401 assert(StackInt && "No stack slot assigned yet.");
402 LiveInterval &OrigLI = LIS.getInterval(Original);
403 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
404 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
405 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
406 << *StackInt << '\n');
407
408 // We are going to spill SrcVNI immediately after its def, so clear out
409 // any later spills of the same value.
410 eliminateRedundantSpills(SrcLI, SrcVNI);
411
412 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
413 MachineBasicBlock::iterator MII;
414 if (SrcVNI->isPHIDef())
415 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
416 else {
417 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
418 assert(DefMI && "Defining instruction disappeared");
419 MII = DefMI;
420 ++MII;
421 }
422 MachineInstrSpan MIS(MII, MBB);
423 // Insert spill without kill flag immediately after def.
424 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
425 MRI.getRegClass(SrcReg), &TRI);
426 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
427 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
428 getVDefInterval(MI, LIS);
429 --MII; // Point to store instruction.
430 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
431
432 // If only one store instruction is required for the spill, add it to the
433 // mergeable list. On X86 AMX, two instructions are required to store, so
434 // we disable merging for that case.
435 if (MIS.begin() == MII)
436 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
437 ++NumSpills;
438 return true;
439}
440
441/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
442/// redundant spills of this value in SLI.reg and sibling copies.
443void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
444 assert(VNI && "Missing value");
34. Assuming 'VNI' is non-null
35. '?' condition is true
445 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
446 WorkList.push_back(std::make_pair(&SLI, VNI));
447 assert(StackInt && "No stack slot assigned yet.");
36. Assuming field 'StackInt' is non-null
37. '?' condition is true
448
449 do {
450 LiveInterval *LI;
451 std::tie(LI, VNI) = WorkList.pop_back_val();
452 Register Reg = LI->reg();
453 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
454 << VNI->def << " in " << *LI << '\n');
38. Assuming 'DebugFlag' is false
39. Loop condition is false. Exiting loop
455
456 // Regs to spill are taken care of.
457 if (isRegToSpill(Reg))
40. Calling 'InlineSpiller::isRegToSpill'
46. Returning from 'InlineSpiller::isRegToSpill'
47. Taking false branch
458 continue;
459
460 // Add all of VNI's live range to StackInt.
461 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
462 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
48. Assuming 'DebugFlag' is false
49. Loop condition is false. Exiting loop
463
464 // Find all spills and copies of VNI.
465 for (MachineInstr &MI :
466 llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
467 if (!MI.isCopy() && !MI.mayStore())
50. Calling 'MachineInstr::isCopy'
53. Returning from 'MachineInstr::isCopy'
54. Calling 'MachineInstr::mayStore'
58. Returning from 'MachineInstr::mayStore'
59. Assuming the condition is false
60. Taking false branch
468 continue;
469 SlotIndex Idx = LIS.getInstructionIndex(MI);
470 if (LI->getVNInfoAt(Idx) != VNI)
61. Assuming the condition is false
62. Taking false branch
471 continue;
472
473 // Follow sibling copies down the dominator tree.
474 if (Register DstReg = isFullCopyOf(MI, Reg)) {
63. Calling 'isFullCopyOf'
76. Returning from 'isFullCopyOf'
77. Calling 'Register::operator unsigned int'
79. Returning from 'Register::operator unsigned int'
80. Taking false branch
475 if (isSibling(DstReg)) {
476 LiveInterval &DstLI = LIS.getInterval(DstReg);
477 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
478 assert(DstVNI && "Missing defined value");
479 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
480 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
481 }
482 continue;
483 }
484
485 // Erase spills.
486 int FI;
81. 'FI' declared without an initial value
487 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
82. Calling 'TargetInstrInfo::isStoreToStackSlot'
84. Returning from 'TargetInstrInfo::isStoreToStackSlot'
85. Calling 'Register::operator=='
88. Returning from 'Register::operator=='
89. The left operand of '==' is a garbage value
488 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
489 // eliminateDeadDefs won't normally remove stores, so switch opcode.
490 MI.setDesc(TII.get(TargetOpcode::KILL));
491 DeadDefs.push_back(&MI);
492 ++NumSpillsRemoved;
493 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
494 --NumSpills;
495 }
496 }
497 } while (!WorkList.empty());
498}
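
One way to make the flagged comparison robust (a sketch of a defensive rewrite, not necessarily the upstream fix) is to initialize FI and compare it only after isStoreToStackSlot has actually recognized a store, so no path reads an indeterminate value:

  // Hedged rewrite of source lines 486-487, assuming a null Register
  // return from isStoreToStackSlot means "not a store to a stack slot".
  int FI = 0;
  Register StoreReg = TII.isStoreToStackSlot(MI, FI);
  if (StoreReg && StoreReg == Reg && FI == StackSlot) {
    // ... switch the opcode to KILL and update spill statistics as above ...
  }

On targets whose overrides assign FI whenever they report a store, this preserves the original behavior while making the evaluation order explicit to the analyzer.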
499
500//===----------------------------------------------------------------------===//
501// Rematerialization
502//===----------------------------------------------------------------------===//
503
504/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
505/// instruction cannot be eliminated. See through snippet copies.
506void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
507 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
508 WorkList.push_back(std::make_pair(LI, VNI));
509 do {
510 std::tie(LI, VNI) = WorkList.pop_back_val();
511 if (!UsedValues.insert(VNI).second)
512 continue;
513
514 if (VNI->isPHIDef()) {
515 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
516 for (MachineBasicBlock *P : MBB->predecessors()) {
517 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
518 if (PVNI)
519 WorkList.push_back(std::make_pair(LI, PVNI));
520 }
521 continue;
522 }
523
524 // Follow snippet copies.
525 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
526 if (!SnippetCopies.count(MI))
527 continue;
528 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
529 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
530 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
531 assert(SnipVNI && "Snippet undefined before copy");
532 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
533 } while (!WorkList.empty());
534}
535
536bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
537 MachineInstr &MI) {
538 if (!RestrictStatepointRemat)
539 return true;
540 // Here's a quick explanation of the problem we're trying to handle:
541 // * There are some pseudo instructions with more vreg uses than there are
542 // physical registers on the machine.
543 // * This is normally handled by spilling the vreg, and folding the reload
544 // into the user instruction. (Thus decreasing the number of used vregs
545 // until the remainder can be assigned to physregs.)
546 // * However, since we may try to spill vregs in any order, we can end up
547 // trying to spill each operand to the instruction, and then rematting it
548 // instead. When that happens, the new live intervals (for the remats) are
549 // expected to be trivially assignable (i.e. RS_Done). However, since we
550 // may have more remats than physregs, we're guaranteed to fail to assign
551 // one.
552 // At the moment, we only handle this for STATEPOINTs since they're the only
553 // pseudo op where we've seen this. If we start seeing other instructions
554 // with the same problem, we need to revisit this.
555 if (MI.getOpcode() != TargetOpcode::STATEPOINT)
556 return true;
557 // For STATEPOINTs we allow rematerialization for fixed arguments only,
558 // hoping that the number of physical registers is enough to cover all fixed
559 // arguments. If that is not true we need to revisit it.
560 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
561 EndIdx = MI.getNumOperands();
562 Idx < EndIdx; ++Idx) {
563 MachineOperand &MO = MI.getOperand(Idx);
564 if (MO.isReg() && MO.getReg() == VReg)
565 return false;
566 }
567 return true;
568}
569
570/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
571bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
572 // Analyze instruction
573 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
574 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
575
576 if (!RI.Reads)
577 return false;
578
579 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
580 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
581
582 if (!ParentVNI) {
583 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
584 for (MachineOperand &MO : MI.operands())
585 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
586 MO.setIsUndef();
587 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
588 return true;
589 }
590
591 if (SnippetCopies.count(&MI))
592 return false;
593
594 LiveInterval &OrigLI = LIS.getInterval(Original);
595 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
596 LiveRangeEdit::Remat RM(ParentVNI);
597 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
598
599 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
600 markValueUsed(&VirtReg, ParentVNI);
601 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
602 return false;
603 }
604
605 // If the instruction also writes VirtReg.reg, it had better not require the
606 // same register for uses and defs.
607 if (RI.Tied) {
608 markValueUsed(&VirtReg, ParentVNI);
609 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
610 return false;
611 }
612
613 // Before rematerializing into a register for a single instruction, try to
614 // fold a load into the instruction. That avoids allocating a new register.
615 if (RM.OrigMI->canFoldAsLoad() &&
616 foldMemoryOperand(Ops, RM.OrigMI)) {
617 Edit->markRematerialized(RM.ParentVNI);
618 ++NumFoldedLoads;
619 return true;
620 }
621
622 // If we can't guarantee that we'll be able to actually assign the new vreg,
623 // we can't remat.
624 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
625 markValueUsed(&VirtReg, ParentVNI);
626 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
627 return false;
628 }
629
630 // Allocate a new register for the remat.
631 Register NewVReg = Edit->createFrom(Original);
632
633 // Finally we can rematerialize OrigMI before MI.
634 SlotIndex DefIdx =
635 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
636
637 // We take the DebugLoc from MI, since OrigMI may be attributed to a
638 // different source location.
639 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
640 NewMI->setDebugLoc(MI.getDebugLoc());
641
642 (void)DefIdx;
643 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
644 << *LIS.getInstructionFromIndex(DefIdx));
645
646 // Replace operands
647 for (const auto &OpPair : Ops) {
648 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
649 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
650 MO.setReg(NewVReg);
651 MO.setIsKill();
652 }
653 }
654 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
655
656 ++NumRemats;
657 return true;
658}
659
660/// reMaterializeAll - Try to rematerialize as many uses as possible,
661/// and trim the live ranges after.
662void InlineSpiller::reMaterializeAll() {
663 if (!Edit->anyRematerializable(AA))
664 return;
665
666 UsedValues.clear();
667
668 // Try to remat before all uses of snippets.
669 bool anyRemat = false;
670 for (Register Reg : RegsToSpill) {
671 LiveInterval &LI = LIS.getInterval(Reg);
672 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
673 // Debug values are not allowed to affect codegen.
674 if (MI.isDebugValue())
675 continue;
676
677 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
678 "instruction that isn't a DBG_VALUE");
679
680 anyRemat |= reMaterializeFor(LI, MI);
681 }
682 }
683 if (!anyRemat)
684 return;
685
686 // Remove any values that were completely rematted.
687 for (Register Reg : RegsToSpill) {
688 LiveInterval &LI = LIS.getInterval(Reg);
689 for (VNInfo *VNI : llvm::make_range(LI.vni_begin(), LI.vni_end())) {
690 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
691 continue;
692 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
693 MI->addRegisterDead(Reg, &TRI);
694 if (!MI->allDefsAreDead())
695 continue;
696 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
697 DeadDefs.push_back(MI);
698 }
699 }
700
701 // Eliminate dead code after remat. Note that some snippet copies may be
702 // deleted here.
703 if (DeadDefs.empty())
704 return;
705 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
706 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
707
708 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
709 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
710 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
711 // removed, PHI VNIs are still left in the LiveInterval.
712 // So to get rid of an unused reg, we need to check whether it has a non-dbg
713 // reference instead of whether it has a non-empty interval.
714 unsigned ResultPos = 0;
715 for (Register Reg : RegsToSpill) {
716 if (MRI.reg_nodbg_empty(Reg)) {
717 Edit->eraseVirtReg(Reg);
718 continue;
719 }
720
721 assert(LIS.hasInterval(Reg) &&
722 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
723 "Empty and not used live-range?!");
724
725 RegsToSpill[ResultPos++] = Reg;
726 }
727 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
728 LLVM_DEBUG(dbgs() << RegsToSpill.size()
729 << " registers to spill after remat.\n");
730}
731
732//===----------------------------------------------------------------------===//
733// Spilling
734//===----------------------------------------------------------------------===//
735
736/// If MI is a load or store of StackSlot, it can be removed.
737bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
738 int FI = 0;
739 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
740 bool IsLoad = InstrReg;
741 if (!IsLoad)
742 InstrReg = TII.isStoreToStackSlot(*MI, FI);
743
744 // We have a stack access. Is it the right register and slot?
745 if (InstrReg != Reg || FI != StackSlot)
746 return false;
747
748 if (!IsLoad)
749 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
750
751 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
752 LIS.RemoveMachineInstrFromMaps(*MI);
753 MI->eraseFromParent();
754
755 if (IsLoad) {
756 ++NumReloadsRemoved;
757 --NumReloads;
758 } else {
759 ++NumSpillsRemoved;
760 --NumSpills;
761 }
762
763 return true;
764}
765
766#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
767LLVM_DUMP_METHOD
768// Dump the range of instructions from B to E with their slot indexes.
769static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
770 MachineBasicBlock::iterator E,
771 LiveIntervals const &LIS,
772 const char *const header,
773 Register VReg = Register()) {
774 char NextLine = '\n';
775 char SlotIndent = '\t';
776
777 if (std::next(B) == E) {
778 NextLine = ' ';
779 SlotIndent = ' ';
780 }
781
782 dbgs() << '\t' << header << ": " << NextLine;
783
784 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
785 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
786
787 // If a register was passed in and this instruction has it as a
788 // destination that is marked as an early clobber, print the
789 // early-clobber slot index.
790 if (VReg) {
791 MachineOperand *MO = I->findRegisterDefOperand(VReg);
792 if (MO && MO->isEarlyClobber())
793 Idx = Idx.getRegSlot(true);
794 }
795
796 dbgs() << SlotIndent << Idx << '\t' << *I;
797 }
798}
799#endif
800
801/// foldMemoryOperand - Try folding stack slot references in Ops into their
802/// instructions.
803///
804/// @param Ops Operand indices from AnalyzeVirtRegInBundle().
805/// @param LoadMI Load instruction to use instead of stack slot when non-null.
806/// @return True on success.
807bool InlineSpiller::
808foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
809 MachineInstr *LoadMI) {
810 if (Ops.empty())
811 return false;
812 // Don't attempt folding in bundles.
813 MachineInstr *MI = Ops.front().first;
814 if (Ops.back().first != MI || MI->isBundled())
815 return false;
816
817 bool WasCopy = MI->isCopy();
818 Register ImpReg;
819
820 // TII::foldMemoryOperand will do what we need here for statepoint
821 // (fold load into use and remove corresponding def). We will replace
822 // uses of removed def with loads (spillAroundUses).
823 // For that to work we need to untie def and use to pass it through
824 // foldMemoryOperand and signal foldPatchpoint that it is allowed to
825 // fold them.
826 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
827
828 // Spill subregs if the target allows it.
829 // We always want to spill subregs for stackmap/patchpoint pseudos.
830 bool SpillSubRegs = TII.isSubregFoldable() ||
831 MI->getOpcode() == TargetOpcode::STATEPOINT ||
832 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
833 MI->getOpcode() == TargetOpcode::STACKMAP;
834
835 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
836 // operands.
837 SmallVector<unsigned, 8> FoldOps;
838 for (const auto &OpPair : Ops) {
839 unsigned Idx = OpPair.second;
840 assert(MI == OpPair.first && "Instruction conflict during operand folding");
841 MachineOperand &MO = MI->getOperand(Idx);
842 if (MO.isImplicit()) {
843 ImpReg = MO.getReg();
844 continue;
845 }
846
847 if (!SpillSubRegs && MO.getSubReg())
848 return false;
849 // We cannot fold a load instruction into a def.
850 if (LoadMI && MO.isDef())
851 return false;
852 // Tied use operands should not be passed to foldMemoryOperand.
853 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
854 FoldOps.push_back(Idx);
855 }
856
857 // If we only have implicit uses, we won't be able to fold that.
858 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
859 if (FoldOps.empty())
860 return false;
861
862 MachineInstrSpan MIS(MI, MI->getParent());
863
864 SmallVector<std::pair<unsigned, unsigned> > TiedOps;
865 if (UntieRegs)
866 for (unsigned Idx : FoldOps) {
867 MachineOperand &MO = MI->getOperand(Idx);
868 if (!MO.isTied())
869 continue;
870 unsigned Tied = MI->findTiedOperandIdx(Idx);
871 if (MO.isUse())
872 TiedOps.emplace_back(Tied, Idx);
873 else {
874 assert(MO.isDef() && "Tied to not use and def?");
875 TiedOps.emplace_back(Idx, Tied);
876 }
877 MI->untieRegOperand(Idx);
878 }
879
880 MachineInstr *FoldMI =
881 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
882 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
883 if (!FoldMI) {
884 // Re-tie operands.
885 for (auto Tied : TiedOps)
886 MI->tieOperands(Tied.first, Tied.second);
887 return false;
888 }
889
890 // Remove LIS for any dead defs in the original MI not in FoldMI.
891 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
892 if (!MO->isReg())
893 continue;
894 Register Reg = MO->getReg();
895 if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
896 continue;
897 }
898 // Skip non-Defs, including undef uses and internal reads.
899 if (MO->isUse())
900 continue;
901 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
902 if (RI.FullyDefined)
903 continue;
904 // FoldMI does not define this physreg. Remove the LI segment.
905 assert(MO->isDead() && "Cannot fold physreg def");
906 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
907 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
908 }
909
910 int FI;
911 if (TII.isStoreToStackSlot(*MI, FI) &&
912 HSpiller.rmFromMergeableSpills(*MI, FI))
913 --NumSpills;
914 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
915 // Update the call site info.
916 if (MI->isCandidateForCallSiteEntry())
917 MI->getMF()->moveCallSiteInfo(MI, FoldMI);
918
919 // If we've folded a store into an instruction labelled with debug-info,
920 // record a substitution from the old operand to the memory operand. Handle
921 // the simple common case where operand 0 is the one being folded, plus when
922 // the destination operand is also a tied def. More values could be
923 // substituted / preserved with more analysis.
924 if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
925 // Helper lambda.
926 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
927 // Substitute old operand zero to the new instructions memory operand.
928 unsigned OldOperandNum = Ops[0].second;
929 unsigned NewNum = FoldMI->getDebugInstrNum();
930 unsigned OldNum = MI->getDebugInstrNum();
931 MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
932 {NewNum, MachineFunction::DebugOperandMemNumber});
933 };
934
935 const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
936 if (Ops.size() == 1 && Op0.isDef()) {
937 MakeSubstitution();
938 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
939 Op0.getReg() == MI->getOperand(1).getReg()) {
940 MakeSubstitution();
941 }
942 } else if (MI->peekDebugInstrNum()) {
943 // This is a debug-labelled instruction, but the operand being folded isn't
944 // at operand zero. Most likely this means it's a load being folded in.
945 // Substitute any register defs from operand zero up to the one being
946 // folded -- past that point, we don't know what the new operand indexes
947 // will be.
948 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
949 }
950
951 MI->eraseFromParent();
952
953 // Insert any new instructions other than FoldMI into the LIS maps.
954 assert(!MIS.empty() && "Unexpected empty span of instructions!");
955 for (MachineInstr &MI : MIS)
956 if (&MI != FoldMI)
957 LIS.InsertMachineInstrInMaps(MI);
958
959 // TII.foldMemoryOperand may have left some implicit operands on the
960 // instruction. Strip them.
961 if (ImpReg)
962 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
963 MachineOperand &MO = FoldMI->getOperand(i - 1);
964 if (!MO.isReg() || !MO.isImplicit())
965 break;
966 if (MO.getReg() == ImpReg)
967 FoldMI->RemoveOperand(i - 1);
968 }
969
970 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
971 "folded"));
972
973 if (!WasCopy)
974 ++NumFolded;
975 else if (Ops.front().second == 0) {
976 ++NumSpills;
977 // If only one store instruction is required for the spill, add it to the
978 // mergeable list. On X86 AMX, two instructions are required to store, so
979 // we disable merging for that case.
980 if (std::distance(MIS.begin(), MIS.end()) <= 1)
981 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
982 } else
983 ++NumReloads;
984 return true;
985}
986
987void InlineSpiller::insertReload(Register NewVReg,
988 SlotIndex Idx,
989 MachineBasicBlock::iterator MI) {
990 MachineBasicBlock &MBB = *MI->getParent();
991
992 MachineInstrSpan MIS(MI, &MBB);
993 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
994 MRI.getRegClass(NewVReg), &TRI);
995
996 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
997
998 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
999 NewVReg));
1000 ++NumReloads;
1001}
1002
1003/// Check if \p Def fully defines a VReg with an undefined value.
1004/// If that's the case, that means the value of VReg is actually
1005/// not relevant.
1006static bool isRealSpill(const MachineInstr &Def) {
1007 if (!Def.isImplicitDef())
1008 return true;
1009 assert(Def.getNumOperands() == 1 &&
1010 "Implicit def with more than one definition");
1011 // We can say that the VReg defined by Def is undef, only if it is
1012 // fully defined by Def. Otherwise, some of the lanes may not be
1013 // undef and the value of the VReg matters.
1014 return Def.getOperand(0).getSubReg();
1015}
1016
1017/// insertSpill - Insert a spill of NewVReg after MI.
1018void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1019 MachineBasicBlock::iterator MI) {
1020 // Spills are not terminators, so inserting spills after terminators will
1021 // violate invariants in MachineVerifier.
1022 assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1023 MachineBasicBlock &MBB = *MI->getParent();
1024
1025 MachineInstrSpan MIS(MI, &MBB);
1026 MachineBasicBlock::iterator SpillBefore = std::next(MI);
1027 bool IsRealSpill = isRealSpill(*MI);
1028
1029 if (IsRealSpill)
1030 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1031 MRI.getRegClass(NewVReg), &TRI);
1032 else
1033 // Don't spill undef value.
1034 // Anything works for undef, in particular keeping the memory
1035 // uninitialized is a viable option and it saves code size and
1036 // run time.
1037 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1038 .addReg(NewVReg, getKillRegState(isKill));
1039
1040 MachineBasicBlock::iterator Spill = std::next(MI);
1041 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1042 for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1043 getVDefInterval(MI, LIS);
1044
1045 LLVM_DEBUG(
1046 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1047 ++NumSpills;
1048 // If only one store instruction is required for the spill, add it to the
1049 // mergeable list. On X86 AMX, two instructions are required to store, so
1050 // we disable merging for that case.
1051 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1052 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1053}
1054
1055/// spillAroundUses - insert spill code around each use of Reg.
1056void InlineSpiller::spillAroundUses(Register Reg) {
1057 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
19. Assuming 'DebugFlag' is false
20. Loop condition is false. Exiting loop
1058 LiveInterval &OldLI = LIS.getInterval(Reg);
1059
1060 // Iterate over instructions using Reg.
1061 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
1062 // Debug values are not allowed to affect codegen.
1063 if (MI.isDebugValue()) {
1064 // Modify DBG_VALUE now that the value is in a spill slot.
1065 MachineBasicBlock *MBB = MI.getParent();
1066 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
1067 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
1068 MBB->erase(MI);
1069 continue;
1070 }
1071
21. Taking false branch
22. '?' condition is true
1072 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
1073 "instruction that isn't a DBG_VALUE");
1074
1075 // Ignore copies to/from snippets. We'll delete them.
1076 if (SnippetCopies.count(&MI))
23. Assuming the condition is false
24. Taking false branch
1077 continue;
1078
1079 // Stack slot accesses may coalesce away.
1080 if (coalesceStackAccess(&MI, Reg))
25. Taking false branch
1081 continue;
1082
1083 // Analyze instruction.
1084 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1085 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
1086
1087 // Find the slot index where this instruction reads and writes OldLI.
1088 // This is usually the def slot, except for tied early clobbers.
1089 SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
1090 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
26. Assuming 'VNI' is null
27. Taking false branch
1091 if (SlotIndex::isSameInstr(Idx, VNI->def))
1092 Idx = VNI->def;
1093
1094 // Check for a sibling copy.
1095 Register SibReg = isFullCopyOf(MI, Reg);
1096 if (SibReg && isSibling(SibReg)) {
28. Assuming the condition is true
29. Taking true branch
1097 // This may actually be a copy between snippets.
1098 if (isRegToSpill(SibReg)) {
30. Taking false branch
1099       LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
1100 SnippetCopies.insert(&MI);
1101 continue;
1102 }
1103 if (RI.Writes) {
31. Assuming field 'Writes' is false
32. Taking false branch
1104 if (hoistSpillInsideBB(OldLI, MI)) {
1105         // This COPY is now dead; the value is already in the stack slot.
1106 MI.getOperand(0).setIsDead();
1107 DeadDefs.push_back(&MI);
1108 continue;
1109 }
1110 } else {
1111 // This is a reload for a sib-reg copy. Drop spills downstream.
1112 LiveInterval &SibLI = LIS.getInterval(SibReg);
1113 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
33. Calling 'InlineSpiller::eliminateRedundantSpills'
1114 // The COPY will fold to a reload below.
1115 }
1116 }
1117
1118 // Attempt to fold memory ops.
1119 if (foldMemoryOperand(Ops))
1120 continue;
1121
1122 // Create a new virtual register for spill/fill.
1123 // FIXME: Infer regclass from instruction alone.
1124 Register NewVReg = Edit->createFrom(Reg);
1125
1126 if (RI.Reads)
1127 insertReload(NewVReg, Idx, &MI);
1128
1129 // Rewrite instruction operands.
1130 bool hasLiveDef = false;
1131 for (const auto &OpPair : Ops) {
1132 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1133 MO.setReg(NewVReg);
1134 if (MO.isUse()) {
1135 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1136 MO.setIsKill();
1137 } else {
1138 if (!MO.isDead())
1139 hasLiveDef = true;
1140 }
1141 }
1142     LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');
1143
1144 // FIXME: Use a second vreg if instruction has no tied ops.
1145 if (RI.Writes)
1146 if (hasLiveDef)
1147 insertSpill(NewVReg, true, &MI);
1148 }
1149}
1150
1151/// spillAll - Spill all registers remaining after rematerialization.
1152void InlineSpiller::spillAll() {
1153 // Update LiveStacks now that we are committed to spilling.
1154 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
9. Assuming field 'StackSlot' is not equal to NO_STACK_SLOT
10. Taking false branch
1155 StackSlot = VRM.assignVirt2StackSlot(Original);
1156 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1157 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1158 } else
1159 StackInt = &LSS.getInterval(StackSlot);
1160
1161 if (Original != Edit->getReg())
1162 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1163
1164   assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
11. Taking false branch
12. Assuming the condition is true
13. '?' condition is true
1165 for (Register Reg : RegsToSpill)
14. Assuming '__begin1' is equal to '__end1'
1166 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1167 StackInt->getValNumInfo(0));
1168   LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
15. Assuming 'DebugFlag' is false
16. Loop condition is false. Exiting loop
1169
1170 // Spill around uses of all RegsToSpill.
1171 for (Register Reg : RegsToSpill)
17. Assuming '__begin1' is not equal to '__end1'
1172 spillAroundUses(Reg);
18. Calling 'InlineSpiller::spillAroundUses'
1173
1174 // Hoisted spills may cause dead code.
1175 if (!DeadDefs.empty()) {
1176     LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1177 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1178 }
1179
1180 // Finally delete the SnippetCopies.
1181 for (Register Reg : RegsToSpill) {
1182 for (MachineInstr &MI :
1183 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
1184       assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1185 // FIXME: Do this with a LiveRangeEdit callback.
1186 LIS.RemoveMachineInstrFromMaps(MI);
1187 MI.eraseFromParent();
1188 }
1189 }
1190
1191 // Delete all spilled registers.
1192 for (Register Reg : RegsToSpill)
1193 Edit->eraseVirtReg(Reg);
1194}
1195
1196void InlineSpiller::spill(LiveRangeEdit &edit) {
1197 ++NumSpilledRanges;
1198 Edit = &edit;
1199   assert(!Register::isStackSlot(edit.getReg()) &&
1200          "Trying to spill a stack slot.");
1. '?' condition is true
1201 // Share a stack slot among all descendants of Original.
1202 Original = VRM.getOriginal(edit.getReg());
1203 StackSlot = VRM.getStackSlot(Original);
1204 StackInt = nullptr;
1205
1206   LLVM_DEBUG(dbgs() << "Inline spilling "
1207                     << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1208                     << ':' << edit.getParent() << "\nFrom original "
1209                     << printReg(Original) << '\n');
2. Assuming 'DebugFlag' is false
1210   assert(edit.getParent().isSpillable() &&
1211          "Attempting to spill already spilled value.");
3. Loop condition is false. Exiting loop
4. Assuming the condition is true
5. '?' condition is true
1212   assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
6. '?' condition is true
1213
1214 collectRegsToSpill();
1215 reMaterializeAll();
1216
1217 // Remat may handle everything.
1218 if (!RegsToSpill.empty())
7. Taking true branch
1219 spillAll();
8. Calling 'InlineSpiller::spillAll'
1220
1221 Edit->calculateRegClassAndHint(MF, VRAI);
1222}
1223
1224/// Optimizations after all the reg selections and spills are done.
1225void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1226
1227/// When a spill is inserted, add the spill to the MergeableSpills map.
1228void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1229 unsigned Original) {
1230 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1231 LiveInterval &OrigLI = LIS.getInterval(Original);
1232   // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1233   // LiveInterval may be cleared after all its references are spilled.
1234 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1235 auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1236 LI->assign(OrigLI, Allocator);
1237 StackSlotToOrigLI[StackSlot] = std::move(LI);
1238 }
1239 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1240 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1241 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1242 MergeableSpills[MIdx].insert(&Spill);
1243}
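
For illustration, the (StackSlot, VNInfo) key above means two stores of the same original value into the same slot land in a single mergeable set; a minimal sketch (not from this file), assuming hypothetical spills SpillA and SpillB of value OrigVNI into slot S:

    // Both spills share the (slot, value number) key, so they become
    // candidates for the redundancy removal and hoisting passes below.
    std::pair<int, VNInfo *> Key = std::make_pair(S, OrigVNI);
    MergeableSpills[Key].insert(&SpillA);
    MergeableSpills[Key].insert(&SpillB);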
1244
1245/// When a spill is removed, remove the spill from the MergeableSpills map.
1246/// Return true if the spill is removed successfully.
1247bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1248 int StackSlot) {
1249 auto It = StackSlotToOrigLI.find(StackSlot);
1250 if (It == StackSlotToOrigLI.end())
1251 return false;
1252 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1253 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1254 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1255 return MergeableSpills[MIdx].erase(&Spill);
1256}
1257
1258/// Check BB to see if it is a possible target BB to place a hoisted spill,
1259/// i.e., there should be a live sibling of OrigReg at the insert point.
1260bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1261 MachineBasicBlock &BB, Register &LiveReg) {
1262 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1263   // The original def could be after the last insert point in the root
1264   // block; in that case we can't hoist to here.
1265 if (Idx < OrigVNI.def) {
1266     // TODO: We could do better here. If LI is not live in the landing pad,
1267     // we could hoist the spill after the LIP.
1268     LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1269 return false;
1270 }
1271 Register OrigReg = OrigLI.reg();
1272 SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1273   assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1274
1275 for (const Register &SibReg : Siblings) {
1276 LiveInterval &LI = LIS.getInterval(SibReg);
1277 VNInfo *VNI = LI.getVNInfoAt(Idx);
1278 if (VNI) {
1279 LiveReg = SibReg;
1280 return true;
1281 }
1282 }
1283 return false;
1284}
1285
1286/// Remove redundant spills in the same BB. Save those redundant spills in
1287/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1288void HoistSpillHelper::rmRedundantSpills(
1289 SmallPtrSet<MachineInstr *, 16> &Spills,
1290 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1291 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1292   // For each spill seen, check SpillBBToSpill[] to see if its BB already
1293   // contains another spill. If a BB contains more than one spill, keep only
1294   // the earlier spill with the smaller SlotIndex.
1295 for (const auto CurrentSpill : Spills) {
1296 MachineBasicBlock *Block = CurrentSpill->getParent();
1297 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1298 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1299 if (PrevSpill) {
1300 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1301 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1302 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1303 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1304 SpillsToRm.push_back(SpillToRm);
1305 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1306 } else {
1307 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1308 }
1309 }
1310 for (const auto SpillToRm : SpillsToRm)
1311 Spills.erase(SpillToRm);
1312}
1313
1314/// Starting from \p Root find a top-down traversal order of the dominator
1315/// tree to visit all basic blocks containing the elements of \p Spills.
1316/// Redundant spills will be found and put into \p SpillsToRm at the same
1317/// time. \p SpillBBToSpill will be populated as part of the process and
1318/// maps a basic block to the first store occurring in the basic block.
1319/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1320void HoistSpillHelper::getVisitOrders(
1321 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1322 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1323 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1324 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1325 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1326 // The set contains all the possible BB nodes to which we may hoist
1327 // original spills.
1328 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1329   // Save the BB nodes on the path from the first BB node containing a
1330   // non-redundant spill to the Root node.
1331 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1332   // All the spills to be hoisted must originate from a single def
1333   // instruction of OrigReg, which means the def instruction dominates all
1334   // the spills to be hoisted. We choose the BB where the def instruction is
1335   // located as the Root.
1336 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1337   // For every node on the dominator tree with a spill, walk up the
1338   // dominator tree towards the Root node until it is reached. If another
1339   // node containing a spill lies in the middle of the path, the spill seen
1340   // earlier is redundant and the node containing it will be removed. All
1341   // the nodes on the path starting from the first node with a non-redundant
1342   // spill to the Root node will be added to the WorkSet, which will contain
1343   // all the possible hoisting locations once the loop below is done.
1344 for (const auto Spill : Spills) {
1345 MachineBasicBlock *Block = Spill->getParent();
1346 MachineDomTreeNode *Node = MDT[Block];
1347 MachineInstr *SpillToRm = nullptr;
1348 while (Node != RootIDomNode) {
1349 // If Node dominates Block, and it already contains a spill, the spill in
1350 // Block will be redundant.
1351 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1352 SpillToRm = SpillBBToSpill[MDT[Block]];
1353 break;
1354       /// If we see the Node already in WorkSet, the path from the Node to
1355       /// the Root node must already have been traversed by another spill,
1356       /// so there is no need to repeat it.
1357 } else if (WorkSet.count(Node)) {
1358 break;
1359 } else {
1360 NodesOnPath.insert(Node);
1361 }
1362 Node = Node->getIDom();
1363 }
1364 if (SpillToRm) {
1365 SpillsToRm.push_back(SpillToRm);
1366 } else {
1367       // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1368       // set the initial status before hoisting starts. The value for BBs
1369       // containing original spills is set to 0, in order to discriminate
1370       // them from BBs containing hoisted spills, which will be inserted
1371       // into SpillsToKeep later during hoisting.
1372 SpillsToKeep[MDT[Block]] = 0;
1373 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1374 }
1375 NodesOnPath.clear();
1376 }
1377
1378 // Sort the nodes in WorkSet in top-down order and save the nodes
1379 // in Orders. Orders will be used for hoisting in runHoistSpills.
1380 unsigned idx = 0;
1381 Orders.push_back(MDT.getBase().getNode(Root));
1382 do {
1383 MachineDomTreeNode *Node = Orders[idx++];
1384 for (MachineDomTreeNode *Child : Node->children()) {
1385 if (WorkSet.count(Child))
1386 Orders.push_back(Child);
1387 }
1388 } while (idx != Orders.size());
1389   assert(Orders.size() == WorkSet.size() &&
1390          "Orders have different size with WorkSet");
1391
1392#ifndef NDEBUG
1393   LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1394 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1395 for (; RIt != Orders.rend(); RIt++)
1396     LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1397   LLVM_DEBUG(dbgs() << "\n");
1398#endif
1399}
1400
1401/// Try to hoist spills according to BB hotness. The spills to be removed
1402/// will be saved in \p SpillsToRm. The spills to be inserted will be saved
1403/// in \p SpillsToIns.
1404void HoistSpillHelper::runHoistSpills(
1405 LiveInterval &OrigLI, VNInfo &OrigVNI,
1406 SmallPtrSet<MachineInstr *, 16> &Spills,
1407 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1408 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1409 // Visit order of dominator tree nodes.
1410 SmallVector<MachineDomTreeNode *, 32> Orders;
1411 // SpillsToKeep contains all the nodes where spills are to be inserted
1412 // during hoisting. If the spill to be inserted is an original spill
1413 // (not a hoisted one), the value of the map entry is 0. If the spill
1414 // is a hoisted spill, the value of the map entry is the VReg to be used
1415 // as the source of the spill.
1416 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1417 // Map from BB to the first spill inside of it.
1418 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1419
1420 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1421
1422 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1423 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1424 SpillBBToSpill);
1425
1426   // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and the
1427   // cost of all the spills inside those nodes. The node set holds the
1428   // locations where spills are to be inserted in the subtree of the current
1429   // node.
1430 using NodesCostPair =
1431 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1432 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1433
1434 // Iterate Orders set in reverse order, which will be a bottom-up order
1435 // in the dominator tree. Once we visit a dom tree node, we know its
1436 // children have already been visited and the spill locations in the
1437 // subtrees of all the children have been determined.
1438 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1439 for (; RIt != Orders.rend(); RIt++) {
1440 MachineBasicBlock *Block = (*RIt)->getBlock();
1441
1442 // If Block contains an original spill, simply continue.
1443 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1444 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1445 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1446 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1447 continue;
1448 }
1449
1450 // Collect spills in subtree of current node (*RIt) to
1451 // SpillsInSubTreeMap[*RIt].first.
1452 for (MachineDomTreeNode *Child : (*RIt)->children()) {
1453 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1454 continue;
1455       // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first"
1456       // below should be placed before getting the begin and end iterators
1457       // of SpillsInSubTreeMap[Child].first, or else the iterators may be
1458       // invalidated when SpillsInSubTreeMap[*RIt] is seen for the first
1459       // time, the map grows, and the original buckets of the map are moved.
1460 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1461 SpillsInSubTreeMap[*RIt].first;
1462 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1463 SubTreeCost += SpillsInSubTreeMap[Child].second;
1464 auto BI = SpillsInSubTreeMap[Child].first.begin();
1465 auto EI = SpillsInSubTreeMap[Child].first.end();
1466 SpillsInSubTree.insert(BI, EI);
1467 SpillsInSubTreeMap.erase(Child);
1468 }
1469
1470 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1471 SpillsInSubTreeMap[*RIt].first;
1472 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1473 // No spills in subtree, simply continue.
1474 if (SpillsInSubTree.empty())
1475 continue;
1476
1477 // Check whether Block is a possible candidate to insert spill.
1478 Register LiveReg;
1479 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1480 continue;
1481
1482 // If there are multiple spills that could be merged, bias a little
1483 // to hoist the spill.
1484 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1485 ? BranchProbability(9, 10)
1486 : BranchProbability(1, 1);
1487 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1488 // Hoist: Move spills to current Block.
1489 for (const auto SpillBB : SpillsInSubTree) {
1490         // When SpillBB is a BB that contains an original spill, add the
1491         // spill to SpillsToRm.
1492 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1493 !SpillsToKeep[SpillBB]) {
1494 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1495 SpillsToRm.push_back(SpillToRm);
1496 }
1497 // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1498 SpillsToKeep.erase(SpillBB);
1499 }
1500 // Current Block is the BB containing the new hoisted spill. Add it to
1501 // SpillsToKeep. LiveReg is the source of the new spill.
1502 SpillsToKeep[*RIt] = LiveReg;
1503       LLVM_DEBUG({
1504         dbgs() << "spills in BB: ";
1505         for (const auto Rspill : SpillsInSubTree)
1506           dbgs() << Rspill->getBlock()->getNumber() << " ";
1507         dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1508                << "\n";
1509       });
1510 SpillsInSubTree.clear();
1511 SpillsInSubTree.insert(*RIt);
1512 SubTreeCost = MBFI.getBlockFreq(Block);
1513 }
1514 }
1515 // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1516 // save them to SpillsToIns.
1517 for (const auto &Ent : SpillsToKeep) {
1518 if (Ent.second)
1519 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1520 }
1521}
1522
1523/// For spills with equal values, remove redundant spills and hoist those left
1524/// to less hot spots.
1525///
1526/// Spills with equal values will be collected into the same set in
1527/// MergeableSpills when the spill is inserted. These equal spills originate
1528/// from the same defining instruction and are dominated by it. Before
1529/// hoisting all the equal spills, redundant spills inside the same BB are
1530/// first marked for deletion. Then, starting from the spills left, walk up
1531/// the dominator tree towards the Root node where the defining instruction
1532/// is located, mark the dominated spills for deletion along the way, and
1533/// collect the BB nodes on the path from non-dominated spills to the
1534/// defining instruction into a WorkSet. The nodes in WorkSet are candidate
1535/// places where we consider hoisting the spills. We iterate the WorkSet in
1536/// bottom-up order, and for each node we decide whether to hoist the spills
1537/// inside its subtree to that node. In this way, we can get a benefit
1538/// locally even if hoisting all the equal spills to one cold place is impossible.
1539void HoistSpillHelper::hoistAllSpills() {
1540 SmallVector<Register, 4> NewVRegs;
1541 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1542
1543 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1544 Register Reg = Register::index2VirtReg(i);
1545 Register Original = VRM.getPreSplitReg(Reg);
1546 if (!MRI.def_empty(Reg))
1547 Virt2SiblingsMap[Original].insert(Reg);
1548 }
1549
1550 // Each entry in MergeableSpills contains a spill set with equal values.
1551 for (auto &Ent : MergeableSpills) {
1552 int Slot = Ent.first.first;
1553 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1554 VNInfo *OrigVNI = Ent.first.second;
1555 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1556 if (Ent.second.empty())
1557 continue;
1558
1559     LLVM_DEBUG({
1560       dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1561              << "Equal spills in BB: ";
1562       for (const auto spill : EqValSpills)
1563         dbgs() << spill->getParent()->getNumber() << " ";
1564       dbgs() << "\n";
1565     });
1566
1567 // SpillsToRm is the spill set to be removed from EqValSpills.
1568 SmallVector<MachineInstr *, 16> SpillsToRm;
1569 // SpillsToIns is the spill set to be newly inserted after hoisting.
1570 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1571
1572 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1573
1574     LLVM_DEBUG({
1575       dbgs() << "Finally inserted spills in BB: ";
1576       for (const auto &Ispill : SpillsToIns)
1577         dbgs() << Ispill.first->getNumber() << " ";
1578       dbgs() << "\nFinally removed spills in BB: ";
1579       for (const auto Rspill : SpillsToRm)
1580         dbgs() << Rspill->getParent()->getNumber() << " ";
1581       dbgs() << "\n";
1582     });
1583
1584 // Stack live range update.
1585 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1586 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1587 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1588 StackIntvl.getValNumInfo(0));
1589
1590 // Insert hoisted spills.
1591 for (auto const &Insert : SpillsToIns) {
1592 MachineBasicBlock *BB = Insert.first;
1593 Register LiveReg = Insert.second;
1594 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1595 MachineInstrSpan MIS(MII, BB);
1596 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1597 MRI.getRegClass(LiveReg), &TRI);
1598 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1599 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1600 getVDefInterval(MI, LIS);
1601 ++NumSpills;
1602 }
1603
1604 // Remove redundant spills or change them to dead instructions.
1605 NumSpills -= SpillsToRm.size();
1606 for (auto const RMEnt : SpillsToRm) {
1607 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1608 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1609 MachineOperand &MO = RMEnt->getOperand(i - 1);
1610 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1611 RMEnt->RemoveOperand(i - 1);
1612 }
1613 }
1614 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1615 }
1616}
1617
1618/// For a VirtReg clone, the \p New register should have the same physreg or
1619/// stackslot as the \p Old register.
1620void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1621 if (VRM.hasPhys(Old))
1622 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1623 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1624 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1625 else
1626     llvm_unreachable("VReg should be assigned either physreg or stackslot");
1627 if (VRM.hasShape(Old))
1628 VRM.assignVirt2Shape(New, VRM.getShape(Old));
1629}

/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/identity.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLForwardCompat.h"
22#include "llvm/ADT/STLFunctionalExtras.h"
23#include "llvm/ADT/iterator.h"
24#include "llvm/ADT/iterator_range.h"
25#include "llvm/Config/abi-breaking.h"
26#include "llvm/Support/ErrorHandling.h"
27#include <algorithm>
28#include <cassert>
29#include <cstddef>
30#include <cstdint>
31#include <cstdlib>
32#include <functional>
33#include <initializer_list>
34#include <iterator>
35#include <limits>
36#include <memory>
37#include <tuple>
38#include <type_traits>
39#include <utility>
40
41#ifdef EXPENSIVE_CHECKS
42#include <random> // for std::mt19937
43#endif
44
45namespace llvm {
46
47// Only used by compiler if both template types are the same. Useful when
48// using SFINAE to test for the existence of member functions.
49template <typename T, T> struct SameType;
50
51namespace detail {
52
53template <typename RangeT>
54using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
55
56template <typename RangeT>
57using ValueOfRange = typename std::remove_reference<decltype(
58 *std::begin(std::declval<RangeT &>()))>::type;
59
60} // end namespace detail
61
62//===----------------------------------------------------------------------===//
63// Extra additions to <type_traits>
64//===----------------------------------------------------------------------===//
65
66template <typename T> struct make_const_ptr {
67 using type =
68 typename std::add_pointer<typename std::add_const<T>::type>::type;
69};
70
71template <typename T> struct make_const_ref {
72 using type = typename std::add_lvalue_reference<
73 typename std::add_const<T>::type>::type;
74};
75
76namespace detail {
77template <typename...> using void_t = void;
78template <class, template <class...> class Op, class... Args> struct detector {
79 using value_t = std::false_type;
80};
81template <template <class...> class Op, class... Args>
82struct detector<void_t<Op<Args...>>, Op, Args...> {
83 using value_t = std::true_type;
84};
85} // end namespace detail
86
87/// Detects if a given trait holds for some set of arguments 'Args'.
88/// For example, the given trait could be used to detect if a given type
89/// has a copy assignment operator:
90/// template<class T>
91/// using has_copy_assign_t = decltype(std::declval<T&>()
92/// = std::declval<const T&>());
93/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
94template <template <class...> class Op, class... Args>
95using is_detected = typename detail::detector<void, Op, Args...>::value_t;
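
A short usage sketch of is_detected (not part of the original header; the trait mirrors the has_copy_assign_t example in the comment above, and Foo is a hypothetical type):

    template <class T>
    using has_copy_assign_t =
        decltype(std::declval<T &>() = std::declval<const T &>());

    struct Foo {};  // implicitly copy-assignable
    static_assert(llvm::is_detected<has_copy_assign_t, Foo>::value,
                  "Foo should be copy-assignable");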
96
97namespace detail {
98template <typename Callable, typename... Args>
99using is_invocable =
100 decltype(std::declval<Callable &>()(std::declval<Args>()...));
101} // namespace detail
102
103/// Check if a Callable type can be invoked with the given set of arg types.
104template <typename Callable, typename... Args>
105using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
106
107/// This class provides various trait information about a callable object.
108/// * To access the number of arguments: Traits::num_args
109/// * To access the type of an argument: Traits::arg_t<Index>
110/// * To access the type of the result: Traits::result_t
111template <typename T, bool isClass = std::is_class<T>::value>
112struct function_traits : public function_traits<decltype(&T::operator())> {};
113
114/// Overload for class function types.
115template <typename ClassType, typename ReturnType, typename... Args>
116struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
117 /// The number of arguments to this function.
118 enum { num_args = sizeof...(Args) };
119
120 /// The result type of this function.
121 using result_t = ReturnType;
122
123 /// The type of an argument to this function.
124 template <size_t Index>
125 using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
126};
127/// Overload for class function types.
128template <typename ClassType, typename ReturnType, typename... Args>
129struct function_traits<ReturnType (ClassType::*)(Args...), false>
130 : function_traits<ReturnType (ClassType::*)(Args...) const> {};
131/// Overload for non-class function types.
132template <typename ReturnType, typename... Args>
133struct function_traits<ReturnType (*)(Args...), false> {
134 /// The number of arguments to this function.
135 enum { num_args = sizeof...(Args) };
136
137 /// The result type of this function.
138 using result_t = ReturnType;
139
140 /// The type of an argument to this function.
141 template <size_t i>
142 using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
143};
144/// Overload for non-class function type references.
145template <typename ReturnType, typename... Args>
146struct function_traits<ReturnType (&)(Args...), false>
147 : public function_traits<ReturnType (*)(Args...)> {};
148
149/// traits class for checking whether type T is one of any of the given
150/// types in the variadic list.
151template <typename T, typename... Ts>
152using is_one_of = disjunction<std::is_same<T, Ts>...>;
153
154/// traits class for checking whether type T is a base class for all
155/// the given types in the variadic list.
156template <typename T, typename... Ts>
157using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
158
159namespace detail {
160template <typename T, typename... Us> struct TypesAreDistinct;
161template <typename T, typename... Us>
162struct TypesAreDistinct
163 : std::integral_constant<bool, !is_one_of<T, Us...>::value &&
164 TypesAreDistinct<Us...>::value> {};
165template <typename T> struct TypesAreDistinct<T> : std::true_type {};
166} // namespace detail
167
168/// Determine if all types in Ts are distinct.
169///
170/// Useful to statically assert when Ts is intended to describe a non-multi set
171/// of types.
172///
173/// Expensive (currently quadratic in sizeof(Ts...)), and so should only be
174/// asserted once per instantiation of a type which requires it.
175template <typename... Ts> struct TypesAreDistinct;
176template <> struct TypesAreDistinct<> : std::true_type {};
177template <typename... Ts>
178struct TypesAreDistinct
179 : std::integral_constant<bool, detail::TypesAreDistinct<Ts...>::value> {};
180
181/// Find the first index where a type appears in a list of types.
182///
183/// FirstIndexOfType<T, Us...>::value is the first index of T in Us.
184///
185/// Typically only meaningful when it is otherwise statically known that the
186/// type pack has no duplicate types. This should be guaranteed explicitly with
187/// static_assert(TypesAreDistinct<Us...>::value).
188///
189/// It is a compile-time error to instantiate when T is not present in Us, i.e.
190/// if is_one_of<T, Us...>::value is false.
191template <typename T, typename... Us> struct FirstIndexOfType;
192template <typename T, typename U, typename... Us>
193struct FirstIndexOfType<T, U, Us...>
194 : std::integral_constant<size_t, 1 + FirstIndexOfType<T, Us...>::value> {};
195template <typename T, typename... Us>
196struct FirstIndexOfType<T, T, Us...> : std::integral_constant<size_t, 0> {};
197
198/// Find the type at a given index in a list of types.
199///
200/// TypeAtIndex<I, Ts...> is the type at index I in Ts.
201template <size_t I, typename... Ts>
202using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;
203
204//===----------------------------------------------------------------------===//
205// Extra additions to <iterator>
206//===----------------------------------------------------------------------===//
207
208namespace adl_detail {
209
210using std::begin;
211
212template <typename ContainerTy>
213decltype(auto) adl_begin(ContainerTy &&container) {
214 return begin(std::forward<ContainerTy>(container));
215}
216
217using std::end;
218
219template <typename ContainerTy>
220decltype(auto) adl_end(ContainerTy &&container) {
221 return end(std::forward<ContainerTy>(container));
222}
223
224using std::swap;
225
226template <typename T>
227void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
228 std::declval<T>()))) {
229 swap(std::forward<T>(lhs), std::forward<T>(rhs));
230}
231
232} // end namespace adl_detail
233
234template <typename ContainerTy>
235decltype(auto) adl_begin(ContainerTy &&container) {
236 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
237}
238
239template <typename ContainerTy>
240decltype(auto) adl_end(ContainerTy &&container) {
241 return adl_detail::adl_end(std::forward<ContainerTy>(container));
242}
243
244template <typename T>
245void adl_swap(T &&lhs, T &&rhs) noexcept(
246 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
247 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
248}
249
250/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
251template <typename T>
252constexpr bool empty(const T &RangeOrContainer) {
253 return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
254}
255
256/// Returns true if the given container only contains a single element.
257template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
258 auto B = std::begin(C), E = std::end(C);
259 return B != E && std::next(B) == E;
260}
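
For example (a sketch, not from this header; V is a hypothetical container):

    llvm::SmallVector<int, 4> V = {42};
    assert(llvm::hasSingleElement(V));   // exactly one element
    V.push_back(7);
    assert(!llvm::hasSingleElement(V));  // now two elements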
261
262/// Return a range covering \p RangeOrContainer with the first N elements
263/// excluded.
264template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
265 return make_range(std::next(adl_begin(RangeOrContainer), N),
266 adl_end(RangeOrContainer));
267}
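
For example (a sketch, not from this header; A is a hypothetical array):

    int A[] = {1, 2, 3, 4};
    int Sum = 0;
    for (int X : llvm::drop_begin(A, 2))
      Sum += X;  // visits 3 and 4, so Sum == 7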
268
269// mapped_iterator - This is a simple iterator adapter that causes a function to
270// be applied whenever operator* is invoked on the iterator.
271
272template <typename ItTy, typename FuncTy,
273 typename ReferenceTy =
274 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
275class mapped_iterator
276 : public iterator_adaptor_base<
277 mapped_iterator<ItTy, FuncTy>, ItTy,
278 typename std::iterator_traits<ItTy>::iterator_category,
279 std::remove_reference_t<ReferenceTy>,
280 typename std::iterator_traits<ItTy>::difference_type,
281 std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
282public:
283 mapped_iterator(ItTy U, FuncTy F)
284 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
285
286 ItTy getCurrent() { return this->I; }
287
288 const FuncTy &getFunction() const { return F; }
289
290 ReferenceTy operator*() const { return F(*this->I); }
291
292private:
293 FuncTy F;
294};
295
296// map_iterator - Provide a convenient way to create mapped_iterators, just like
297// make_pair is useful for creating pairs...
298template <class ItTy, class FuncTy>
299inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
300 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
301}
302
303template <class ContainerTy, class FuncTy>
304auto map_range(ContainerTy &&C, FuncTy F) {
305 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
306}
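
For example (a sketch, not from this header; V is hypothetical), the function is applied lazily on each dereference rather than materializing a transformed copy:

    std::vector<int> V = {1, 2, 3};
    int Total = 0;
    for (int Sq : llvm::map_range(V, [](int N) { return N * N; }))
      Total += Sq;  // visits 1, 4, 9, so Total == 14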
307
308/// A base type of mapped iterator, that is useful for building derived
309/// iterators that do not need/want to store the map function (as in
310/// mapped_iterator). These iterators must simply provide a `mapElement` method
311/// that defines how to map a value of the iterator to the provided reference
312/// type.
313template <typename DerivedT, typename ItTy, typename ReferenceTy>
314class mapped_iterator_base
315 : public iterator_adaptor_base<
316 DerivedT, ItTy,
317 typename std::iterator_traits<ItTy>::iterator_category,
318 std::remove_reference_t<ReferenceTy>,
319 typename std::iterator_traits<ItTy>::difference_type,
320 std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
321public:
322 using BaseT = mapped_iterator_base;
323
324 mapped_iterator_base(ItTy U)
325 : mapped_iterator_base::iterator_adaptor_base(std::move(U)) {}
326
327 ItTy getCurrent() { return this->I; }
328
329 ReferenceTy operator*() const {
330 return static_cast<const DerivedT &>(*this).mapElement(*this->I);
331 }
332};
333
334/// Helper to determine if type T has a member called rbegin().
335template <typename Ty> class has_rbegin_impl {
336 using yes = char[1];
337 using no = char[2];
338
339 template <typename Inner>
340 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
341
342 template <typename>
343 static no& test(...);
344
345public:
346 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
347};
348
349/// Metafunction to determine if T& or T has a member called rbegin().
350template <typename Ty>
351struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
352};
353
354// Returns an iterator_range over the given container which iterates in reverse.
355// Note that the container must have rbegin()/rend() methods for this to work.
356template <typename ContainerTy>
357auto reverse(ContainerTy &&C,
358 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
359 return make_range(C.rbegin(), C.rend());
360}
361
362// Returns an iterator_range over the given container which iterates in reverse.
363// Note that the container must have begin()/end() methods which return
364// bidirectional iterators for this to work.
365template <typename ContainerTy>
366auto reverse(ContainerTy &&C,
367 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
368 return make_range(std::make_reverse_iterator(std::end(C)),
369 std::make_reverse_iterator(std::begin(C)));
370}
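
For example (a sketch, not from this header; since std::vector has rbegin(), the first overload is selected, but the call site reads the same either way):

    std::vector<int> V = {1, 2, 3};
    int Last = 0;
    for (int X : llvm::reverse(V))
      Last = X;  // visits 3, 2, 1, so Last == 1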
371
372/// An iterator adaptor that filters the elements of given inner iterators.
373///
374/// The predicate parameter should be a callable object that accepts the wrapped
375/// iterator's reference type and returns a bool. When incrementing or
376/// decrementing the iterator, it will call the predicate on each element and
377/// skip any where it returns false.
378///
379/// \code
380/// int A[] = { 1, 2, 3, 4 };
381/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
382/// // R contains { 1, 3 }.
383/// \endcode
384///
385/// Note: filter_iterator_base implements support for forward iteration.
386/// filter_iterator_impl exists to provide support for bidirectional iteration,
387/// conditional on whether the wrapped iterator supports it.
388template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
389class filter_iterator_base
390 : public iterator_adaptor_base<
391 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
392 WrappedIteratorT,
393 typename std::common_type<
394 IterTag, typename std::iterator_traits<
395 WrappedIteratorT>::iterator_category>::type> {
396 using BaseT = typename filter_iterator_base::iterator_adaptor_base;
397
398protected:
399 WrappedIteratorT End;
400 PredicateT Pred;
401
402 void findNextValid() {
403 while (this->I != End && !Pred(*this->I))
404 BaseT::operator++();
405 }
406
407 // Construct the iterator. The begin iterator needs to know where the end
408 // is, so that it can properly stop when it gets there. The end iterator only
409 // needs the predicate to support bidirectional iteration.
410 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
411 PredicateT Pred)
412 : BaseT(Begin), End(End), Pred(Pred) {
413 findNextValid();
414 }
415
416public:
417 using BaseT::operator++;
418
419 filter_iterator_base &operator++() {
420 BaseT::operator++();
421 findNextValid();
422 return *this;
423 }
424};
425
426/// Specialization of filter_iterator_base for forward iteration only.
427template <typename WrappedIteratorT, typename PredicateT,
428 typename IterTag = std::forward_iterator_tag>
429class filter_iterator_impl
430 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
431public:
432 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
433 PredicateT Pred)
434 : filter_iterator_impl::filter_iterator_base(Begin, End, Pred) {}
435};
436
437/// Specialization of filter_iterator_base for bidirectional iteration.
438template <typename WrappedIteratorT, typename PredicateT>
439class filter_iterator_impl<WrappedIteratorT, PredicateT,
440 std::bidirectional_iterator_tag>
441 : public filter_iterator_base<WrappedIteratorT, PredicateT,
442 std::bidirectional_iterator_tag> {
443 using BaseT = typename filter_iterator_impl::filter_iterator_base;
444
445 void findPrevValid() {
446 while (!this->Pred(*this->I))
447 BaseT::operator--();
448 }
449
450public:
451 using BaseT::operator--;
452
453 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
454 PredicateT Pred)
455 : BaseT(Begin, End, Pred) {}
456
457 filter_iterator_impl &operator--() {
458 BaseT::operator--();
459 findPrevValid();
460 return *this;
461 }
462};
463
464namespace detail {
465
466template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
467 using type = std::forward_iterator_tag;
468};
469
470template <> struct fwd_or_bidi_tag_impl<true> {
471 using type = std::bidirectional_iterator_tag;
472};
473
474/// Helper which sets its type member to forward_iterator_tag if the category
475/// of \p IterT does not derive from bidirectional_iterator_tag, and to
476/// bidirectional_iterator_tag otherwise.
477template <typename IterT> struct fwd_or_bidi_tag {
478 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
479 std::bidirectional_iterator_tag,
480 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
481};
482
483} // namespace detail
484
485/// Defines filter_iterator to a suitable specialization of
486/// filter_iterator_impl, based on the underlying iterator's category.
487template <typename WrappedIteratorT, typename PredicateT>
488using filter_iterator = filter_iterator_impl<
489 WrappedIteratorT, PredicateT,
490 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
491
492/// Convenience function that takes a range of elements and a predicate,
493/// and return a new filter_iterator range.
494///
495/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
496/// lifetime of that temporary is not kept by the returned range object, and the
497/// temporary is going to be dropped on the floor after the make_iterator_range
498/// full expression that contains this function call.
499template <typename RangeT, typename PredicateT>
500iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
501make_filter_range(RangeT &&Range, PredicateT Pred) {
502 using FilterIteratorT =
503 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
504 return make_range(
505 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
506 std::end(std::forward<RangeT>(Range)), Pred),
507 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
508 std::end(std::forward<RangeT>(Range)), Pred));
509}
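// Editor's note: a minimal usage sketch, not part of the original header.
// `Vec` is a hypothetical container introduced only for illustration; per the
// FIXME above, a named lvalue is used so the range does not outlive its input.
//
//   std::vector<int> Vec = {1, 2, 3, 4};
//   auto Evens = llvm::make_filter_range(Vec, [](int N) { return N % 2 == 0; });
//   for (int N : Evens)
//     ;  // visits 2, then 4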
510
511/// A pseudo-iterator adaptor that is designed to implement "early increment"
512/// style loops.
513///
514/// This is *not a normal iterator* and should almost never be used directly. It
515/// is intended primarily to be used with range based for loops and some range
516/// algorithms.
517///
518/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
519/// somewhere between them. The constraints of these iterators are:
520///
521/// - On construction or after being incremented, it is comparable and
522/// dereferencable. It is *not* incrementable.
523/// - After being dereferenced, it is neither comparable nor dereferencable, it
524/// is only incrementable.
525///
526/// This means you can only dereference the iterator once, and you can only
527/// increment it once between dereferences.
528template <typename WrappedIteratorT>
529class early_inc_iterator_impl
530 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
531 WrappedIteratorT, std::input_iterator_tag> {
532 using BaseT = typename early_inc_iterator_impl::iterator_adaptor_base;
533
534 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
535
536protected:
537#if LLVM_ENABLE_ABI_BREAKING_CHECKS
538 bool IsEarlyIncremented = false;
539#endif
540
541public:
542 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
543
544 using BaseT::operator*;
545 decltype(*std::declval<WrappedIteratorT>()) operator*() {
546#if LLVM_ENABLE_ABI_BREAKING_CHECKS
547 assert(!IsEarlyIncremented && "Cannot dereference twice!");
548 IsEarlyIncremented = true;
549#endif
550 return *(this->I)++;
551 }
552
553 using BaseT::operator++;
554 early_inc_iterator_impl &operator++() {
555#if LLVM_ENABLE_ABI_BREAKING_CHECKS
556 assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
557 IsEarlyIncremented = false;
558#endif
559 return *this;
560 }
561
562 friend bool operator==(const early_inc_iterator_impl &LHS,
563 const early_inc_iterator_impl &RHS) {
564#if LLVM_ENABLE_ABI_BREAKING_CHECKS
565 assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
566#endif
567 return (const BaseT &)LHS == (const BaseT &)RHS;
568 }
569};
570
571/// Make a range that does early increment to allow mutation of the underlying
572/// range without disrupting iteration.
573///
574/// The underlying iterator will be incremented immediately after it is
575/// dereferenced, allowing deletion of the current node or insertion of nodes to
576/// not disrupt iteration provided they do not invalidate the *next* iterator --
577/// the current iterator can be invalidated.
578///
579/// This requires a very exact pattern of use that is only really suitable to
580/// range based for loops and other range algorithms that explicitly guarantee
581/// to dereference exactly once each element, and to increment exactly once each
582/// element.
583template <typename RangeT>
584iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
585make_early_inc_range(RangeT &&Range) {
586 using EarlyIncIteratorT =
587 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
588 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
589 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
590}
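// Editor's note: a minimal usage sketch, not part of the original header.
// `BB` (a BasicBlock) and `isDead` are hypothetical names; the pattern shown
// is the canonical erase-while-iterating use this adaptor is designed for.
//
//   for (Instruction &I : llvm::make_early_inc_range(BB))
//     if (isDead(I))
//       I.eraseFromParent();  // safe: the wrapped iterator already advanced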
591
592// Forward declarations required by zip_shortest/zip_first/zip_longest.
593template <typename R, typename UnaryPredicate>
594bool all_of(R &&range, UnaryPredicate P);
595template <typename R, typename UnaryPredicate>
596bool any_of(R &&range, UnaryPredicate P);
597
598namespace detail {
599
600using std::declval;
601
602// We have to alias this since inlining the actual type at the usage site
603// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
604template<typename... Iters> struct ZipTupleType {
605 using type = std::tuple<decltype(*declval<Iters>())...>;
606};
607
608template <typename ZipType, typename... Iters>
609using zip_traits = iterator_facade_base<
610 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
611 typename std::iterator_traits<
612 Iters>::iterator_category...>::type,
613 // ^ TODO: Implement random access methods.
614 typename ZipTupleType<Iters...>::type,
615 typename std::iterator_traits<typename std::tuple_element<
616 0, std::tuple<Iters...>>::type>::difference_type,
617 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
618 // inner iterators have the same difference_type. It would fail if, for
619 // instance, the second field's difference_type were non-numeric while the
620 // first is.
621 typename ZipTupleType<Iters...>::type *,
622 typename ZipTupleType<Iters...>::type>;
623
624template <typename ZipType, typename... Iters>
625struct zip_common : public zip_traits<ZipType, Iters...> {
626 using Base = zip_traits<ZipType, Iters...>;
627 using value_type = typename Base::value_type;
628
629 std::tuple<Iters...> iterators;
630
631protected:
632 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
633 return value_type(*std::get<Ns>(iterators)...);
634 }
635
636 template <size_t... Ns>
637 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
638 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
639 }
640
641 template <size_t... Ns>
642 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
643 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
644 }
645
646 template <size_t... Ns>
647 bool test_all_equals(const zip_common &other,
648 std::index_sequence<Ns...>) const {
649 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) ==
650 std::get<Ns>(other.iterators)...},
651 identity<bool>{});
652 }
653
654public:
655 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
656
657 value_type operator*() const {
658 return deref(std::index_sequence_for<Iters...>{});
659 }
660
661 ZipType &operator++() {
662 iterators = tup_inc(std::index_sequence_for<Iters...>{});
663 return *reinterpret_cast<ZipType *>(this);
664 }
665
666 ZipType &operator--() {
667 static_assert(Base::IsBidirectional,
668 "All inner iterators must be at least bidirectional.");
669 iterators = tup_dec(std::index_sequence_for<Iters...>{});
670 return *reinterpret_cast<ZipType *>(this);
671 }
672
673 /// Return true if all the iterators match `other`'s iterators.
674 bool all_equals(zip_common &other) {
675 return test_all_equals(other, std::index_sequence_for<Iters...>{});
676 }
677};
678
679template <typename... Iters>
680struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
681 using Base = zip_common<zip_first<Iters...>, Iters...>;
682
683 bool operator==(const zip_first<Iters...> &other) const {
684 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
685 }
686
687 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
688};
689
690template <typename... Iters>
691class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
692 template <size_t... Ns>
693 bool test(const zip_shortest<Iters...> &other,
694 std::index_sequence<Ns...>) const {
695 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
696 std::get<Ns>(other.iterators)...},
697 identity<bool>{});
698 }
699
700public:
701 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
702
703 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
704
705 bool operator==(const zip_shortest<Iters...> &other) const {
706 return !test(other, std::index_sequence_for<Iters...>{});
707 }
708};
709
710template <template <typename...> class ItType, typename... Args> class zippy {
711public:
712 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
713 using iterator_category = typename iterator::iterator_category;
714 using value_type = typename iterator::value_type;
715 using difference_type = typename iterator::difference_type;
716 using pointer = typename iterator::pointer;
717 using reference = typename iterator::reference;
718
719private:
720 std::tuple<Args...> ts;
721
722 template <size_t... Ns>
723 iterator begin_impl(std::index_sequence<Ns...>) const {
724 return iterator(std::begin(std::get<Ns>(ts))...);
725 }
726 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
727 return iterator(std::end(std::get<Ns>(ts))...);
728 }
729
730public:
731 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
732
733 iterator begin() const {
734 return begin_impl(std::index_sequence_for<Args...>{});
735 }
736 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
737};
738
739} // end namespace detail
740
741/// zip iterator for two or more iterable types.
742template <typename T, typename U, typename... Args>
743detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
744 Args &&... args) {
745 return detail::zippy<detail::zip_shortest, T, U, Args...>(
746 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
747}
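// Editor's note: a minimal usage sketch, not part of the original header.
// `Names` and `Values` are illustrative; iteration stops at the shorter range.
//
//   std::vector<std::string> Names = {"a", "b", "c"};
//   std::vector<int> Values = {1, 2};
//   for (auto T : llvm::zip(Names, Values))
//     (void)std::get<0>(T);  // visits ("a", 1) and ("b", 2); "c" is skipped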
748
749/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
750/// be the shortest.
751template <typename T, typename U, typename... Args>
752detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
753 Args &&... args) {
754 return detail::zippy<detail::zip_first, T, U, Args...>(
755 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
756}
757
758namespace detail {
759template <typename Iter>
760Iter next_or_end(const Iter &I, const Iter &End) {
761 if (I == End)
762 return End;
763 return std::next(I);
764}
765
766template <typename Iter>
767auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
768 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
769 if (I == End)
770 return None;
771 return *I;
772}
773
774template <typename Iter> struct ZipLongestItemType {
775 using type =
776 llvm::Optional<typename std::remove_const<typename std::remove_reference<
777 decltype(*std::declval<Iter>())>::type>::type>;
778};
779
780template <typename... Iters> struct ZipLongestTupleType {
781 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
782};
783
784template <typename... Iters>
785class zip_longest_iterator
786 : public iterator_facade_base<
787 zip_longest_iterator<Iters...>,
788 typename std::common_type<
789 std::forward_iterator_tag,
790 typename std::iterator_traits<Iters>::iterator_category...>::type,
791 typename ZipLongestTupleType<Iters...>::type,
792 typename std::iterator_traits<typename std::tuple_element<
793 0, std::tuple<Iters...>>::type>::difference_type,
794 typename ZipLongestTupleType<Iters...>::type *,
795 typename ZipLongestTupleType<Iters...>::type> {
796public:
797 using value_type = typename ZipLongestTupleType<Iters...>::type;
798
799private:
800 std::tuple<Iters...> iterators;
801 std::tuple<Iters...> end_iterators;
802
803 template <size_t... Ns>
804 bool test(const zip_longest_iterator<Iters...> &other,
805 std::index_sequence<Ns...>) const {
806 return llvm::any_of(
807 std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
808 std::get<Ns>(other.iterators)...},
809 identity<bool>{});
810 }
811
812 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
813 return value_type(
814 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
815 }
816
817 template <size_t... Ns>
818 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
819 return std::tuple<Iters...>(
820 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
821 }
822
823public:
824 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
825 : iterators(std::forward<Iters>(ts.first)...),
826 end_iterators(std::forward<Iters>(ts.second)...) {}
827
828 value_type operator*() const {
829 return deref(std::index_sequence_for<Iters...>{});
830 }
831
832 zip_longest_iterator<Iters...> &operator++() {
833 iterators = tup_inc(std::index_sequence_for<Iters...>{});
834 return *this;
835 }
836
837 bool operator==(const zip_longest_iterator<Iters...> &other) const {
838 return !test(other, std::index_sequence_for<Iters...>{});
839 }
840};
841
842template <typename... Args> class zip_longest_range {
843public:
844 using iterator =
845 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
846 using iterator_category = typename iterator::iterator_category;
847 using value_type = typename iterator::value_type;
848 using difference_type = typename iterator::difference_type;
849 using pointer = typename iterator::pointer;
850 using reference = typename iterator::reference;
851
852private:
853 std::tuple<Args...> ts;
854
855 template <size_t... Ns>
856 iterator begin_impl(std::index_sequence<Ns...>) const {
857 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
858 adl_end(std::get<Ns>(ts)))...);
859 }
860
861 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
862 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
863 adl_end(std::get<Ns>(ts)))...);
864 }
865
866public:
867 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
868
869 iterator begin() const {
870 return begin_impl(std::index_sequence_for<Args...>{});
871 }
872 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
873};
874} // namespace detail
875
876/// Iterate over two or more iterators at the same time. Iteration continues
877/// until all iterators reach the end. Each llvm::Optional in the yielded tuple
878/// only contains a value if its corresponding iterator has not reached the end.
879template <typename T, typename U, typename... Args>
880detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
881 Args &&... args) {
882 return detail::zip_longest_range<T, U, Args...>(
883 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
884}
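// Editor's note: a minimal usage sketch, not part of the original header.
// Unlike zip, exhausted ranges are padded with None rather than truncated.
//
//   std::vector<int> A = {1, 2, 3};
//   std::vector<int> B = {10};
//   for (auto T : llvm::zip_longest(A, B)) {
//     // std::get<1>(T) is an Optional<int>; it is None once B is exhausted.
//   }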
885
886/// Iterator wrapper that concatenates sequences together.
887///
888/// This can concatenate different iterators, even with different types, into
889/// a single iterator provided the value types of all the concatenated
890/// iterators expose `reference` and `pointer` types that can be converted to
891/// `ValueT &` and `ValueT *` respectively. It doesn't support more
892/// interesting/customized pointer or reference types.
893///
894/// Currently this only supports forward or higher iterator categories as
895/// inputs and always exposes a forward iterator interface.
896template <typename ValueT, typename... IterTs>
897class concat_iterator
898 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
899 std::forward_iterator_tag, ValueT> {
900 using BaseT = typename concat_iterator::iterator_facade_base;
901
902 /// We store both the current and end iterators for each concatenated
903 /// sequence in a tuple of pairs.
904 ///
905 /// Note that something like iterator_range seems nice at first here, but the
906 /// range properties are of little benefit and end up getting in the way
907 /// because we need to do mutation on the current iterators.
908 std::tuple<IterTs...> Begins;
909 std::tuple<IterTs...> Ends;
910
911 /// Attempts to increment a specific iterator.
912 ///
913 /// Returns true if it was able to increment the iterator. Returns false if
914 /// the iterator is already at the end iterator.
915 template <size_t Index> bool incrementHelper() {
916 auto &Begin = std::get<Index>(Begins);
917 auto &End = std::get<Index>(Ends);
918 if (Begin == End)
919 return false;
920
921 ++Begin;
922 return true;
923 }
924
925 /// Increments the first non-end iterator.
926 ///
927 /// It is an error to call this with all iterators at the end.
928 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
929 // Build a sequence of functions to increment each iterator if possible.
930 bool (concat_iterator::*IncrementHelperFns[])() = {
931 &concat_iterator::incrementHelper<Ns>...};
932
933 // Loop over them, and stop as soon as we succeed at incrementing one.
934 for (auto &IncrementHelperFn : IncrementHelperFns)
935 if ((this->*IncrementHelperFn)())
936 return;
937
938 llvm_unreachable("Attempted to increment an end concat iterator!");
939 }
940
941 /// Returns null if the specified iterator is at the end. Otherwise,
942 /// dereferences the iterator and returns the address of the resulting
943 /// reference.
944 template <size_t Index> ValueT *getHelper() const {
945 auto &Begin = std::get<Index>(Begins);
946 auto &End = std::get<Index>(Ends);
947 if (Begin == End)
948 return nullptr;
949
950 return &*Begin;
951 }
952
953 /// Finds the first non-end iterator, dereferences, and returns the resulting
954 /// reference.
955 ///
956 /// It is an error to call this with all iterators at the end.
957 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
958 // Build a sequence of functions to get from iterator if possible.
959 ValueT *(concat_iterator::*GetHelperFns[])() const = {
960 &concat_iterator::getHelper<Ns>...};
961
962 // Loop over them, and return the first result we find.
963 for (auto &GetHelperFn : GetHelperFns)
964 if (ValueT *P = (this->*GetHelperFn)())
965 return *P;
966
967 llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
968 }
969
970public:
971 /// Constructs an iterator from a sequence of ranges.
972 ///
973 /// We need the full range to know how to switch between each of the
974 /// iterators.
975 template <typename... RangeTs>
976 explicit concat_iterator(RangeTs &&... Ranges)
977 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
978
979 using BaseT::operator++;
980
981 concat_iterator &operator++() {
982 increment(std::index_sequence_for<IterTs...>());
983 return *this;
984 }
985
986 ValueT &operator*() const {
987 return get(std::index_sequence_for<IterTs...>());
988 }
989
990 bool operator==(const concat_iterator &RHS) const {
991 return Begins == RHS.Begins && Ends == RHS.Ends;
992 }
993};
994
995namespace detail {
996
997/// Helper to store a sequence of ranges being concatenated and access them.
998///
999/// This is designed to facilitate providing actual storage when temporaries
1000/// are passed into the constructor such that we can use it as part of range
1001/// based for loops.
1002template <typename ValueT, typename... RangeTs> class concat_range {
1003public:
1004 using iterator =
1005 concat_iterator<ValueT,
1006 decltype(std::begin(std::declval<RangeTs &>()))...>;
1007
1008private:
1009 std::tuple<RangeTs...> Ranges;
1010
1011 template <size_t... Ns>
1012 iterator begin_impl(std::index_sequence<Ns...>) {
1013 return iterator(std::get<Ns>(Ranges)...);
1014 }
1015 template <size_t... Ns>
1016 iterator begin_impl(std::index_sequence<Ns...>) const {
1017 return iterator(std::get<Ns>(Ranges)...);
1018 }
1019 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
1020 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
1021 std::end(std::get<Ns>(Ranges)))...);
1022 }
1023 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
1024 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
1025 std::end(std::get<Ns>(Ranges)))...);
1026 }
1027
1028public:
1029 concat_range(RangeTs &&... Ranges)
1030 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1031
1032 iterator begin() {
1033 return begin_impl(std::index_sequence_for<RangeTs...>{});
1034 }
1035 iterator begin() const {
1036 return begin_impl(std::index_sequence_for<RangeTs...>{});
1037 }
1038 iterator end() {
1039 return end_impl(std::index_sequence_for<RangeTs...>{});
1040 }
1041 iterator end() const {
1042 return end_impl(std::index_sequence_for<RangeTs...>{});
1043 }
1044};
1045
1046} // end namespace detail
1047
1048/// Concatenated range across two or more ranges.
1049///
1050/// The desired value type must be explicitly specified.
1051template <typename ValueT, typename... RangeTs>
1052detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1053 static_assert(sizeof...(RangeTs) > 1,
1054 "Need more than one range to concatenate!");
1055 return detail::concat_range<ValueT, RangeTs...>(
1056 std::forward<RangeTs>(Ranges)...);
1057}
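// Editor's note: a minimal usage sketch, not part of the original header.
// The value type is spelled explicitly, as the comment above requires.
//
//   std::vector<int> A = {1, 2};
//   llvm::SmallVector<int, 4> B = {3, 4};
//   for (int N : llvm::concat<int>(A, B))
//     ;  // visits 1, 2, 3, 4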
1058
1059/// A utility class used to implement an iterator that contains some base object
1060/// and an index. The iterator moves the index but keeps the base constant.
1061template <typename DerivedT, typename BaseT, typename T,
1062 typename PointerT = T *, typename ReferenceT = T &>
1063class indexed_accessor_iterator
1064 : public llvm::iterator_facade_base<DerivedT,
1065 std::random_access_iterator_tag, T,
1066 std::ptrdiff_t, PointerT, ReferenceT> {
1067public:
1068 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1069 assert(base == rhs.base && "incompatible iterators");
1070 return index - rhs.index;
1071 }
1072 bool operator==(const indexed_accessor_iterator &rhs) const {
1073 return base == rhs.base && index == rhs.index;
1074 }
1075 bool operator<(const indexed_accessor_iterator &rhs) const {
1076 assert(base == rhs.base && "incompatible iterators");
1077 return index < rhs.index;
1078 }
1079
1080 DerivedT &operator+=(ptrdiff_t offset) {
1081 this->index += offset;
1082 return static_cast<DerivedT &>(*this);
1083 }
1084 DerivedT &operator-=(ptrdiff_t offset) {
1085 this->index -= offset;
1086 return static_cast<DerivedT &>(*this);
1087 }
1088
1089 /// Returns the current index of the iterator.
1090 ptrdiff_t getIndex() const { return index; }
1091
1092 /// Returns the current base of the iterator.
1093 const BaseT &getBase() const { return base; }
1094
1095protected:
1096 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1097 : base(base), index(index) {}
1098 BaseT base;
1099 ptrdiff_t index;
1100};
1101
1102namespace detail {
1103/// The class represents the base of a range of indexed_accessor_iterators. It
1104/// provides support for many different range functionalities, e.g.
1105/// drop_front/slice/etc. Derived range classes must implement the following
1106/// static methods:
1107/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1108/// - Dereference an iterator pointing to the base object at the given
1109/// index.
1110/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1111/// - Return a new base that is offset from the provided base by 'index'
1112/// elements.
1113template <typename DerivedT, typename BaseT, typename T,
1114 typename PointerT = T *, typename ReferenceT = T &>
1115class indexed_accessor_range_base {
1116public:
1117 using RangeBaseT = indexed_accessor_range_base;
1118
1119 /// An iterator element of this range.
1120 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1121 PointerT, ReferenceT> {
1122 public:
1123 // Index into this iterator, invoking a static method on the derived type.
1124 ReferenceT operator*() const {
1125 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1126 }
1127
1128 private:
1129 iterator(BaseT owner, ptrdiff_t curIndex)
1130 : iterator::indexed_accessor_iterator(owner, curIndex) {}
1131
1132 /// Allow access to the constructor.
1133 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1134 ReferenceT>;
1135 };
1136
1137 indexed_accessor_range_base(iterator begin, iterator end)
1138 : base(offset_base(begin.getBase(), begin.getIndex())),
1139 count(end.getIndex() - begin.getIndex()) {}
1140 indexed_accessor_range_base(const iterator_range<iterator> &range)
1141 : indexed_accessor_range_base(range.begin(), range.end()) {}
1142 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1143 : base(base), count(count) {}
1144
1145 iterator begin() const { return iterator(base, 0); }
1146 iterator end() const { return iterator(base, count); }
1147 ReferenceT operator[](size_t Index) const {
1148 assert(Index < size() && "invalid index for value range");
1149 return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
1150 }
1151 ReferenceT front() const {
1152 assert(!empty() && "expected non-empty range");
1153 return (*this)[0];
1154 }
1155 ReferenceT back() const {
1156 assert(!empty() && "expected non-empty range");
1157 return (*this)[size() - 1];
1158 }
1159
1160 /// Compare this range with another.
1161 template <typename OtherT> bool operator==(const OtherT &other) const {
1162 return size() ==
1163 static_cast<size_t>(std::distance(other.begin(), other.end())) &&
1164 std::equal(begin(), end(), other.begin());
1165 }
1166 template <typename OtherT> bool operator!=(const OtherT &other) const {
1167 return !(*this == other);
1168 }
1169
1170 /// Return the size of this range.
1171 size_t size() const { return count; }
1172
1173 /// Return if the range is empty.
1174 bool empty() const { return size() == 0; }
1175
1176 /// Drop the first N elements, and keep M elements.
1177 DerivedT slice(size_t n, size_t m) const {
1178 assert(n + m <= size() && "invalid size specifiers");
1179 return DerivedT(offset_base(base, n), m);
1180 }
1181
1182 /// Drop the first n elements.
1183 DerivedT drop_front(size_t n = 1) const {
1184 assert(size() >= n && "Dropping more elements than exist");
1185 return slice(n, size() - n);
1186 }
1187 /// Drop the last n elements.
1188 DerivedT drop_back(size_t n = 1) const {
1189 assert(size() >= n && "Dropping more elements than exist");
1190 return DerivedT(base, size() - n);
1191 }
1192
1193 /// Take the first n elements.
1194 DerivedT take_front(size_t n = 1) const {
1195 return n < size() ? drop_back(size() - n)
1196 : static_cast<const DerivedT &>(*this);
1197 }
1198
1199 /// Take the last n elements.
1200 DerivedT take_back(size_t n = 1) const {
1201 return n < size() ? drop_front(size() - n)
1202 : static_cast<const DerivedT &>(*this);
1203 }
1204
1205 /// Allow conversion to any type accepting an iterator_range.
1206 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1207 RangeT, iterator_range<iterator>>::value>>
1208 operator RangeT() const {
1209 return RangeT(iterator_range<iterator>(*this));
1210 }
1211
1212 /// Returns the base of this range.
1213 const BaseT &getBase() const { return base; }
1214
1215private:
1216 /// Offset the given base by the given amount.
1217 static BaseT offset_base(const BaseT &base, size_t n) {
1218 return n == 0 ? base : DerivedT::offset_base(base, n);
1219 }
1220
1221protected:
1222 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1223 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1224 indexed_accessor_range_base &
1225 operator=(const indexed_accessor_range_base &) = default;
1226
1227 /// The base that owns the provided range of values.
1228 BaseT base;
1229 /// The size from the owning range.
1230 ptrdiff_t count;
1231};
1232} // end namespace detail
1233
1234/// This class provides an implementation of a range of
1235/// indexed_accessor_iterators where the base is not indexable. Ranges with
1236/// bases that are offsetable should derive from indexed_accessor_range_base
1237/// instead. Derived range classes are expected to implement the following
1238/// static method:
1239/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1240/// - Dereference an iterator pointing to a parent base at the given index.
1241template <typename DerivedT, typename BaseT, typename T,
1242 typename PointerT = T *, typename ReferenceT = T &>
1243class indexed_accessor_range
1244 : public detail::indexed_accessor_range_base<
1245 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1246public:
1247 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1248 : detail::indexed_accessor_range_base<
1249 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1250 std::make_pair(base, startIndex), count) {}
1251 using detail::indexed_accessor_range_base<
1252 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1253 ReferenceT>::indexed_accessor_range_base;
1254
1255 /// Returns the current base of the range.
1256 const BaseT &getBase() const { return this->base.first; }
1257
1258 /// Returns the current start index of the range.
1259 ptrdiff_t getStartIndex() const { return this->base.second; }
1260
1261 /// See `detail::indexed_accessor_range_base` for details.
1262 static std::pair<BaseT, ptrdiff_t>
1263 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1264 // We encode the internal base as a pair of the derived base and a start
1265 // index into the derived base.
1266 return std::make_pair(base.first, base.second + index);
1267 }
1268 /// See `detail::indexed_accessor_range_base` for details.
1269 static ReferenceT
1270 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1271 ptrdiff_t index) {
1272 return DerivedT::dereference(base.first, base.second + index);
1273 }
1274};
1275
1276namespace detail {
1277/// Return a reference to the first or second member of a reference; for a
1278/// temporary, return a copy of the member instead.
1279///
1280/// When passing a range whose iterators return values instead of references,
1281/// the reference must be dropped from `decltype((elt.first))`, which will
1282/// always be a reference, to avoid returning a reference to a temporary.
1283template <typename EltTy, typename FirstTy> class first_or_second_type {
1284public:
1285 using type =
1286 typename std::conditional_t<std::is_reference<EltTy>::value, FirstTy,
1287 std::remove_reference_t<FirstTy>>;
1288};
1289} // end namespace detail
1290
1291/// Given a container of pairs, return a range over the first elements.
1292template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1293 using EltTy = decltype((*std::begin(c)));
1294 return llvm::map_range(std::forward<ContainerTy>(c),
1295 [](EltTy elt) -> typename detail::first_or_second_type<
1296 EltTy, decltype((elt.first))>::type {
1297 return elt.first;
1298 });
1299}
1300
1301/// Given a container of pairs, return a range over the second elements.
1302template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1303 using EltTy = decltype((*std::begin(c)));
1304 return llvm::map_range(
1305 std::forward<ContainerTy>(c),
1306 [](EltTy elt) ->
1307 typename detail::first_or_second_type<EltTy,
1308 decltype((elt.second))>::type {
1309 return elt.second;
1310 });
1311}
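// Editor's note: a minimal usage sketch, not part of the original header.
// `M` is a hypothetical map-like container of pairs.
//
//   llvm::DenseMap<int, llvm::StringRef> M;
//   for (int Key : llvm::make_first_range(M))
//     ;  // keys only
//   for (llvm::StringRef Value : llvm::make_second_range(M))
//     ;  // mapped values only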
1312
1313//===----------------------------------------------------------------------===//
1314// Extra additions to <utility>
1315//===----------------------------------------------------------------------===//
1316
1317/// Function object to check whether the first component of a std::pair
1318/// compares less than the first component of another std::pair.
1319struct less_first {
1320 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1321 return std::less<>()(lhs.first, rhs.first);
1322 }
1323};
1324
1325/// Function object to check whether the second component of a std::pair
1326/// compares less than the second component of another std::pair.
1327struct less_second {
1328 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1329 return std::less<>()(lhs.second, rhs.second);
1330 }
1331};
1332
1333/// \brief Function object to apply a binary function to the first component of
1334/// a std::pair.
1335template<typename FuncTy>
1336struct on_first {
1337 FuncTy func;
1338
1339 template <typename T>
1340 decltype(auto) operator()(const T &lhs, const T &rhs) const {
1341 return func(lhs.first, rhs.first);
1342 }
1343};
1344
1345/// Utility type to build an inheritance chain that makes it easy to rank
1346/// overload candidates.
1347template <int N> struct rank : rank<N - 1> {};
1348template <> struct rank<0> {};
1349
1350/// traits class for checking whether type T is one of any of the given
1351/// types in the variadic list.
1352template <typename T, typename... Ts>
1353using is_one_of = disjunction<std::is_same<T, Ts>...>;
1354
1355/// traits class for checking whether type T is a base class for all
1356/// the given types in the variadic list.
1357template <typename T, typename... Ts>
1358using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
1359
1360namespace detail {
1361template <typename... Ts> struct Visitor;
1362
1363template <typename HeadT, typename... TailTs>
1364struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
1365 explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
1366 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
1367 Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
1368 using remove_cvref_t<HeadT>::operator();
1369 using Visitor<TailTs...>::operator();
1370};
1371
1372template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
1373 explicit constexpr Visitor(HeadT &&Head)
1374 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
1375 using remove_cvref_t<HeadT>::operator();
1376};
1377} // namespace detail
1378
1379/// Returns an opaquely-typed Callable object whose operator() overload set is
1380/// the sum of the operator() overload sets of each CallableT in CallableTs.
1381///
1382/// The type of the returned object derives from each CallableT in CallableTs.
1383/// The returned object is constructed by invoking the appropriate copy or move
1384/// constructor of each CallableT, as selected by overload resolution on the
1385/// corresponding argument to makeVisitor.
1386///
1387/// Example:
1388///
1389/// \code
1390/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
1391/// [](int i) { return "int"; },
1392/// [](std::string s) { return "str"; });
1393/// auto a = visitor(42); // `a` is now "int".
1394/// auto b = visitor("foo"); // `b` is now "str".
1395/// auto c = visitor(3.14f); // `c` is now "unhandled type".
1396/// \endcode
1397///
1398/// Example of making a visitor with a lambda which captures a move-only type:
1399///
1400/// \code
1401/// std::unique_ptr<FooHandler> FH = /* ... */;
1402/// auto visitor = makeVisitor(
1403/// [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
1404/// [](int i) { return i; },
1405/// [](std::string s) { return atoi(s.c_str()); });
1406/// \endcode
1407template <typename... CallableTs>
1408constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
1409 return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
1410}
1411
1412//===----------------------------------------------------------------------===//
1413// Extra additions for arrays
1414//===----------------------------------------------------------------------===//
1415
1416// We have a copy here so that LLVM behaves the same when using different
1417// standard libraries.
1418template <class Iterator, class RNG>
1419void shuffle(Iterator first, Iterator last, RNG &&g) {
1420 // It would be better to use a std::uniform_int_distribution,
1421 // but that would be stdlib dependent.
1422 typedef
1423 typename std::iterator_traits<Iterator>::difference_type difference_type;
1424 for (auto size = last - first; size > 1; ++first, (void)--size) {
1425 difference_type offset = g() % size;
1426 // Avoid self-assignment due to incorrect assertions in libstdc++
1427 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1428 if (offset != difference_type(0))
1429 std::iter_swap(first, first + offset);
1430 }
1431}
1432
1433/// Find the length of an array.
1434template <class T, std::size_t N>
1435constexpr inline size_t array_lengthof(T (&)[N]) {
1436 return N;
1437}
1438
1439/// Adapt std::less<T> for array_pod_sort.
1440template<typename T>
1441inline int array_pod_sort_comparator(const void *P1, const void *P2) {
1442 if (std::less<T>()(*reinterpret_cast<const T*>(P1),
1443 *reinterpret_cast<const T*>(P2)))
1444 return -1;
1445 if (std::less<T>()(*reinterpret_cast<const T*>(P2),
1446 *reinterpret_cast<const T*>(P1)))
1447 return 1;
1448 return 0;
1449}
1450
1451/// get_array_pod_sort_comparator - This is an internal helper function used to
1452/// get type deduction of T right.
1453template<typename T>
1454inline int (*get_array_pod_sort_comparator(const T &))
1455 (const void*, const void*) {
1456 return array_pod_sort_comparator<T>;
1457}
1458
1459#ifdef EXPENSIVE_CHECKS
1460namespace detail {
1461
1462inline unsigned presortShuffleEntropy() {
1463 static unsigned Result(std::random_device{}());
1464 return Result;
1465}
1466
1467template <class IteratorTy>
1468inline void presortShuffle(IteratorTy Start, IteratorTy End) {
1469 std::mt19937 Generator(presortShuffleEntropy());
1470 llvm::shuffle(Start, End, Generator);
1471}
1472
1473} // end namespace detail
1474#endif
1475
1476/// array_pod_sort - This sorts an array with the specified start and end
1477/// extent. This is just like std::sort, except that it calls qsort instead of
1478/// using an inlined template. qsort is slightly slower than std::sort, but
1479/// most sorts are not performance critical in LLVM and std::sort has to be
1480/// template instantiated for each type, leading to significant measured code
1481/// bloat. This function should generally be used instead of std::sort where
1482/// possible.
1483///
1484/// This function assumes that you have simple POD-like types that can be
1485/// compared with std::less and can be moved with memcpy. If this isn't true,
1486/// you should use std::sort.
1487///
1488/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1489/// default to std::less.
1490template<class IteratorTy>
1491inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
1492 // Don't inefficiently call qsort with one element or trigger undefined
1493 // behavior with an empty sequence.
1494 auto NElts = End - Start;
1495 if (NElts <= 1) return;
1496#ifdef EXPENSIVE_CHECKS
1497 detail::presortShuffle<IteratorTy>(Start, End);
1498#endif
1499 qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
1500}
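// Editor's note: a minimal usage sketch, not part of the original header.
// SmallVector iterators are pointers and unsigned is trivially copyable, so
// the element type satisfies the POD-like requirements stated above.
//
//   llvm::SmallVector<unsigned, 8> V = {3, 1, 2};
//   llvm::array_pod_sort(V.begin(), V.end());  // V is now {1, 2, 3}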
1501
1502template <class IteratorTy>
1503inline void array_pod_sort(
1504 IteratorTy Start, IteratorTy End,
1505 int (*Compare)(
1506 const typename std::iterator_traits<IteratorTy>::value_type *,
1507 const typename std::iterator_traits<IteratorTy>::value_type *)) {
1508 // Don't inefficiently call qsort with one element or trigger undefined
1509 // behavior with an empty sequence.
1510 auto NElts = End - Start;
1511 if (NElts <= 1) return;
1512#ifdef EXPENSIVE_CHECKS
1513 detail::presortShuffle<IteratorTy>(Start, End);
1514#endif
1515 qsort(&*Start, NElts, sizeof(*Start),
1516 reinterpret_cast<int (*)(const void *, const void *)>(Compare));
1517}
1518
1519namespace detail {
1520template <typename T>
1521// We can use qsort if the iterator type is a pointer and the underlying value
1522// is trivially copyable.
1523using sort_trivially_copyable = conjunction<
1524 std::is_pointer<T>,
1525 std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
1526} // namespace detail
1527
1528// Provide wrappers to std::sort which shuffle the elements before sorting
1529// to help uncover non-deterministic behavior (PR35135).
1530template <typename IteratorTy,
1531 std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
1532 int> = 0>
1533inline void sort(IteratorTy Start, IteratorTy End) {
1534#ifdef EXPENSIVE_CHECKS
1535 detail::presortShuffle<IteratorTy>(Start, End);
1536#endif
1537 std::sort(Start, End);
1538}
1539
1540// Forward trivially copyable types to array_pod_sort. This avoids a large
1541// amount of code bloat for a minor performance hit.
1542template <typename IteratorTy,
1543 std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
1544 int> = 0>
1545inline void sort(IteratorTy Start, IteratorTy End) {
1546 array_pod_sort(Start, End);
1547}
1548
1549template <typename Container> inline void sort(Container &&C) {
1550 llvm::sort(adl_begin(C), adl_end(C));
1551}
1552
1553template <typename IteratorTy, typename Compare>
1554inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
1555#ifdef EXPENSIVE_CHECKS
1556 detail::presortShuffle<IteratorTy>(Start, End);
1557#endif
1558 std::sort(Start, End, Comp);
1559}
1560
1561template <typename Container, typename Compare>
1562inline void sort(Container &&C, Compare Comp) {
1563 llvm::sort(adl_begin(C), adl_end(C), Comp);
1564}
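// Editor's note: a minimal usage sketch, not part of the original header.
// Under EXPENSIVE_CHECKS the input is shuffled first, so a non-strict or
// non-deterministic comparator shows up as unstable output.
//
//   llvm::SmallVector<llvm::StringRef, 4> V = {"bb", "a", "ccc"};
//   llvm::sort(V, [](llvm::StringRef L, llvm::StringRef R) {
//     return L.size() < R.size();
//   });  // V is now {"a", "bb", "ccc"}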
1565
1566//===----------------------------------------------------------------------===//
1567// Extra additions to <algorithm>
1568//===----------------------------------------------------------------------===//
1569
1570/// Get the size of a range. This is a wrapper function around std::distance
1571/// which is only enabled when the operation is O(1).
1572template <typename R>
1573auto size(R &&Range,
1574 std::enable_if_t<
1575 std::is_base_of<std::random_access_iterator_tag,
1576 typename std::iterator_traits<decltype(
1577 Range.begin())>::iterator_category>::value,
1578 void> * = nullptr) {
1579 return std::distance(Range.begin(), Range.end());
1580}
1581
1582/// Provide wrappers to std::for_each which take ranges instead of having to
1583/// pass begin/end explicitly.
1584template <typename R, typename UnaryFunction>
1585UnaryFunction for_each(R &&Range, UnaryFunction F) {
1586 return std::for_each(adl_begin(Range), adl_end(Range), F);
1587}
1588
1589/// Provide wrappers to std::all_of which take ranges instead of having to pass
1590/// begin/end explicitly.
1591template <typename R, typename UnaryPredicate>
1592bool all_of(R &&Range, UnaryPredicate P) {
1593 return std::all_of(adl_begin(Range), adl_end(Range), P);
1594}
1595
1596/// Provide wrappers to std::any_of which take ranges instead of having to pass
1597/// begin/end explicitly.
1598template <typename R, typename UnaryPredicate>
1599bool any_of(R &&Range, UnaryPredicate P) {
1600 return std::any_of(adl_begin(Range), adl_end(Range), P);
1601}
1602
1603/// Provide wrappers to std::none_of which take ranges instead of having to pass
1604/// begin/end explicitly.
1605template <typename R, typename UnaryPredicate>
1606bool none_of(R &&Range, UnaryPredicate P) {
1607 return std::none_of(adl_begin(Range), adl_end(Range), P);
1608}
1609
1610/// Provide wrappers to std::find which take ranges instead of having to pass
1611/// begin/end explicitly.
1612template <typename R, typename T> auto find(R &&Range, const T &Val) {
1613 return std::find(adl_begin(Range), adl_end(Range), Val);
1614}
1615
1616/// Provide wrappers to std::find_if which take ranges instead of having to pass
1617/// begin/end explicitly.
1618template <typename R, typename UnaryPredicate>
1619auto find_if(R &&Range, UnaryPredicate P) {
1620 return std::find_if(adl_begin(Range), adl_end(Range), P);
1621}
1622
1623template <typename R, typename UnaryPredicate>
1624auto find_if_not(R &&Range, UnaryPredicate P) {
1625 return std::find_if_not(adl_begin(Range), adl_end(Range), P);
1626}
1627
1628/// Provide wrappers to std::remove_if which take ranges instead of having to
1629/// pass begin/end explicitly.
1630template <typename R, typename UnaryPredicate>
1631auto remove_if(R &&Range, UnaryPredicate P) {
1632 return std::remove_if(adl_begin(Range), adl_end(Range), P);
1633}
1634
1635/// Provide wrappers to std::copy_if which take ranges instead of having to
1636/// pass begin/end explicitly.
1637template <typename R, typename OutputIt, typename UnaryPredicate>
1638OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
1639 return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
1640}
1641
1642template <typename R, typename OutputIt>
1643OutputIt copy(R &&Range, OutputIt Out) {
1644 return std::copy(adl_begin(Range), adl_end(Range), Out);
1645}
1646
1647/// Provide wrappers to std::move which take ranges instead of having to
1648/// pass begin/end explicitly.
1649template <typename R, typename OutputIt>
1650OutputIt move(R &&Range, OutputIt Out) {
1651 return std::move(adl_begin(Range), adl_end(Range), Out);
1652}
1653
1654/// Wrapper function around std::find to detect if an element exists
1655/// in a container.
1656template <typename R, typename E>
1657bool is_contained(R &&Range, const E &Element) {
1658 return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
42: Assuming the condition is false
43: Returning zero, which participates in a condition later
1659}
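// Editor's note: a minimal usage sketch, not part of the original header.
// This is the helper the analyzer steps through above; per note 43, a false
// result participates in a condition later in the InlineSpiller path. `Regs`
// and `Reg` are hypothetical names.
//
//   if (!llvm::is_contained(Regs, Reg))
//     Regs.push_back(Reg);  // de-duplicating append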
1660
1661/// Wrapper function around std::is_sorted to check if elements in a range \p R
1662/// are sorted with respect to a comparator \p C.
1663template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
1664 return std::is_sorted(adl_begin(Range), adl_end(Range), C);
1665}
1666
1667/// Wrapper function around std::is_sorted to check if elements in a range \p R
1668/// are sorted in non-descending order.
1669template <typename R> bool is_sorted(R &&Range) {
1670 return std::is_sorted(adl_begin(Range), adl_end(Range));
1671}
1672
1673/// Wrapper function around std::count to count the number of times an element
1674/// \p Element occurs in the given range \p Range.
1675template <typename R, typename E> auto count(R &&Range, const E &Element) {
1676 return std::count(adl_begin(Range), adl_end(Range), Element);
1677}
1678
1679/// Wrapper function around std::count_if to count the number of times an
1680/// element satisfying a given predicate occurs in a range.
1681template <typename R, typename UnaryPredicate>
1682auto count_if(R &&Range, UnaryPredicate P) {
1683 return std::count_if(adl_begin(Range), adl_end(Range), P);
1684}
1685
1686/// Wrapper function around std::transform to apply a function to a range and
1687/// store the result elsewhere.
1688template <typename R, typename OutputIt, typename UnaryFunction>
1689OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
1690 return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
1691}
1692
1693/// Provide wrappers to std::partition which take ranges instead of having to
1694/// pass begin/end explicitly.
1695template <typename R, typename UnaryPredicate>
1696auto partition(R &&Range, UnaryPredicate P) {
1697 return std::partition(adl_begin(Range), adl_end(Range), P);
1698}
1699
1700/// Provide wrappers to std::lower_bound which take ranges instead of having to
1701/// pass begin/end explicitly.
1702template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
1703 return std::lower_bound(adl_begin(Range), adl_end(Range),
1704 std::forward<T>(Value));
1705}
1706
1707template <typename R, typename T, typename Compare>
1708auto lower_bound(R &&Range, T &&Value, Compare C) {
1709 return std::lower_bound(adl_begin(Range), adl_end(Range),
1710 std::forward<T>(Value), C);
1711}
1712
1713/// Provide wrappers to std::upper_bound which take ranges instead of having to
1714/// pass begin/end explicitly.
1715template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
1716 return std::upper_bound(adl_begin(Range), adl_end(Range),
1717 std::forward<T>(Value));
1718}
1719
1720template <typename R, typename T, typename Compare>
1721auto upper_bound(R &&Range, T &&Value, Compare C) {
1722 return std::upper_bound(adl_begin(Range), adl_end(Range),
1723 std::forward<T>(Value), C);
1724}
1725
1726template <typename R>
1727void stable_sort(R &&Range) {
1728 std::stable_sort(adl_begin(Range), adl_end(Range));
1729}
1730
1731template <typename R, typename Compare>
1732void stable_sort(R &&Range, Compare C) {
1733 std::stable_sort(adl_begin(Range), adl_end(Range), C);
1734}
1735
1736/// Binary search for the first iterator in a range where a predicate is false.
1737/// Requires that P is always true below some limit, and always false above it.
1738template <typename R, typename Predicate,
1739 typename Val = decltype(*adl_begin(std::declval<R>()))>
1740auto partition_point(R &&Range, Predicate P) {
1741 return std::partition_point(adl_begin(Range), adl_end(Range), P);
1742}
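// Editor's note: a minimal usage sketch, not part of the original header.
// The predicate must be monotone over the range: true for a prefix, then
// false for the remainder.
//
//   std::vector<int> V = {1, 2, 3, 10, 20};
//   auto It = llvm::partition_point(V, [](int N) { return N < 10; });
//   // It points at 10, the first element for which the predicate is false.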
1743
1744template<typename Range, typename Predicate>
1745auto unique(Range &&R, Predicate P) {
1746 return std::unique(adl_begin(R), adl_end(R), P);
1747}
1748
1749/// Wrapper function around std::equal to detect if pair-wise elements between
1750/// two ranges are the same.
1751template <typename L, typename R> bool equal(L &&LRange, R &&RRange) {
1752 return std::equal(adl_begin(LRange), adl_end(LRange), adl_begin(RRange),
1753 adl_end(RRange));
1754}
1755
1756/// Wrapper function around std::equal to detect if all elements
1757/// in a container are the same.
1758template <typename R>
1759bool is_splat(R &&Range) {
1760 size_t range_size = size(Range);
1761 return range_size != 0 && (range_size == 1 ||
1762 std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
1763}
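// Editor's note: a minimal usage sketch, not part of the original header.
//
//   std::vector<int> A = {7, 7, 7};
//   std::vector<int> B;
//   bool SA = llvm::is_splat(A);  // true
//   bool SB = llvm::is_splat(B);  // false: an empty range is never a splat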
1764
1765/// Provide a container algorithm similar to C++ Library Fundamentals v2's
1766/// `erase_if` which is equivalent to:
1767///
1768/// C.erase(remove_if(C, pred), C.end());
1769///
1770/// This version works for any container with an erase method call accepting
1771/// two iterators.
1772template <typename Container, typename UnaryPredicate>
1773void erase_if(Container &C, UnaryPredicate P) {
1774 C.erase(remove_if(C, P), C.end());
1775}
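// Editor's note: a minimal usage sketch, not part of the original header.
// Works with any container exposing a two-iterator erase, e.g. std::vector.
//
//   std::vector<int> V = {1, 2, 3, 4};
//   llvm::erase_if(V, [](int N) { return N % 2 == 0; });  // V is now {1, 3}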
1776
1777/// Wrapper function to remove a value from a container:
1778///
1779/// C.erase(remove(C.begin(), C.end(), V), C.end());
1780template <typename Container, typename ValueType>
1781void erase_value(Container &C, ValueType V) {
1782 C.erase(std::remove(C.begin(), C.end(), V), C.end());
1783}
1784
1785/// Wrapper function to append a range to a container.
1786///
1787/// C.insert(C.end(), R.begin(), R.end());
1788template <typename Container, typename Range>
1789inline void append_range(Container &C, Range &&R) {
1790 C.insert(C.end(), R.begin(), R.end());
1791}
1792
1793/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1794/// the range [ValIt, ValEnd) (which is not from the same container).
1795template<typename Container, typename RandomAccessIterator>
1796void replace(Container &Cont, typename Container::iterator ContIt,
1797 typename Container::iterator ContEnd, RandomAccessIterator ValIt,
1798 RandomAccessIterator ValEnd) {
1799 while (true) {
1800 if (ValIt == ValEnd) {
1801 Cont.erase(ContIt, ContEnd);
1802 return;
1803 } else if (ContIt == ContEnd) {
1804 Cont.insert(ContIt, ValIt, ValEnd);
1805 return;
1806 }
1807 *ContIt++ = *ValIt++;
1808 }
1809}
1810
1811/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1812/// the range R.
1813template<typename Container, typename Range = std::initializer_list<
1814 typename Container::value_type>>
1815void replace(Container &Cont, typename Container::iterator ContIt,
1816 typename Container::iterator ContEnd, Range R) {
1817 replace(Cont, ContIt, ContEnd, R.begin(), R.end());
1818}
1819
1820/// An STL-style algorithm similar to std::for_each that applies a second
1821/// functor between every pair of elements.
1822///
1823/// This provides the control flow logic to, for example, print a
1824/// comma-separated list:
1825/// \code
1826/// interleave(names.begin(), names.end(),
1827/// [&](StringRef name) { os << name; },
1828/// [&] { os << ", "; });
1829/// \endcode
1830template <typename ForwardIterator, typename UnaryFunctor,
1831 typename NullaryFunctor,
1832 typename = typename std::enable_if<
1833 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1834 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1835inline void interleave(ForwardIterator begin, ForwardIterator end,
1836 UnaryFunctor each_fn, NullaryFunctor between_fn) {
1837 if (begin == end)
1838 return;
1839 each_fn(*begin);
1840 ++begin;
1841 for (; begin != end; ++begin) {
1842 between_fn();
1843 each_fn(*begin);
1844 }
1845}
1846
1847template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
1848 typename = typename std::enable_if<
1849 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1850 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1851inline void interleave(const Container &c, UnaryFunctor each_fn,
1852 NullaryFunctor between_fn) {
1853 interleave(c.begin(), c.end(), each_fn, between_fn);
1854}
1855
1856/// Overload of interleave for the common case of string separator.
1857template <typename Container, typename UnaryFunctor, typename StreamT,
1858 typename T = detail::ValueOfRange<Container>>
1859inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
1860 const StringRef &separator) {
1861 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
1862}
1863template <typename Container, typename StreamT,
1864 typename T = detail::ValueOfRange<Container>>
1865inline void interleave(const Container &c, StreamT &os,
1866 const StringRef &separator) {
1867 interleave(
1868 c, os, [&](const T &a) { os << a; }, separator);
1869}
1870
1871template <typename Container, typename UnaryFunctor, typename StreamT,
1872 typename T = detail::ValueOfRange<Container>>
1873inline void interleaveComma(const Container &c, StreamT &os,
1874 UnaryFunctor each_fn) {
1875 interleave(c, os, each_fn, ", ");
1876}
1877template <typename Container, typename StreamT,
1878 typename T = detail::ValueOfRange<Container>>
1879inline void interleaveComma(const Container &c, StreamT &os) {
1880 interleaveComma(c, os, [&](const T &a) { os << a; });
1881}
1882
1883//===----------------------------------------------------------------------===//
1884// Extra additions to <memory>
1885//===----------------------------------------------------------------------===//
1886
1887struct FreeDeleter {
1888 void operator()(void* v) {
1889 ::free(v);
1890 }
1891};
1892
1893template<typename First, typename Second>
1894struct pair_hash {
1895 size_t operator()(const std::pair<First, Second> &P) const {
1896 return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
1897 }
1898};
1899
1900/// Binary functor that adapts to any other binary functor after dereferencing
1901/// operands.
1902template <typename T> struct deref {
1903 T func;
1904
1905 // Could be further improved to cope with non-derivable functors and
1906 // non-binary functors (should be a variadic template member function
1907 // operator()).
1908 template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
1909    assert(lhs);
1910    assert(rhs);
1911 return func(*lhs, *rhs);
1912 }
1913};
1914
1915namespace detail {
1916
1917template <typename R> class enumerator_iter;
1918
1919template <typename R> struct result_pair {
1920 using value_reference =
1921 typename std::iterator_traits<IterOfRange<R>>::reference;
1922
1923 friend class enumerator_iter<R>;
1924
1925 result_pair() = default;
1926 result_pair(std::size_t Index, IterOfRange<R> Iter)
1927 : Index(Index), Iter(Iter) {}
1928
1929 result_pair(const result_pair<R> &Other)
1930 : Index(Other.Index), Iter(Other.Iter) {}
1931 result_pair &operator=(const result_pair &Other) {
1932 Index = Other.Index;
1933 Iter = Other.Iter;
1934 return *this;
1935 }
1936
1937 std::size_t index() const { return Index; }
1938 value_reference value() const { return *Iter; }
1939
1940private:
1941 std::size_t Index = std::numeric_limits<std::size_t>::max();
1942 IterOfRange<R> Iter;
1943};
1944
1945template <typename R>
1946class enumerator_iter
1947 : public iterator_facade_base<enumerator_iter<R>, std::forward_iterator_tag,
1948 const result_pair<R>> {
1949 using result_type = result_pair<R>;
1950
1951public:
1952 explicit enumerator_iter(IterOfRange<R> EndIter)
1953 : Result(std::numeric_limits<size_t>::max(), EndIter) {}
1954
1955 enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
1956 : Result(Index, Iter) {}
1957
1958 const result_type &operator*() const { return Result; }
1959
1960 enumerator_iter &operator++() {
1961    assert(Result.Index != std::numeric_limits<size_t>::max());
1962 ++Result.Iter;
1963 ++Result.Index;
1964 return *this;
1965 }
1966
1967 bool operator==(const enumerator_iter &RHS) const {
1968 // Don't compare indices here, only iterators. It's possible for an end
1969 // iterator to have different indices depending on whether it was created
1970 // by calling std::end() versus incrementing a valid iterator.
1971 return Result.Iter == RHS.Result.Iter;
1972 }
1973
1974 enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
1975 enumerator_iter &operator=(const enumerator_iter &Other) {
1976 Result = Other.Result;
1977 return *this;
1978 }
1979
1980private:
1981 result_type Result;
1982};
1983
1984template <typename R> class enumerator {
1985public:
1986 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
1987
1988 enumerator_iter<R> begin() {
1989 return enumerator_iter<R>(0, std::begin(TheRange));
1990 }
1991 enumerator_iter<R> begin() const {
1992 return enumerator_iter<R>(0, std::begin(TheRange));
1993 }
1994
1995 enumerator_iter<R> end() {
1996 return enumerator_iter<R>(std::end(TheRange));
1997 }
1998 enumerator_iter<R> end() const {
1999 return enumerator_iter<R>(std::end(TheRange));
2000 }
2001
2002private:
2003 R TheRange;
2004};
2005
2006} // end namespace detail
2007
2008/// Given an input range, returns a new range whose values are pairs (A, B)
2009/// such that A is the 0-based index of the item in the sequence, and B is
2010/// the value from the original sequence. Example:
2011///
2012/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
2013/// for (auto X : enumerate(Items)) {
2014/// printf("Item %d - %c\n", X.index(), X.value());
2015/// }
2016///
2017/// Output:
2018/// Item 0 - A
2019/// Item 1 - B
2020/// Item 2 - C
2021/// Item 3 - D
2022///
2023template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
2024 return detail::enumerator<R>(std::forward<R>(TheRange));
2025}
2026
2027namespace detail {
2028
2029template <typename F, typename Tuple, std::size_t... I>
2030decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
2031 return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
2032}
2033
2034} // end namespace detail
2035
2036/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
2037/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
2038/// return the result.
2039template <typename F, typename Tuple>
2040decltype(auto) apply_tuple(F &&f, Tuple &&t) {
2041 using Indices = std::make_index_sequence<
2042 std::tuple_size<typename std::decay<Tuple>::type>::value>;
2043
2044 return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
2045 Indices{});
2046}
2047
2048namespace detail {
2049
2050template <typename Predicate, typename... Args>
2051bool all_of_zip_predicate_first(Predicate &&P, Args &&...args) {
2052 auto z = zip(args...);
2053 auto it = z.begin();
2054 auto end = z.end();
2055 while (it != end) {
2056 if (!apply_tuple([&](auto &&...args) { return P(args...); }, *it))
2057 return false;
2058 ++it;
2059 }
2060 return it.all_equals(end);
2061}
2062
2063// Just an adaptor to switch the order of argument and have the predicate before
2064// the zipped inputs.
2065template <typename... ArgsThenPredicate, size_t... InputIndexes>
2066bool all_of_zip_predicate_last(
2067 std::tuple<ArgsThenPredicate...> argsThenPredicate,
2068 std::index_sequence<InputIndexes...>) {
2069 auto constexpr OutputIndex =
2070 std::tuple_size<decltype(argsThenPredicate)>::value - 1;
2071 return all_of_zip_predicate_first(std::get<OutputIndex>(argsThenPredicate),
2072 std::get<InputIndexes>(argsThenPredicate)...);
2073}
2074
2075} // end namespace detail
2076
2077/// Compare two zipped ranges using the provided predicate (as last argument).
2078/// Return true if all elements satisfy the predicate and false otherwise.
2079// Return false if the zipped iterators aren't all at the end (size mismatch).
2080template <typename... ArgsAndPredicate>
2081bool all_of_zip(ArgsAndPredicate &&...argsAndPredicate) {
2082 return detail::all_of_zip_predicate_last(
2083 std::forward_as_tuple(argsAndPredicate...),
2084 std::make_index_sequence<sizeof...(argsAndPredicate) - 1>{});
2085}
2086
2087/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
2088/// time. Not meant for use with random-access iterators.
2089/// Can optionally take a predicate to lazily filter some items.
2090template <typename IterTy,
2091 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2092bool hasNItems(
2093 IterTy &&Begin, IterTy &&End, unsigned N,
2094 Pred &&ShouldBeCounted =
2095 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2096 std::enable_if_t<
2097 !std::is_base_of<std::random_access_iterator_tag,
2098 typename std::iterator_traits<std::remove_reference_t<
2099 decltype(Begin)>>::iterator_category>::value,
2100 void> * = nullptr) {
2101 for (; N; ++Begin) {
2102 if (Begin == End)
2103 return false; // Too few.
2104 N -= ShouldBeCounted(*Begin);
2105 }
2106 for (; Begin != End; ++Begin)
2107 if (ShouldBeCounted(*Begin))
2108 return false; // Too many.
2109 return true;
2110}
2111
2112/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
2113/// time. Not meant for use with random-access iterators.
2114/// Can optionally take a predicate to lazily filter some items.
2115template <typename IterTy,
2116 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2117bool hasNItemsOrMore(
2118 IterTy &&Begin, IterTy &&End, unsigned N,
2119 Pred &&ShouldBeCounted =
2120 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2121 std::enable_if_t<
2122 !std::is_base_of<std::random_access_iterator_tag,
2123 typename std::iterator_traits<std::remove_reference_t<
2124 decltype(Begin)>>::iterator_category>::value,
2125 void> * = nullptr) {
2126 for (; N; ++Begin) {
2127 if (Begin == End)
2128 return false; // Too few.
2129 N -= ShouldBeCounted(*Begin);
2130 }
2131 return true;
2132}
2133
2134/// Returns true if the sequence [Begin, End) has N or fewer items. Can
2135/// optionally take a predicate to lazily filter some items.
2136template <typename IterTy,
2137 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2138bool hasNItemsOrLess(
2139 IterTy &&Begin, IterTy &&End, unsigned N,
2140 Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
2141 return true;
2142 }) {
2143  assert(N != std::numeric_limits<unsigned>::max());
2144 return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
2145}
2146
2147/// Returns true if the given container has exactly N items
2148template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
2149 return hasNItems(std::begin(C), std::end(C), N);
2150}
2151
2152/// Returns true if the given container has N or more items
2153template <typename ContainerTy>
2154bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
2155 return hasNItemsOrMore(std::begin(C), std::end(C), N);
2156}
2157
2158/// Returns true if the given container has N or fewer items
2159template <typename ContainerTy>
2160bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
2161 return hasNItemsOrLess(std::begin(C), std::end(C), N);
2162}
2163
2164/// Returns a raw pointer that represents the same address as the argument.
2165///
2166/// This implementation can be removed once we move to C++20 where it's defined
2167/// as std::to_address().
2168///
2169/// The std::pointer_traits<>::to_address(p) variations of these overloads have
2170/// not been implemented.
2171template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
2172template <class T> constexpr T *to_address(T *P) { return P; }
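
A minimal sketch of both overloads (assuming a std::unique_ptr; raw pointers pass through unchanged):

    std::unique_ptr<int> UP(new int(7));
    int *P1 = to_address(UP); // via UP.operator->()
    int *P2 = to_address(P1); // identity for raw pointers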
2173
2174} // end namespace llvm
2175
2176#endif // LLVM_ADT_STLEXTRAS_H

/build/llvm-toolchain-snapshot-14~++20220125101009+ceec4383681c/llvm/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineInstr class, which is the
10// basic representation for all target dependent machine instructions used by
11// the back end.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEINSTR_H
16#define LLVM_CODEGEN_MACHINEINSTR_H
17
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/PointerSumType.h"
20#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/ilist.h"
22#include "llvm/ADT/ilist_node.h"
23#include "llvm/ADT/iterator_range.h"
24#include "llvm/CodeGen/MachineMemOperand.h"
25#include "llvm/CodeGen/MachineOperand.h"
26#include "llvm/CodeGen/TargetOpcodes.h"
27#include "llvm/IR/DebugLoc.h"
28#include "llvm/IR/InlineAsm.h"
29#include "llvm/IR/PseudoProbe.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCSymbol.h"
32#include "llvm/Support/ArrayRecycler.h"
33#include "llvm/Support/TrailingObjects.h"
34#include <algorithm>
35#include <cassert>
36#include <cstdint>
37#include <utility>
38
39namespace llvm {
40
41class AAResults;
42template <typename T> class ArrayRef;
43class DIExpression;
44class DILocalVariable;
45class MachineBasicBlock;
46class MachineFunction;
47class MachineRegisterInfo;
48class ModuleSlotTracker;
49class raw_ostream;
50template <typename T> class SmallVectorImpl;
51class SmallBitVector;
52class StringRef;
53class TargetInstrInfo;
54class TargetRegisterClass;
55class TargetRegisterInfo;
56
57//===----------------------------------------------------------------------===//
58/// Representation of each machine instruction.
59///
60/// This class isn't a POD type, but it must have a trivial destructor. When a
61/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
62/// without having their destructor called.
63///
64class MachineInstr
65 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
66 ilist_sentinel_tracking<true>> {
67public:
68 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
69
70 /// Flags to specify different kinds of comments to output in
71 /// assembly code. These flags carry semantic information not
72 /// otherwise easily derivable from the IR text.
73 ///
74 enum CommentFlag {
75 ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
76 NoSchedComment = 0x2,
77 TAsmComments = 0x4 // Target Asm comments should start from this value.
78 };
79
80 enum MIFlag {
81 NoFlags = 0,
82 FrameSetup = 1 << 0, // Instruction is used as a part of
83 // function frame setup code.
84 FrameDestroy = 1 << 1, // Instruction is used as a part of
85 // function frame destruction code.
86 BundledPred = 1 << 2, // Instruction has bundled predecessors.
87 BundledSucc = 1 << 3, // Instruction has bundled successors.
88 FmNoNans = 1 << 4, // Instruction does not support Fast
89 // math nan values.
90 FmNoInfs = 1 << 5, // Instruction does not support Fast
91 // math infinity values.
92 FmNsz = 1 << 6, // Instruction is not required to retain
93 // signed zero values.
94 FmArcp = 1 << 7, // Instruction supports Fast math
95 // reciprocal approximations.
96 FmContract = 1 << 8, // Instruction supports Fast math
97 // contraction operations like fma.
98 FmAfn = 1 << 9, // Instruction may map to Fast math
99                          // intrinsic approximation.
100 FmReassoc = 1 << 10, // Instruction supports Fast math
101 // reassociation of operand order.
102 NoUWrap = 1 << 11, // Instruction supports binary operator
103 // no unsigned wrap.
104 NoSWrap = 1 << 12, // Instruction supports binary operator
105 // no signed wrap.
106    IsExact        = 1 << 13,          // Instruction performs a division that
107                                       // is known to be exact.
108 NoFPExcept = 1 << 14, // Instruction does not raise
109                                       // floating-point exceptions.
110 NoMerge = 1 << 15, // Passes that drop source location info
111 // (e.g. branch folding) should skip
112 // this instruction.
113 };
114
115private:
116 const MCInstrDesc *MCID; // Instruction descriptor.
117 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
118
119 // Operands are allocated by an ArrayRecycler.
120 MachineOperand *Operands = nullptr; // Pointer to the first operand.
121 unsigned NumOperands = 0; // Number of operands on instruction.
122
123 uint16_t Flags = 0; // Various bits of additional
124 // information about machine
125 // instruction.
126
127 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
128 // the AsmPrinter to emit helpful
129 // comments. This is *not* semantic
130 // information. Do not use this for
131 // anything other than to convey comment
132 // information to AsmPrinter.
133
134 // OperandCapacity has uint8_t size, so it should be next to AsmPrinterFlags
135 // to properly pack.
136 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
137 OperandCapacity CapOperands; // Capacity of the Operands array.
138
139 /// Internal implementation detail class that provides out-of-line storage for
140 /// extra info used by the machine instruction when this info cannot be stored
141 /// in-line within the instruction itself.
142 ///
143 /// This has to be defined eagerly due to the implementation constraints of
144 /// `PointerSumType` where it is used.
145 class ExtraInfo final
146 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *, MDNode *> {
147 public:
148 static ExtraInfo *create(BumpPtrAllocator &Allocator,
149 ArrayRef<MachineMemOperand *> MMOs,
150 MCSymbol *PreInstrSymbol = nullptr,
151 MCSymbol *PostInstrSymbol = nullptr,
152 MDNode *HeapAllocMarker = nullptr) {
153 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
154 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
155 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
156 auto *Result = new (Allocator.Allocate(
157 totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *>(
158 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol,
159 HasHeapAllocMarker),
160 alignof(ExtraInfo)))
161 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol,
162 HasHeapAllocMarker);
163
164 // Copy the actual data into the trailing objects.
165 std::copy(MMOs.begin(), MMOs.end(),
166 Result->getTrailingObjects<MachineMemOperand *>());
167
168 if (HasPreInstrSymbol)
169 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
170 if (HasPostInstrSymbol)
171 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
172 PostInstrSymbol;
173 if (HasHeapAllocMarker)
174 Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker;
175
176 return Result;
177 }
178
179 ArrayRef<MachineMemOperand *> getMMOs() const {
180 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
181 }
182
183 MCSymbol *getPreInstrSymbol() const {
184 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
185 }
186
187 MCSymbol *getPostInstrSymbol() const {
188 return HasPostInstrSymbol
189 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
190 : nullptr;
191 }
192
193 MDNode *getHeapAllocMarker() const {
194 return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr;
195 }
196
197 private:
198 friend TrailingObjects;
199
200 // Description of the extra info, used to interpret the actual optional
201 // data appended.
202 //
203 // Note that this is not terribly space optimized. This leaves a great deal
204 // of flexibility to fit more in here later.
205 const int NumMMOs;
206 const bool HasPreInstrSymbol;
207 const bool HasPostInstrSymbol;
208 const bool HasHeapAllocMarker;
209
210 // Implement the `TrailingObjects` internal API.
211 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
212 return NumMMOs;
213 }
214 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
215 return HasPreInstrSymbol + HasPostInstrSymbol;
216 }
217 size_t numTrailingObjects(OverloadToken<MDNode *>) const {
218 return HasHeapAllocMarker;
219 }
220
221 // Just a boring constructor to allow us to initialize the sizes. Always use
222 // the `create` routine above.
223 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol,
224 bool HasHeapAllocMarker)
225 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
226 HasPostInstrSymbol(HasPostInstrSymbol),
227 HasHeapAllocMarker(HasHeapAllocMarker) {}
228 };
229
230 /// Enumeration of the kinds of inline extra info available. It is important
231 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
232 /// it accessible as an `ArrayRef`.
233 enum ExtraInfoInlineKinds {
234 EIIK_MMO = 0,
235 EIIK_PreInstrSymbol,
236 EIIK_PostInstrSymbol,
237 EIIK_OutOfLine
238 };
239
240 // We store extra information about the instruction here. The common case is
241 // expected to be nothing or a single pointer (typically a MMO or a symbol).
242 // We work to optimize this common case by storing it inline here rather than
243 // requiring a separate allocation, but we fall back to an allocation when
244 // multiple pointers are needed.
245 PointerSumType<ExtraInfoInlineKinds,
246 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
247 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
248 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
249 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
250 Info;
251
252 DebugLoc DbgLoc; // Source line information.
253
254 /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
255 /// defined by this instruction.
256 unsigned DebugInstrNum;
257
258 // Intrusive list support
259 friend struct ilist_traits<MachineInstr>;
260 friend struct ilist_callback_traits<MachineBasicBlock>;
261 void setParent(MachineBasicBlock *P) { Parent = P; }
262
263 /// This constructor creates a copy of the given
264 /// MachineInstr in the given MachineFunction.
265 MachineInstr(MachineFunction &, const MachineInstr &);
266
267  /// This constructor creates a MachineInstr and adds the implicit operands.
268  /// It reserves space for the number of operands specified by
269 /// MCInstrDesc. An explicit DebugLoc is supplied.
270 MachineInstr(MachineFunction &, const MCInstrDesc &TID, DebugLoc DL,
271 bool NoImp = false);
272
273 // MachineInstrs are pool-allocated and owned by MachineFunction.
274 friend class MachineFunction;
275
276 void
277 dumprImpl(const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
278 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const;
279
280public:
281 MachineInstr(const MachineInstr &) = delete;
282 MachineInstr &operator=(const MachineInstr &) = delete;
283 // Use MachineFunction::DeleteMachineInstr() instead.
284 ~MachineInstr() = delete;
285
286 const MachineBasicBlock* getParent() const { return Parent; }
287 MachineBasicBlock* getParent() { return Parent; }
288
289 /// Move the instruction before \p MovePos.
290 void moveBefore(MachineInstr *MovePos);
291
292 /// Return the function that contains the basic block that this instruction
293 /// belongs to.
294 ///
295 /// Note: this is undefined behaviour if the instruction does not have a
296 /// parent.
297 const MachineFunction *getMF() const;
298 MachineFunction *getMF() {
299 return const_cast<MachineFunction *>(
300 static_cast<const MachineInstr *>(this)->getMF());
301 }
302
303 /// Return the asm printer flags bitvector.
304 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
305
306 /// Clear the AsmPrinter bitvector.
307 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
308
309 /// Return whether an AsmPrinter flag is set.
310 bool getAsmPrinterFlag(CommentFlag Flag) const {
311 return AsmPrinterFlags & Flag;
312 }
313
314 /// Set a flag for the AsmPrinter.
315 void setAsmPrinterFlag(uint8_t Flag) {
316 AsmPrinterFlags |= Flag;
317 }
318
319 /// Clear specific AsmPrinter flags.
320 void clearAsmPrinterFlag(CommentFlag Flag) {
321 AsmPrinterFlags &= ~Flag;
322 }
323
324 /// Return the MI flags bitvector.
325 uint16_t getFlags() const {
326 return Flags;
327 }
328
329 /// Return whether an MI flag is set.
330 bool getFlag(MIFlag Flag) const {
331 return Flags & Flag;
332 }
333
334 /// Set a MI flag.
335 void setFlag(MIFlag Flag) {
336 Flags |= (uint16_t)Flag;
337 }
338
339 void setFlags(unsigned flags) {
340 // Filter out the automatically maintained flags.
341 unsigned Mask = BundledPred | BundledSucc;
342 Flags = (Flags & Mask) | (flags & ~Mask);
343 }
344
345 /// clearFlag - Clear a MI flag.
346 void clearFlag(MIFlag Flag) {
347 Flags &= ~((uint16_t)Flag);
348 }
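
As a usage sketch (MI here is a hypothetical in-scope MachineInstr reference, not part of this header):

    if (!MI.getFlag(MachineInstr::FrameSetup))
      MI.setFlag(MachineInstr::FrameSetup); // mark as frame-setup code
    MI.clearFlag(MachineInstr::FrameSetup); // and undo it again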
349
350 /// Return true if MI is in a bundle (but not the first MI in a bundle).
351 ///
352 /// A bundle looks like this before it's finalized:
353 /// ----------------
354 /// | MI |
355 /// ----------------
356 /// |
357 /// ----------------
358 /// | MI * |
359 /// ----------------
360 /// |
361 /// ----------------
362 /// | MI * |
363 /// ----------------
364 /// In this case, the first MI starts a bundle but is not inside a bundle, the
365 /// next 2 MIs are considered "inside" the bundle.
366 ///
367 /// After a bundle is finalized, it looks like this:
368 /// ----------------
369 /// | Bundle |
370 /// ----------------
371 /// |
372 /// ----------------
373 /// | MI * |
374 /// ----------------
375 /// |
376 /// ----------------
377 /// | MI * |
378 /// ----------------
379 /// |
380 /// ----------------
381 /// | MI * |
382 /// ----------------
383 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
384 /// a bundle, but the next three MIs are.
385 bool isInsideBundle() const {
386 return getFlag(BundledPred);
387 }
388
389  /// Return true if this instruction is part of a bundle. This is true
390 /// if either itself or its following instruction is marked "InsideBundle".
391 bool isBundled() const {
392 return isBundledWithPred() || isBundledWithSucc();
393 }
394
395 /// Return true if this instruction is part of a bundle, and it is not the
396 /// first instruction in the bundle.
397 bool isBundledWithPred() const { return getFlag(BundledPred); }
398
399 /// Return true if this instruction is part of a bundle, and it is not the
400 /// last instruction in the bundle.
401 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
402
403 /// Bundle this instruction with its predecessor. This can be an unbundled
404 /// instruction, or it can be the first instruction in a bundle.
405 void bundleWithPred();
406
407 /// Bundle this instruction with its successor. This can be an unbundled
408 /// instruction, or it can be the last instruction in a bundle.
409 void bundleWithSucc();
410
411 /// Break bundle above this instruction.
412 void unbundleFromPred();
413
414 /// Break bundle below this instruction.
415 void unbundleFromSucc();
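
For illustration, a sketch that counts bundles by walking every instruction of a hypothetical in-scope MachineBasicBlock &MBB (MBB.instrs() also visits instructions inside bundles, unlike iterating MBB directly):

    unsigned NumBundles = 0;
    for (MachineInstr &MI : MBB.instrs())
      if (MI.isBundled() && !MI.isBundledWithPred())
        ++NumBundles; // MI heads a bundle; it is not "inside" one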
416
417 /// Returns the debug location id of this MachineInstr.
418 const DebugLoc &getDebugLoc() const { return DbgLoc; }
419
420 /// Return the operand containing the offset to be used if this DBG_VALUE
421 /// instruction is indirect; will be an invalid register if this value is
422 /// not indirect, and an immediate with value 0 otherwise.
423 const MachineOperand &getDebugOffset() const {
424    assert(isNonListDebugValue() && "not a DBG_VALUE");
425 return getOperand(1);
426 }
427 MachineOperand &getDebugOffset() {
428    assert(isNonListDebugValue() && "not a DBG_VALUE");
429 return getOperand(1);
430 }
431
432 /// Return the operand for the debug variable referenced by
433 /// this DBG_VALUE instruction.
434 const MachineOperand &getDebugVariableOp() const;
435 MachineOperand &getDebugVariableOp();
436
437 /// Return the debug variable referenced by
438 /// this DBG_VALUE instruction.
439 const DILocalVariable *getDebugVariable() const;
440
441 /// Return the operand for the complex address expression referenced by
442 /// this DBG_VALUE instruction.
443 const MachineOperand &getDebugExpressionOp() const;
444 MachineOperand &getDebugExpressionOp();
445
446 /// Return the complex address expression referenced by
447 /// this DBG_VALUE instruction.
448 const DIExpression *getDebugExpression() const;
449
450 /// Return the debug label referenced by
451 /// this DBG_LABEL instruction.
452 const DILabel *getDebugLabel() const;
453
454 /// Fetch the instruction number of this MachineInstr. If it does not have
455 /// one already, a new and unique number will be assigned.
456 unsigned getDebugInstrNum();
457
458 /// Fetch instruction number of this MachineInstr -- but before it's inserted
459 /// into \p MF. Needed for transformations that create an instruction but
460  /// don't immediately insert it.
461 unsigned getDebugInstrNum(MachineFunction &MF);
462
463 /// Examine the instruction number of this MachineInstr. May be zero if
464 /// it hasn't been assigned a number yet.
465 unsigned peekDebugInstrNum() const { return DebugInstrNum; }
466
467 /// Set instruction number of this MachineInstr. Avoid using unless you're
468 /// deserializing this information.
469 void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; }
470
471 /// Drop any variable location debugging information associated with this
472 /// instruction. Use when an instruction is modified in such a way that it no
473 /// longer defines the value it used to. Variable locations using that value
474 /// will be dropped.
475 void dropDebugNumber() { DebugInstrNum = 0; }
476
477 /// Emit an error referring to the source location of this instruction.
478 /// This should only be used for inline assembly that is somehow
479 /// impossible to compile. Other errors should have been handled much
480 /// earlier.
481 ///
482 /// If this method returns, the caller should try to recover from the error.
483 void emitError(StringRef Msg) const;
484
485 /// Returns the target instruction descriptor of this MachineInstr.
486 const MCInstrDesc &getDesc() const { return *MCID; }
487
488 /// Returns the opcode of this MachineInstr.
489 unsigned getOpcode() const { return MCID->Opcode; }
490
491  /// Returns the total number of operands.
492 unsigned getNumOperands() const { return NumOperands; }
493
494 /// Returns the total number of operands which are debug locations.
495 unsigned getNumDebugOperands() const {
496 return std::distance(debug_operands().begin(), debug_operands().end());
497 }
498
499 const MachineOperand& getOperand(unsigned i) const {
500    assert(i < getNumOperands() && "getOperand() out of range!");
501 return Operands[i];
502 }
503 MachineOperand& getOperand(unsigned i) {
504    assert(i < getNumOperands() && "getOperand() out of range!");
505 return Operands[i];
506 }
507
508 MachineOperand &getDebugOperand(unsigned Index) {
509    assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
510 return *(debug_operands().begin() + Index);
511 }
512 const MachineOperand &getDebugOperand(unsigned Index) const {
513    assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
514 return *(debug_operands().begin() + Index);
515 }
516
517 SmallSet<Register, 4> getUsedDebugRegs() const {
518    assert(isDebugValue() && "not a DBG_VALUE*");
519 SmallSet<Register, 4> UsedRegs;
520 for (const auto &MO : debug_operands())
521 if (MO.isReg() && MO.getReg())
522 UsedRegs.insert(MO.getReg());
523 return UsedRegs;
524 }
525
526 /// Returns whether this debug value has at least one debug operand with the
527 /// register \p Reg.
528 bool hasDebugOperandForReg(Register Reg) const {
529 return any_of(debug_operands(), [Reg](const MachineOperand &Op) {
530 return Op.isReg() && Op.getReg() == Reg;
531 });
532 }
533
534 /// Returns a range of all of the operands that correspond to a debug use of
535 /// \p Reg.
536 template <typename Operand, typename Instruction>
537 static iterator_range<
538 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
539 getDebugOperandsForReg(Instruction *MI, Register Reg) {
540 std::function<bool(Operand & Op)> OpUsesReg(
541 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
542 return make_filter_range(MI->debug_operands(), OpUsesReg);
543 }
544 iterator_range<filter_iterator<const MachineOperand *,
545 std::function<bool(const MachineOperand &Op)>>>
546 getDebugOperandsForReg(Register Reg) const {
547 return MachineInstr::getDebugOperandsForReg<const MachineOperand,
548 const MachineInstr>(this, Reg);
549 }
550 iterator_range<filter_iterator<MachineOperand *,
551 std::function<bool(MachineOperand &Op)>>>
552 getDebugOperandsForReg(Register Reg) {
553 return MachineInstr::getDebugOperandsForReg<MachineOperand, MachineInstr>(
554 this, Reg);
555 }
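
A usage sketch (MI, OldReg, and NewReg are hypothetical in-scope values, not part of this header): rewrite every debug use of OldReg in a DBG_VALUE* instruction to NewReg.

    if (MI.isDebugValue() && MI.hasDebugOperandForReg(OldReg))
      for (MachineOperand &MO : MI.getDebugOperandsForReg(OldReg))
        MO.setReg(NewReg);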
556
557 bool isDebugOperand(const MachineOperand *Op) const {
558 return Op >= adl_begin(debug_operands()) && Op <= adl_end(debug_operands());
559 }
560
561 unsigned getDebugOperandIndex(const MachineOperand *Op) const {
562    assert(isDebugOperand(Op) && "Expected a debug operand.");
563 return std::distance(adl_begin(debug_operands()), Op);
564 }
565
566 /// Returns the total number of definitions.
567 unsigned getNumDefs() const {
568 return getNumExplicitDefs() + MCID->getNumImplicitDefs();
569 }
570
571  /// Returns true if the instruction has an implicit definition.
572 bool hasImplicitDef() const {
573 for (unsigned I = getNumExplicitOperands(), E = getNumOperands();
574 I != E; ++I) {
575 const MachineOperand &MO = getOperand(I);
576 if (MO.isDef() && MO.isImplicit())
577 return true;
578 }
579 return false;
580 }
581
582  /// Returns the number of implicit operands.
583 unsigned getNumImplicitOperands() const {
584 return getNumOperands() - getNumExplicitOperands();
585 }
586
587 /// Return true if operand \p OpIdx is a subregister index.
588 bool isOperandSubregIdx(unsigned OpIdx) const {
589    assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
590           "Expected MO_Immediate operand type.");
591 if (isExtractSubreg() && OpIdx == 2)
592 return true;
593 if (isInsertSubreg() && OpIdx == 3)
594 return true;
595 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
596 return true;
597 if (isSubregToReg() && OpIdx == 3)
598 return true;
599 return false;
600 }
601
602 /// Returns the number of non-implicit operands.
603 unsigned getNumExplicitOperands() const;
604
605 /// Returns the number of non-implicit definitions.
606 unsigned getNumExplicitDefs() const;
607
608 /// iterator/begin/end - Iterate over all operands of a machine instruction.
609 using mop_iterator = MachineOperand *;
610 using const_mop_iterator = const MachineOperand *;
611
612 mop_iterator operands_begin() { return Operands; }
613 mop_iterator operands_end() { return Operands + NumOperands; }
614
615 const_mop_iterator operands_begin() const { return Operands; }
616 const_mop_iterator operands_end() const { return Operands + NumOperands; }
617
618 iterator_range<mop_iterator> operands() {
619 return make_range(operands_begin(), operands_end());
620 }
621 iterator_range<const_mop_iterator> operands() const {
622 return make_range(operands_begin(), operands_end());
623 }
624 iterator_range<mop_iterator> explicit_operands() {
625 return make_range(operands_begin(),
626 operands_begin() + getNumExplicitOperands());
627 }
628 iterator_range<const_mop_iterator> explicit_operands() const {
629 return make_range(operands_begin(),
630 operands_begin() + getNumExplicitOperands());
631 }
632 iterator_range<mop_iterator> implicit_operands() {
633 return make_range(explicit_operands().end(), operands_end());
634 }
635 iterator_range<const_mop_iterator> implicit_operands() const {
636 return make_range(explicit_operands().end(), operands_end());
637 }
638 /// Returns a range over all operands that are used to determine the variable
639 /// location for this DBG_VALUE instruction.
640 iterator_range<mop_iterator> debug_operands() {
641    assert(isDebugValue() && "Must be a debug value instruction.");
642 return isDebugValueList()
643 ? make_range(operands_begin() + 2, operands_end())
644 : make_range(operands_begin(), operands_begin() + 1);
645 }
646 /// \copydoc debug_operands()
647 iterator_range<const_mop_iterator> debug_operands() const {
648    assert(isDebugValue() && "Must be a debug value instruction.");
649 return isDebugValueList()
650 ? make_range(operands_begin() + 2, operands_end())
651 : make_range(operands_begin(), operands_begin() + 1);
652 }
653 /// Returns a range over all explicit operands that are register definitions.
654 /// Implicit definition are not included!
655 iterator_range<mop_iterator> defs() {
656 return make_range(operands_begin(),
657 operands_begin() + getNumExplicitDefs());
658 }
659 /// \copydoc defs()
660 iterator_range<const_mop_iterator> defs() const {
661 return make_range(operands_begin(),
662 operands_begin() + getNumExplicitDefs());
663 }
664 /// Returns a range that includes all operands that are register uses.
665 /// This may include unrelated operands which are not register uses.
666 iterator_range<mop_iterator> uses() {
667 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
668 }
669 /// \copydoc uses()
670 iterator_range<const_mop_iterator> uses() const {
671 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
672 }
673 iterator_range<mop_iterator> explicit_uses() {
674 return make_range(operands_begin() + getNumExplicitDefs(),
675 operands_begin() + getNumExplicitOperands());
676 }
677 iterator_range<const_mop_iterator> explicit_uses() const {
678 return make_range(operands_begin() + getNumExplicitDefs(),
679 operands_begin() + getNumExplicitOperands());
680 }
681
682 /// Returns the number of the operand iterator \p I points to.
683 unsigned getOperandNo(const_mop_iterator I) const {
684 return I - operands_begin();
685 }
686
687 /// Access to memory operands of the instruction. If there are none, that does
688  /// not imply anything about whether the instruction accesses memory. Instead,
689 /// the caller must behave conservatively.
690 ArrayRef<MachineMemOperand *> memoperands() const {
691 if (!Info)
692 return {};
693
694 if (Info.is<EIIK_MMO>())
695 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
696
697 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
698 return EI->getMMOs();
699
700 return {};
701 }
702
703 /// Access to memory operands of the instruction.
704 ///
705 /// If `memoperands_begin() == memoperands_end()`, that does not imply
706  /// anything about whether the instruction accesses memory. Instead, the caller
707 /// must behave conservatively.
708 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
709
710 /// Access to memory operands of the instruction.
711 ///
712 /// If `memoperands_begin() == memoperands_end()`, that does not imply
713  /// anything about whether the instruction accesses memory. Instead, the caller
714 /// must behave conservatively.
715 mmo_iterator memoperands_end() const { return memoperands().end(); }
716
717  /// Return true if we don't have any memory operands which describe the
718 /// memory access done by this instruction. If this is true, calling code
719 /// must be conservative.
720 bool memoperands_empty() const { return memoperands().empty(); }
721
722 /// Return true if this instruction has exactly one MachineMemOperand.
723 bool hasOneMemOperand() const { return memoperands().size() == 1; }
724
725 /// Return the number of memory operands.
726 unsigned getNumMemOperands() const { return memoperands().size(); }
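
For illustration, a conservative helper sketch built on these accessors (the name and the exact policy are hypothetical, not LLVM API):

    static bool mayBeUnsafeToMove(const MachineInstr &MI) {
      if (MI.memoperands_empty())
        return MI.mayLoadOrStore(); // no MMOs: assume the worst
      for (const MachineMemOperand *MMO : MI.memoperands())
        if (MMO->isVolatile())
          return true;
      return false;
    }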
727
728 /// Helper to extract a pre-instruction symbol if one has been added.
729 MCSymbol *getPreInstrSymbol() const {
730 if (!Info)
731 return nullptr;
732 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
733 return S;
734 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
735 return EI->getPreInstrSymbol();
736
737 return nullptr;
738 }
739
740 /// Helper to extract a post-instruction symbol if one has been added.
741 MCSymbol *getPostInstrSymbol() const {
742 if (!Info)
743 return nullptr;
744 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
745 return S;
746 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
747 return EI->getPostInstrSymbol();
748
749 return nullptr;
750 }
751
752 /// Helper to extract a heap alloc marker if one has been added.
753 MDNode *getHeapAllocMarker() const {
754 if (!Info)
755 return nullptr;
756 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
757 return EI->getHeapAllocMarker();
758
759 return nullptr;
760 }
761
762 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
763 /// queries but they are bundle aware.
764
765 enum QueryType {
766 IgnoreBundle, // Ignore bundles
767 AnyInBundle, // Return true if any instruction in bundle has property
768 AllInBundle // Return true if all instructions in bundle have property
769 };
770
771 /// Return true if the instruction (or in the case of a bundle,
772 /// the instructions inside the bundle) has the specified property.
773 /// The first argument is the property being queried.
774 /// The second argument indicates whether the query should look inside
775 /// instruction bundles.
776 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
777    assert(MCFlag < 64 &&
778           "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
779 // Inline the fast path for unbundled or bundle-internal instructions.
780 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
781 return getDesc().getFlags() & (1ULL << MCFlag);
782
783 // If this is the first instruction in a bundle, take the slow path.
784 return hasPropertyInBundle(1ULL << MCFlag, Type);
785 }
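
A sketch of bundle-aware queries on a hypothetical in-scope MachineInstr &MI; the explicit QueryType argument controls whether the rest of the bundle is consulted:

    bool AnyStores = MI.mayStore(MachineInstr::AnyInBundle);  // any MI in bundle
    bool ThisOnly  = MI.mayStore(MachineInstr::IgnoreBundle); // just MI itself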
786
787 /// Return true if this is an instruction that should go through the usual
788 /// legalization steps.
789 bool isPreISelOpcode(QueryType Type = IgnoreBundle) const {
790 return hasProperty(MCID::PreISelOpcode, Type);
791 }
792
793 /// Return true if this instruction can have a variable number of operands.
794 /// In this case, the variable operands will be after the normal
795 /// operands but before the implicit definitions and uses (if any are
796 /// present).
797 bool isVariadic(QueryType Type = IgnoreBundle) const {
798 return hasProperty(MCID::Variadic, Type);
799 }
800
801 /// Set if this instruction has an optional definition, e.g.
802 /// ARM instructions which can set condition code if 's' bit is set.
803 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
804 return hasProperty(MCID::HasOptionalDef, Type);
805 }
806
807 /// Return true if this is a pseudo instruction that doesn't
808 /// correspond to a real machine instruction.
809 bool isPseudo(QueryType Type = IgnoreBundle) const {
810 return hasProperty(MCID::Pseudo, Type);
811 }
812
813 bool isReturn(QueryType Type = AnyInBundle) const {
814 return hasProperty(MCID::Return, Type);
815 }
816
817 /// Return true if this is an instruction that marks the end of an EH scope,
818 /// i.e., a catchpad or a cleanuppad instruction.
819 bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
820 return hasProperty(MCID::EHScopeReturn, Type);
821 }
822
823 bool isCall(QueryType Type = AnyInBundle) const {
824 return hasProperty(MCID::Call, Type);
825 }
826
827 /// Return true if this is a call instruction that may have an associated
828 /// call site entry in the debug info.
829 bool isCandidateForCallSiteEntry(QueryType Type = IgnoreBundle) const;
830 /// Return true if copying, moving, or erasing this instruction requires
831 /// updating Call Site Info (see \ref copyCallSiteInfo, \ref moveCallSiteInfo,
832 /// \ref eraseCallSiteInfo).
833 bool shouldUpdateCallSiteInfo() const;
834
835 /// Returns true if the specified instruction stops control flow
836 /// from executing the instruction immediately following it. Examples include
837 /// unconditional branches and return instructions.
838 bool isBarrier(QueryType Type = AnyInBundle) const {
839 return hasProperty(MCID::Barrier, Type);
840 }
841
842  /// Returns true if this instruction is part of the terminator for a basic block.
843 /// Typically this is things like return and branch instructions.
844 ///
845 /// Various passes use this to insert code into the bottom of a basic block,
846 /// but before control flow occurs.
847 bool isTerminator(QueryType Type = AnyInBundle) const {
848 return hasProperty(MCID::Terminator, Type);
849 }
850
851 /// Returns true if this is a conditional, unconditional, or indirect branch.
852 /// Predicates below can be used to discriminate between
853 /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to
854 /// get more information.
855 bool isBranch(QueryType Type = AnyInBundle) const {
856 return hasProperty(MCID::Branch, Type);
857 }
858
859 /// Return true if this is an indirect branch, such as a
860 /// branch through a register.
861 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
862 return hasProperty(MCID::IndirectBranch, Type);
863 }
864
865 /// Return true if this is a branch which may fall
866 /// through to the next instruction or may transfer control flow to some other
867 /// block. The TargetInstrInfo::analyzeBranch method can be used to get more
868 /// information about this branch.
869 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
870 return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
871 }
872
873 /// Return true if this is a branch which always
874 /// transfers control flow to some other block. The
875 /// TargetInstrInfo::analyzeBranch method can be used to get more information
876 /// about this branch.
877 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
878 return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
879 }
880
881 /// Return true if this instruction has a predicate operand that
882 /// controls execution. It may be set to 'always', or may be set to other
883 /// values. There are various methods in TargetInstrInfo that can be used to
884 /// control and modify the predicate in this instruction.
885 bool isPredicable(QueryType Type = AllInBundle) const {
886    // If it's a bundle then all bundled instructions must be predicable for this
887 // to return true.
888 return hasProperty(MCID::Predicable, Type);
889 }
890
891 /// Return true if this instruction is a comparison.
892 bool isCompare(QueryType Type = IgnoreBundle) const {
893 return hasProperty(MCID::Compare, Type);
894 }
895
896 /// Return true if this instruction is a move immediate
897 /// (including conditional moves) instruction.
898 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
899 return hasProperty(MCID::MoveImm, Type);
900 }
901
902 /// Return true if this instruction is a register move.
903 /// (including moving values from subreg to reg)
904 bool isMoveReg(QueryType Type = IgnoreBundle) const {
905 return hasProperty(MCID::MoveReg, Type);
906 }
907
908 /// Return true if this instruction is a bitcast instruction.
909 bool isBitcast(QueryType Type = IgnoreBundle) const {
910 return hasProperty(MCID::Bitcast, Type);
911 }
912
913 /// Return true if this instruction is a select instruction.
914 bool isSelect(QueryType Type = IgnoreBundle) const {
915 return hasProperty(MCID::Select, Type);
916 }
917
918 /// Return true if this instruction cannot be safely duplicated.
919 /// For example, if the instruction has a unique labels attached
920 /// to it, duplicating it would cause multiple definition errors.
921 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
922 return hasProperty(MCID::NotDuplicable, Type);
923 }
924
925 /// Return true if this instruction is convergent.
926  /// Convergent instructions cannot be made control-dependent on any
927 /// additional values.
928 bool isConvergent(QueryType Type = AnyInBundle) const {
929 if (isInlineAsm()) {
930 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
931 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
932 return true;
933 }
934 return hasProperty(MCID::Convergent, Type);
935 }
936
937 /// Returns true if the specified instruction has a delay slot
938 /// which must be filled by the code generator.
939 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
940 return hasProperty(MCID::DelaySlot, Type);
941 }
942
943 /// Return true for instructions that can be folded as
944 /// memory operands in other instructions. The most common use for this
945 /// is instructions that are simple loads from memory that don't modify
946 /// the loaded value in any way, but it can also be used for instructions
947 /// that can be expressed as constant-pool loads, such as V_SETALLONES
948 /// on x86, to allow them to be folded when it is beneficial.
949 /// This should only be set on instructions that return a value in their
950 /// only virtual register definition.
951 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
952 return hasProperty(MCID::FoldableAsLoad, Type);
953 }
954
955 /// Return true if this instruction behaves
956 /// the same way as the generic REG_SEQUENCE instructions.
957 /// E.g., on ARM,
958 /// dX VMOVDRR rY, rZ
959 /// is equivalent to
960 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
961 ///
962 /// Note that for the optimizers to be able to take advantage of
963 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
964  /// overridden accordingly.
965 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
966 return hasProperty(MCID::RegSequence, Type);
967 }
968
969 /// Return true if this instruction behaves
970 /// the same way as the generic EXTRACT_SUBREG instructions.
971 /// E.g., on ARM,
972 /// rX, rY VMOVRRD dZ
973 /// is equivalent to two EXTRACT_SUBREG:
974 /// rX = EXTRACT_SUBREG dZ, ssub_0
975 /// rY = EXTRACT_SUBREG dZ, ssub_1
976 ///
977 /// Note that for the optimizers to be able to take advantage of
978 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
979  /// overridden accordingly.
980 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
981 return hasProperty(MCID::ExtractSubreg, Type);
982 }
983
984 /// Return true if this instruction behaves
985 /// the same way as the generic INSERT_SUBREG instructions.
986 /// E.g., on ARM,
987 /// dX = VSETLNi32 dY, rZ, Imm
988  /// is equivalent to an INSERT_SUBREG:
989 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
990 ///
991 /// Note that for the optimizers to be able to take advantage of
992 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
993  /// overridden accordingly.
994 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
995 return hasProperty(MCID::InsertSubreg, Type);
996 }
997
998 //===--------------------------------------------------------------------===//
999 // Side Effect Analysis
1000 //===--------------------------------------------------------------------===//
1001
1002 /// Return true if this instruction could possibly read memory.
1003 /// Instructions with this flag set are not necessarily simple load
1004 /// instructions, they may load a value and modify it, for example.
1005 bool mayLoad(QueryType Type = AnyInBundle) const {
1006 if (isInlineAsm()) {
1007 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1008 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1009 return true;
1010 }
1011 return hasProperty(MCID::MayLoad, Type);
1012 }
1013
1014 /// Return true if this instruction could possibly modify memory.
1015 /// Instructions with this flag set are not necessarily simple store
1016 /// instructions, they may store a modified value based on their operands, or
1017 /// may not actually modify anything, for example.
1018 bool mayStore(QueryType Type = AnyInBundle) const {
1019 if (isInlineAsm()) {
55: Assuming the condition is false
56: Taking false branch
1020 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1021 if (ExtraInfo & InlineAsm::Extra_MayStore)
1022 return true;
1023 }
1024 return hasProperty(MCID::MayStore, Type);
57: Returning value, which participates in a condition later
1025 }
1026
1027 /// Return true if this instruction could possibly read or modify memory.
1028 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
1029 return mayLoad(Type) || mayStore(Type);
1030 }
1031
1032 /// Return true if this instruction could possibly raise a floating-point
1033 /// exception. This is the case if the instruction is a floating-point
1034 /// instruction that can in principle raise an exception, as indicated
1035 /// by the MCID::MayRaiseFPException property, *and* at the same time,
1036 /// the instruction is used in a context where we expect floating-point
1037 /// exceptions are not disabled, as indicated by the NoFPExcept MI flag.
1038 bool mayRaiseFPException() const {
1039 return hasProperty(MCID::MayRaiseFPException) &&
1040 !getFlag(MachineInstr::MIFlag::NoFPExcept);
1041 }
1042
1043 //===--------------------------------------------------------------------===//
1044 // Flags that indicate whether an instruction can be modified by a method.
1045 //===--------------------------------------------------------------------===//
1046
1047 /// Return true if this may be a 2- or 3-address
1048 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
1049 /// result if Y and Z are exchanged. If this flag is set, then the
1050 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
1051 /// instruction.
1052 ///
1053 /// Note that this flag may be set on instructions that are only commutable
1054 /// sometimes. In these cases, the call to commuteInstruction will fail.
1055 /// Also note that some instructions require non-trivial modification to
1056 /// commute them.
1057 bool isCommutable(QueryType Type = IgnoreBundle) const {
1058 return hasProperty(MCID::Commutable, Type);
1059 }
1060
1061 /// Return true if this is a 2-address instruction
1062 /// which can be changed into a 3-address instruction if needed. Doing this
1063 /// transformation can be profitable in the register allocator, because it
1064 /// means that the instruction can use a 2-address form if possible, but
1065 /// degrade into a less efficient form if the source and dest register cannot
1066 /// be assigned to the same register. For example, this allows the x86
1067 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
1068 /// is the same speed as the shift but has bigger code size.
1069 ///
1070 /// If this returns true, then the target must implement the
1071 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
1072 /// is allowed to fail if the transformation isn't valid for this specific
1073 /// instruction (e.g. shl reg, 4 on x86).
1074 ///
1075 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
1076 return hasProperty(MCID::ConvertibleTo3Addr, Type);
1077 }
1078
1079 /// Return true if this instruction requires
1080 /// custom insertion support when the DAG scheduler is inserting it into a
1081 /// machine basic block. If this is true for the instruction, it basically
1082 /// means that it is a pseudo instruction used at SelectionDAG time that is
1083 /// expanded out into magic code by the target when MachineInstrs are formed.
1084 ///
1085 /// If this is true, the TargetLowering::EmitInstrWithCustomInserter hook
1086 /// is used to insert this into the MachineBasicBlock.
1087 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
1088 return hasProperty(MCID::UsesCustomInserter, Type);
1089 }
1090
1091 /// Return true if this instruction requires *adjustment*
1092 /// after instruction selection by calling a target hook. For example, this
1093 /// can be used to fill in the ARM 's' optional operand depending on whether
1094 /// the conditional flag register is used.
1095 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
1096 return hasProperty(MCID::HasPostISelHook, Type);
1097 }
1098
1099 /// Returns true if this instruction is a candidate for remat.
1100 /// This flag is deprecated; please don't use it anymore. If this
1101 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
1102 /// verify the instruction is really rematerializable.
1103 bool isRematerializable(QueryType Type = AllInBundle) const {
1104 // It's only possible to re-mat a bundle if all bundled instructions are
1105 // re-materializable.
1106 return hasProperty(MCID::Rematerializable, Type);
1107 }
1108
1109 /// Returns true if this instruction has the same cost (or less) than a move
1110 /// instruction. This is useful during certain types of optimizations
1111 /// (e.g., remat during two-address conversion or machine licm)
1112 /// where we would like to remat or hoist the instruction, but not if it costs
1113 /// more than moving the instruction into the appropriate register. Note, we
1114 /// are not marking copies from and to the same register class with this flag.
1115 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
1116 // Only returns true for a bundle if all bundled instructions are cheap.
1117 return hasProperty(MCID::CheapAsAMove, Type);
1118 }
1119
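A sketch of a first-level remat filter combining the two properties above; a
real pass must still verify the candidate through TargetInstrInfo, as the
deprecation note explains:

  static bool looksRemattable(const llvm::MachineInstr &MI) {
    // Cheap, rematerializable instructions are the classic candidates for
    // recomputing instead of spilling.
    return MI.isRematerializable() && MI.isAsCheapAsAMove();
  }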
1120 /// Returns true if this instruction's source operands
1121 /// have special register allocation requirements that are not captured by the
1122 /// operand register classes. e.g. ARM::STRD's two source registers must be an
1123 /// even / odd pair, ARM::STM registers have to be in ascending order.
1124 /// Post-register allocation passes should not attempt to change allocations
1125 /// for sources of instructions with this flag.
1126 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
1127 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
1128 }
1129
1130 /// Returns true if this instruction's def operands
1131 /// have special register allocation requirements that are not captured by the
1132 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
1133 /// even / odd pair, ARM::LDM registers have to be in ascending order.
1134 /// Post-register allocation passes should not attempt to change allocations
1135 /// for definitions of instructions with this flag.
1136 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
1137 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
1138 }
1139
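A sketch of the guard a post-RA rewriting pass would apply, given the two
predicates above:

  // Operands with extra allocation requirements (even/odd pairs, ascending
  // order, ...) must keep the registers the allocator chose.
  static bool mayReassignRegisters(const llvm::MachineInstr &MI) {
    return !MI.hasExtraSrcRegAllocReq() && !MI.hasExtraDefRegAllocReq();
  }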
1140 enum MICheckType {
1141 CheckDefs, // Check all operands for equality
1142 CheckKillDead, // Check all operands including kill / dead markers
1143 IgnoreDefs, // Ignore all definitions
1144 IgnoreVRegDefs // Ignore virtual register definitions
1145 };
1146
1147 /// Return true if this instruction is identical to \p Other.
1148 /// Two instructions are identical if they have the same opcode and all their
1149 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
1150 /// Note that this means liveness related flags (dead, undef, kill) do not
1151 /// affect the notion of identical.
1152 bool isIdenticalTo(const MachineInstr &Other,
1153 MICheckType Check = CheckDefs) const;
1154
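A machine-CSE-style sketch: with IgnoreVRegDefs, two computations match even
though they define different virtual registers, which is exactly what a CSE
pass wants to detect.

  static bool isCSECandidatePair(const llvm::MachineInstr &A,
                                 const llvm::MachineInstr &B) {
    // Liveness markers (kill/dead/undef) do not affect the comparison.
    return A.isIdenticalTo(B, llvm::MachineInstr::IgnoreVRegDefs);
  }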
1155 /// Unlink 'this' from the containing basic block, and return it without
1156 /// deleting it.
1157 ///
1158 /// This function can not be used on bundled instructions, use
1159 /// removeFromBundle() to remove individual instructions from a bundle.
1160 MachineInstr *removeFromParent();
1161
1162 /// Unlink this instruction from its basic block and return it without
1163 /// deleting it.
1164 ///
1165 /// If the instruction is part of a bundle, the other instructions in the
1166 /// bundle remain bundled.
1167 MachineInstr *removeFromBundle();
1168
1169 /// Unlink 'this' from the containing basic block and delete it.
1170 ///
1171 /// If this instruction is the header of a bundle, the whole bundle is erased.
1172 /// This function can not be used for instructions inside a bundle, use
1173 /// eraseFromBundle() to erase individual bundled instructions.
1174 void eraseFromParent();
1175
1176 /// Unlink 'this' from its basic block and delete it.
1177 ///
1178 /// If the instruction is part of a bundle, the other instructions in the
1179 /// bundle remain bundled.
1180 void eraseFromBundle();
1181
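The remove* functions unlink without deleting, so the instruction can be
re-inserted elsewhere; the erase* functions destroy it. A sketch, assuming
in-scope MI and a destination block MBB:

  // Move MI to just before the terminators of MBB.
  llvm::MachineInstr *Moved = MI->removeFromParent(); // still alive
  MBB->insert(MBB->getFirstTerminator(), Moved);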
1182 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
1183 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
1184 bool isAnnotationLabel() const {
1185 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
1186 }
1187
1188 /// Returns true if the MachineInstr represents a label.
1189 bool isLabel() const {
1190 return isEHLabel() || isGCLabel() || isAnnotationLabel();
1191 }
1192
1193 bool isCFIInstruction() const {
1194 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
1195 }
1196
1197 bool isPseudoProbe() const {
1198 return getOpcode() == TargetOpcode::PSEUDO_PROBE;
1199 }
1200
1201 // True if the instruction represents a position in the function.
1202 bool isPosition() const { return isLabel() || isCFIInstruction(); }
1203
1204 bool isNonListDebugValue() const {
1205 return getOpcode() == TargetOpcode::DBG_VALUE;
1206 }
1207 bool isDebugValueList() const {
1208 return getOpcode() == TargetOpcode::DBG_VALUE_LIST;
1209 }
1210 bool isDebugValue() const {
1211 return isNonListDebugValue() || isDebugValueList();
1212 }
1213 bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
1214 bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
1215 bool isDebugPHI() const { return getOpcode() == TargetOpcode::DBG_PHI; }
1216 bool isDebugInstr() const {
1217 return isDebugValue() || isDebugLabel() || isDebugRef() || isDebugPHI();
1218 }
1219 bool isDebugOrPseudoInstr() const {
1220 return isDebugInstr() || isPseudoProbe();
1221 }
1222
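A sketch of the standard iteration idiom, assuming an in-scope
MachineBasicBlock &MBB: skipping debug and pseudo-probe instructions keeps
codegen decisions independent of -g.

  for (llvm::MachineInstr &MI : MBB) {
    if (MI.isDebugOrPseudoInstr())
      continue; // must not influence any transformation below
    // ... analyze or transform MI ...
  }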
1223 bool isDebugOffsetImm() const {
1224 return isNonListDebugValue() && getDebugOffset().isImm();
1225 }
1226
1227 /// A DBG_VALUE is indirect iff the location operand is a register and
1228 /// the offset operand is an immediate.
1229 bool isIndirectDebugValue() const {
1230 return isDebugOffsetImm() && getDebugOperand(0).isReg();
1231 }
1232
1233 /// A DBG_VALUE is an entry value iff its debug expression contains the
1234 /// DW_OP_LLVM_entry_value operation.
1235 bool isDebugEntryValue() const;
1236
1237 /// Return true if the instruction is a debug value which describes a part of
1238 /// a variable as unavailable.
1239 bool isUndefDebugValue() const {
1240 if (!isDebugValue())
1241 return false;
1242 // If any $noreg locations are given, this DV is undef.
1243 for (const MachineOperand &Op : debug_operands())
1244 if (Op.isReg() && !Op.getReg().isValid())
1245 return true;
1246 return false;
1247 }
1248
1249 bool isPHI() const {
1250 return getOpcode() == TargetOpcode::PHI ||
1251 getOpcode() == TargetOpcode::G_PHI;
1252 }
1253 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
1254 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
1255 bool isInlineAsm() const {
1256 return getOpcode() == TargetOpcode::INLINEASM ||
1257 getOpcode() == TargetOpcode::INLINEASM_BR;
1258 }
1259
1260 /// FIXME: Seems like a layering violation that the AsmDialect, which is X86
1261 /// specific, is attached to a generic MachineInstr.
1262 bool isMSInlineAsm() const {
1263 return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
1264 }
1265
1266 bool isStackAligningInlineAsm() const;
1267 InlineAsm::AsmDialect getInlineAsmDialect() const;
1268
1269 bool isInsertSubreg() const {
1270 return getOpcode() == TargetOpcode::INSERT_SUBREG;
1271 }
1272
1273 bool isSubregToReg() const {
1274 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
1275 }
1276
1277 bool isRegSequence() const {
1278 return getOpcode() == TargetOpcode::REG_SEQUENCE;
1279 }
1280
1281 bool isBundle() const {
1282 return getOpcode() == TargetOpcode::BUNDLE;
1283 }
1284
1285 bool isCopy() const {
1286 return getOpcode() == TargetOpcode::COPY;
51: Assuming the condition is false
52: Returning zero, which participates in a condition later
1287 }
1288
1289 bool isFullCopy() const {
1290 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
1291 }
1292
1293 bool isExtractSubreg() const {
1294 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
1295 }
1296
1297 /// Return true if the instruction behaves like a copy.
1298 /// This does not include native copy instructions.
1299 bool isCopyLike() const {
1300 return isCopy() || isSubregToReg();
1301 }
1302
1303 /// Return true if the instruction is an identity copy.
1304 bool isIdentityCopy() const {
1305 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
1306 getOperand(0).getSubReg() == getOperand(1).getSubReg();
1307 }
1308
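Identity copies are no-ops, so passes commonly delete them. A sketch,
assuming an in-scope MachineBasicBlock &MBB; make_early_inc_range (from
llvm/ADT/STLExtras.h) keeps iteration valid across the erase:

  for (llvm::MachineInstr &MI : llvm::make_early_inc_range(MBB))
    if (MI.isIdentityCopy())
      MI.eraseFromParent();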
1309 /// Return true if this instruction doesn't produce any output in the form of
1310 /// executable instructions.
1311 bool isMetaInstruction() const {
1312 switch (getOpcode()) {
1313 default:
1314 return false;
1315 case TargetOpcode::IMPLICIT_DEF:
1316 case TargetOpcode::KILL:
1317 case TargetOpcode::CFI_INSTRUCTION:
1318 case TargetOpcode::EH_LABEL:
1319 case TargetOpcode::GC_LABEL:
1320 case TargetOpcode::DBG_VALUE:
1321 case TargetOpcode::DBG_VALUE_LIST:
1322 case TargetOpcode::DBG_INSTR_REF:
1323 case TargetOpcode::DBG_PHI:
1324 case TargetOpcode::DBG_LABEL:
1325 case TargetOpcode::LIFETIME_START:
1326 case TargetOpcode::LIFETIME_END:
1327 case TargetOpcode::PSEUDO_PROBE:
1328 case TargetOpcode::ARITH_FENCE:
1329 return true;
1330 }
1331 }
1332
1333 /// Return true if this is a transient instruction that is either very likely
1334 /// to be eliminated during register allocation (such as copy-like
1335 /// instructions), or if this instruction doesn't have an execution-time cost.
1336 bool isTransient() const {
1337 switch (getOpcode()) {
1338 default:
1339 return isMetaInstruction();
1340 // Copy-like instructions are usually eliminated during register allocation.
1341 case TargetOpcode::PHI:
1342 case TargetOpcode::G_PHI:
1343 case TargetOpcode::COPY:
1344 case TargetOpcode::INSERT_SUBREG:
1345 case TargetOpcode::SUBREG_TO_REG:
1346 case TargetOpcode::REG_SEQUENCE:
1347 return true;
1348 }
1349 }
1350
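A sketch of a size heuristic built on isTransient(), counting only the
instructions expected to survive to emission:

  static unsigned countRealInstructions(const llvm::MachineBasicBlock &MBB) {
    return llvm::count_if(MBB, [](const llvm::MachineInstr &MI) {
      return !MI.isTransient(); // copies, PHIs, and meta instrs don't count
    });
  }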
1351 /// Return the number of instructions inside the MI bundle, excluding the
1352 /// bundle header.
1353 ///
1354 /// This is the number of instructions that MachineBasicBlock::iterator
1355 /// skips, 0 for unbundled instructions.
1356 unsigned getBundleSize() const;
1357
1358 /// Return true if the MachineInstr reads the specified register.
1359 /// If TargetRegisterInfo is passed, then it also checks if there
1360 /// is a read of a super-register.
1361 /// This does not count partial redefines of virtual registers as reads:
1362 /// %reg1024:6 = OP.
1363 bool readsRegister(Register Reg,
1364 const TargetRegisterInfo *TRI = nullptr) const {
1365 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
1366 }
1367
1368 /// Return true if the MachineInstr reads the specified virtual register.
1369 /// Take into account that a partial define is a
1370 /// read-modify-write operation.
1371 bool readsVirtualRegister(Register Reg) const {
1372 return readsWritesVirtualRegister(Reg).first;
1373 }
1374
1375 /// Return a pair of bools (reads, writes) indicating if this instruction
1376 /// reads or writes Reg. This also considers partial defines.
1377 /// If Ops is not null, all operand indices for Reg are added.
1378 std::pair<bool,bool> readsWritesVirtualRegister(Register Reg,
1379 SmallVectorImpl<unsigned> *Ops = nullptr) const;
1380
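A sketch of the query, assuming in-scope MI and Reg (and <tuple> for
std::tie): a partial redefine such as "%reg1024:6 = OP" reports both a read
and a write.

  bool Reads, Writes;
  std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);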
1381 /// Return true if the MachineInstr kills the specified register.
1382 /// If TargetRegisterInfo is passed, then it also checks if there is
1383 /// a kill of a super-register.
1384 bool killsRegister(Register Reg,
1385 const TargetRegisterInfo *TRI = nullptr) const {
1386 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
1387 }
1388
1389 /// Return true if the MachineInstr fully defines the specified register.
1390 /// If TargetRegisterInfo is passed, then it also checks
1391 /// if there is a def of a super-register.
1392 /// NOTE: It ignores subreg indices on virtual registers.
1393 bool definesRegister(Register Reg,
1394 const TargetRegisterInfo *TRI = nullptr) const {
1395 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
1396 }
1397
1398 /// Return true if the MachineInstr modifies (fully define or partially
1399 /// define) the specified register.
1400 /// NOTE: It ignores subreg indices on virtual registers.
1401 bool modifiesRegister(Register Reg,
1402 const TargetRegisterInfo *TRI = nullptr) const {
1403 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
1404 }
1405
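A sketch of a clobber check, assuming an in-scope TargetRegisterInfo *TRI;
the X86::EFLAGS enum comes from the backend's generated headers and stands
in here for any physical register of interest:

  // True for full defs, partial defs, and (with TRI) super-register defs.
  bool ClobbersFlags = MI.modifiesRegister(llvm::X86::EFLAGS, TRI);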
1406 /// Returns true if the register is dead in this machine instruction.
1407 /// If TargetRegisterInfo is passed, then it also checks
1408 /// if there is a dead def of a super-register.
1409 bool registerDefIsDead(Register Reg,
1410 const TargetRegisterInfo *TRI = nullptr) const {
1411 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
1412 }
1413
1414 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1415 /// the given register (not considering sub/super-registers).
1416 bool hasRegisterImplicitUseOperand(Register Reg) const;
1417
1418 /// Returns the operand index that is a use of the specified register or -1
1419 /// if it is not found. It further tightens the search criteria to a use
1420 /// that kills the register if isKill is true.
1421 int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
1422 const TargetRegisterInfo *TRI = nullptr) const;
1423
1424 /// Wrapper for findRegisterUseOperandIdx, it returns
1425 /// a pointer to the MachineOperand rather than an index.
1426 MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false,
1427 const TargetRegisterInfo *TRI = nullptr) {
1428 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
1429 return (Idx == -1) ? nullptr : &getOperand(Idx);
1430 }
1431
1432 const MachineOperand *findRegisterUseOperand(
1433 Register Reg, bool isKill = false,
1434 const TargetRegisterInfo *TRI = nullptr) const {
1435 return const_cast<MachineInstr *>(this)->
1436 findRegisterUseOperand(Reg, isKill, TRI);
1437 }
1438
1439 /// Returns the operand index that is a def of the specified register or
1440 /// -1 if it is not found. If isDead is true, defs that are not dead are
1441 /// skipped. If Overlap is true, then it also looks for defs that merely
1442 /// overlap the specified register. If TargetRegisterInfo is non-null,
1443 /// then it also checks if there is a def of a super-register.
1444 /// This may also return a register mask operand when Overlap is true.
1445 int findRegisterDefOperandIdx(Register Reg,
1446 bool isDead = false, bool Overlap = false,
1447 const TargetRegisterInfo *TRI = nullptr) const;
1448
1449 /// Wrapper for findRegisterDefOperandIdx, it returns
1450 /// a pointer to the MachineOperand rather than an index.
1451 MachineOperand *
1452 findRegisterDefOperand(Register Reg, bool isDead = false,
1453 bool Overlap = false,
1454 const TargetRegisterInfo *TRI = nullptr) {
1455 int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
1456 return (Idx == -1) ? nullptr : &getOperand(Idx);
1457 }
1458
1459 const MachineOperand *
1460 findRegisterDefOperand(Register Reg, bool isDead = false,
1461 bool Overlap = false,
1462 const TargetRegisterInfo *TRI = nullptr) const {
1463 return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
1464 Reg, isDead, Overlap, TRI);
1465 }
1466
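A sketch pairing the wrapper with MachineOperand mutation, assuming in-scope
MI and Reg:

  // Find the def of Reg on this instruction (if any) and mark it dead.
  if (llvm::MachineOperand *DefMO = MI.findRegisterDefOperand(Reg))
    DefMO->setIsDead();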
1467 /// Find the index of the first operand in the
1468 /// operand list that is used to represent the predicate. It returns -1 if
1469 /// none is found.
1470 int findFirstPredOperandIdx() const;
1471
1472 /// Find the index of the flag word operand that
1473 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1474 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1475 ///
1476 /// If GroupNo is not NULL, it will receive the number of the operand group
1477 /// containing OpIdx.
1478 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
1479
1480 /// Compute the static register class constraint for operand OpIdx.
1481 /// For normal instructions, this is derived from the MCInstrDesc.
1482 /// For inline assembly it is derived from the flag words.
1483 ///
1484 /// Returns NULL if the static register class constraint cannot be
1485 /// determined.
1486 const TargetRegisterClass*
1487 getRegClassConstraint(unsigned OpIdx,
1488 const TargetInstrInfo *TII,
1489 const TargetRegisterInfo *TRI) const;
1490
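A sketch of the common pattern, assuming in-scope MRI (MachineRegisterInfo),
TII, TRI, and an operand index OpIdx known to be a virtual register:

  // Fold the operand's static class constraint into the vreg's class;
  // constrainRegClass returns null if the classes are incompatible.
  if (const llvm::TargetRegisterClass *RC =
          MI.getRegClassConstraint(OpIdx, TII, TRI))
    MRI.constrainRegClass(MI.getOperand(OpIdx).getReg(), RC);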
1491 /// Applies the constraints (def/use) implied by this MI on \p Reg to
1492 /// the given \p CurRC.
1493 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1494 /// instructions inside the bundle will be taken into account. In other words,
1495 /// this method accumulates all the constraints of the operand of this MI and
1496 /// the related bundle if MI is a bundle or inside a bundle.
1497 ///
1498 /// Returns the register class that satisfies both \p CurRC and the
1499 /// constraints set by MI. Returns NULL if such a register class does not
1500 /// exist.
1501 ///
1502 /// \pre CurRC must not be NULL.
1503 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1504 Register Reg, const TargetRegisterClass *CurRC,
1505 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1506 bool ExploreBundle = false) const;
1507
1508 /// Applies the constraints (def/use) implied by the \p OpIdx operand
1509 /// to the given \p CurRC.
1510 ///
1511 /// Returns the register class that satisfies both \p CurRC and the
1512 /// constraints set by operand \p OpIdx of this MI. Returns NULL if such a
1513 /// register class does not exist.
1514 ///
1515 /// \pre CurRC must not be NULL.
1516 /// \pre The operand at \p OpIdx must be a register.