File: | llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp |
Warning: | line 4043, column 9 Array access (from variable 'MLiveIns') results in a null pointer dereference |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// \file InstrRefBasedImpl.cpp | |||
9 | /// | |||
10 | /// This is a separate implementation of LiveDebugValues, see | |||
11 | /// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information. | |||
12 | /// | |||
13 | /// This pass propagates variable locations between basic blocks, resolving | |||
14 | /// control flow conflicts between them. The problem is much like SSA | |||
15 | /// construction, where each DBG_VALUE instruction assigns the *value* that | |||
16 | /// a variable has, and every instruction where the variable is in scope uses | |||
17 | /// that variable. The resulting map of instruction-to-value is then translated | |||
18 | /// into a register (or spill) location for each variable over each instruction. | |||
19 | /// | |||
20 | /// This pass determines which DBG_VALUE dominates which instructions, or if | |||
21 | /// none do, where values must be merged (like PHI nodes). The added | |||
22 | /// complication is that because codegen has already finished, a PHI node may | |||
23 | /// be needed for a variable location to be correct, but no register or spill | |||
24 | /// slot merges the necessary values. In these circumstances, the variable | |||
25 | /// location is dropped. | |||
26 | /// | |||
27 | /// What makes this analysis non-trivial is loops: we cannot tell in advance | |||
28 | /// whether a variable location is live throughout a loop, or whether its | |||
29 | /// location is clobbered (or redefined by another DBG_VALUE), without | |||
30 | /// exploring all the way through. | |||
31 | /// | |||
32 | /// To make this simpler we perform two kinds of analysis. First, we identify | |||
33 | /// every value defined by every instruction (ignoring those that only move | |||
34 | /// another value), then compute a map of which values are available for each | |||
35 | /// instruction. This is stronger than a reaching-def analysis, as we create | |||
36 | /// PHI values where other values merge. | |||
37 | /// | |||
38 | /// Secondly, for each variable, we effectively re-construct SSA using each | |||
39 | /// DBG_VALUE as a def. The DBG_VALUEs read a value-number computed by the | |||
40 | /// first analysis from the location they refer to. We can then compute the | |||
41 | /// dominance frontiers of where a variable has a value, and create PHI nodes | |||
42 | /// where they merge. | |||
43 | /// This isn't precisely SSA-construction though, because the function shape | |||
44 | /// is pre-defined. If a variable location requires a PHI node, but no | |||
45 | /// PHI for the relevant values is present in the function (as computed by the | |||
46 | /// first analysis), the location must be dropped. | |||
47 | /// | |||
48 | /// Once both are complete, we can pass back over all instructions knowing: | |||
49 | /// * What _value_ each variable should contain, either defined by an | |||
50 | /// instruction or where control flow merges | |||
51 | /// * What the location of that value is (if any). | |||
52 | /// Allowing us to create appropriate live-in DBG_VALUEs, and DBG_VALUEs when | |||
53 | /// a value moves location. After this pass runs, all variable locations within | |||
54 | /// a block should be specified by DBG_VALUEs within that block, allowing | |||
55 | /// DbgEntityHistoryCalculator to focus on individual blocks. | |||
56 | /// | |||
57 | /// This pass is able to go fast because the size of the first | |||
58 | /// reaching-definition analysis is proportional to the working-set size of | |||
59 | /// the function, which the compiler tries to keep small. (It's also | |||
60 | /// proportional to the number of blocks). Additionally, we repeatedly perform | |||
61 | /// the second reaching-definition analysis with only the variables and blocks | |||
62 | /// in a single lexical scope, exploiting their locality. | |||
63 | /// | |||
64 | /// Determining where PHIs happen is trickier with this approach, and it comes | |||
65 | /// to a head in the major problem for LiveDebugValues: is a value live-through | |||
66 | /// a loop, or not? Your garden-variety dataflow analysis aims to build a set of | |||
67 | /// facts about a function, however this analysis needs to generate new value | |||
68 | /// numbers at joins. | |||
69 | /// | |||
70 | /// To do this, consider a lattice of all definition values, from instructions | |||
71 | /// and from PHIs. Each PHI is characterised by the RPO number of the block it | |||
72 | /// occurs in. Each value pair A, B can be ordered by RPO(A) < RPO(B): | |||
73 | /// with non-PHI values at the top, and any PHI value in the last block (by RPO | |||
74 | /// order) at the bottom. | |||
75 | /// | |||
76 | /// (Awkwardly: lower-down-the _lattice_ means a greater RPO _number_. Below, | |||
77 | /// "rank" always refers to the former). | |||
78 | /// | |||
79 | /// At any join, for each register, we consider: | |||
80 | /// * All incoming values, and | |||
81 | /// * The PREVIOUS live-in value at this join. | |||
82 | /// If all incoming values agree: that's the live-in value. If they do not, the | |||
83 | /// incoming values are ranked according to the partial order, and the NEXT | |||
84 | /// LOWEST rank after the PREVIOUS live-in value is picked (multiple values of | |||
85 | /// the same rank are ignored as conflicting). If there are no candidate values, | |||
86 | /// or if the rank of the live-in would be lower than the rank of the current | |||
87 | /// blocks PHIs, create a new PHI value. | |||
88 | /// | |||
89 | /// Intuitively: if it's not immediately obvious what value a join should result | |||
90 | /// in, we iteratively descend from instruction-definitions down through PHI | |||
91 | /// values, getting closer to the current block each time. If the current block | |||
92 | /// is a loop head, this ordering is effectively searching outer levels of | |||
93 | /// loops, to find a value that's live-through the current loop. | |||
94 | /// | |||
95 | /// If there is no value that's live-through this loop, a PHI is created for | |||
96 | /// this location instead. We can't use a lower-ranked PHI because by definition | |||
97 | /// it doesn't dominate the current block. We can't create a PHI value any | |||
98 | /// earlier, because we risk creating a PHI value at a location where values do | |||
99 | /// not in fact merge, thus misrepresenting the truth, and not making the true | |||
100 | /// live-through value for variable locations. | |||
101 | /// | |||
102 | /// This algorithm applies to both calculating the availability of values in | |||
103 | /// the first analysis, and the location of variables in the second. However | |||
104 | /// for the second we add an extra dimension of pain: creating a variable | |||
105 | /// location PHI is only valid if, for each incoming edge, | |||
106 | /// * There is a value for the variable on the incoming edge, and | |||
107 | /// * All the edges have that value in the same register. | |||
108 | /// Or put another way: we can only create a variable-location PHI if there is | |||
109 | /// a matching machine-location PHI, each input to which is the variables value | |||
110 | /// in the predecessor block. | |||
111 | /// | |||
112 | /// To accommodate this difference, each point on the lattice is split in | |||
113 | /// two: a "proposed" PHI and "definite" PHI. Any PHI that can immediately | |||
114 | /// have a location determined are "definite" PHIs, and no further work is | |||
115 | /// needed. Otherwise, a location that all non-backedge predecessors agree | |||
116 | /// on is picked and propagated as a "proposed" PHI value. If that PHI value | |||
117 | /// is truly live-through, it'll appear on the loop backedges on the next | |||
118 | /// dataflow iteration, after which the block live-in moves to be a "definite" | |||
119 | /// PHI. If it's not truly live-through, the variable value will be downgraded | |||
120 | /// further as we explore the lattice, or remains "proposed" and is considered | |||
121 | /// invalid once dataflow completes. | |||
122 | /// | |||
123 | /// ### Terminology | |||
124 | /// | |||
125 | /// A machine location is a register or spill slot, a value is something that's | |||
126 | /// defined by an instruction or PHI node, while a variable value is the value | |||
127 | /// assigned to a variable. A variable location is a machine location, that must | |||
128 | /// contain the appropriate variable value. A value that is a PHI node is | |||
129 | /// occasionally called an mphi. | |||
130 | /// | |||
131 | /// The first dataflow problem is the "machine value location" problem, | |||
132 | /// because we're determining which machine locations contain which values. | |||
133 | /// The "locations" are constant: what's unknown is what value they contain. | |||
134 | /// | |||
135 | /// The second dataflow problem (the one for variables) is the "variable value | |||
136 | /// problem", because it's determining what values a variable has, rather than | |||
137 | /// what location those values are placed in. Unfortunately, it's not that | |||
138 | /// simple, because producing a PHI value always involves picking a location. | |||
139 | /// This is an imperfection that we just have to accept, at least for now. | |||
140 | /// | |||
141 | /// TODO: | |||
142 | /// Overlapping fragments | |||
143 | /// Entry values | |||
144 | /// Add back DEBUG statements for debugging this | |||
145 | /// Collect statistics | |||
146 | /// | |||
147 | //===----------------------------------------------------------------------===// | |||
148 | ||||
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>
#include <limits.h>
#include <limits>

#include "LiveDebugValues.h"
203 | ||||
204 | using namespace llvm; | |||
205 | ||||
// SSAUpdaterImpl.h sets DEBUG_TYPE for its own use; undefine it and install
// this pass's debug type instead.
#undef DEBUG_TYPE
#define DEBUG_TYPE "livedebugvalues"
209 | ||||
210 | // Act more like the VarLoc implementation, by propagating some locations too | |||
211 | // far and ignoring some transfers. | |||
212 | static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden, | |||
213 | cl::desc("Act like old LiveDebugValues did"), | |||
214 | cl::init(false)); | |||
215 | ||||
216 | namespace { | |||
217 | ||||
218 | // The location at which a spilled value resides. It consists of a register and | |||
219 | // an offset. | |||
220 | struct SpillLoc { | |||
221 | unsigned SpillBase; | |||
222 | StackOffset SpillOffset; | |||
223 | bool operator==(const SpillLoc &Other) const { | |||
224 | return std::make_pair(SpillBase, SpillOffset) == | |||
225 | std::make_pair(Other.SpillBase, Other.SpillOffset); | |||
226 | } | |||
227 | bool operator<(const SpillLoc &Other) const { | |||
228 | return std::make_tuple(SpillBase, SpillOffset.getFixed(), | |||
229 | SpillOffset.getScalable()) < | |||
230 | std::make_tuple(Other.SpillBase, Other.SpillOffset.getFixed(), | |||
231 | Other.SpillOffset.getScalable()); | |||
232 | } | |||
233 | }; | |||
234 | ||||
/// Strongly-typed index of a tracked machine location. Wraps an unsigned;
/// UINT_MAX is reserved to mean "not a valid location".
class LocIdx {
  unsigned Location;

  // Default constructor is private, initializing to an illegal location number.
  // Use only for "not an entry" elements in IndexedMaps.
  LocIdx() : Location(UINT_MAX) {}

public:
#define NUM_LOC_BITS 24
  LocIdx(unsigned L) : Location(L) {
    // ValueIDNum packs location numbers into a 24-bit bit-field.
    assert(L < (1 << NUM_LOC_BITS) && "Machine locations must fit in 24 bits");
  }

  static LocIdx MakeIllegalLoc() { return LocIdx(); }

  bool isIllegal() const { return Location == UINT_MAX; }

  uint64_t asU64() const { return Location; }

  bool operator==(unsigned L) const { return Location == L; }

  bool operator==(const LocIdx &L) const { return Location == L.Location; }

  bool operator!=(unsigned L) const { return !(*this == L); }

  bool operator!=(const LocIdx &L) const { return !(*this == L); }

  bool operator<(const LocIdx &Other) const {
    return Location < Other.Location;
  }
};
280 | ||||
281 | class LocIdxToIndexFunctor { | |||
282 | public: | |||
283 | using argument_type = LocIdx; | |||
284 | unsigned operator()(const LocIdx &L) const { | |||
285 | return L.asU64(); | |||
286 | } | |||
287 | }; | |||
288 | ||||
289 | /// Unique identifier for a value defined by an instruction, as a value type. | |||
290 | /// Casts back and forth to a uint64_t. Probably replacable with something less | |||
291 | /// bit-constrained. Each value identifies the instruction and machine location | |||
292 | /// where the value is defined, although there may be no corresponding machine | |||
293 | /// operand for it (ex: regmasks clobbering values). The instructions are | |||
294 | /// one-based, and definitions that are PHIs have instruction number zero. | |||
295 | /// | |||
296 | /// The obvious limits of a 1M block function or 1M instruction blocks are | |||
297 | /// problematic; but by that point we should probably have bailed out of | |||
298 | /// trying to analyse the function. | |||
299 | class ValueIDNum { | |||
300 | uint64_t BlockNo : 20; /// The block where the def happens. | |||
301 | uint64_t InstNo : 20; /// The Instruction where the def happens. | |||
302 | /// One based, is distance from start of block. | |||
303 | uint64_t LocNo : NUM_LOC_BITS24; /// The machine location where the def happens. | |||
304 | ||||
305 | public: | |||
306 | // XXX -- temporarily enabled while the live-in / live-out tables are moved | |||
307 | // to something more type-y | |||
308 | ValueIDNum() : BlockNo(0xFFFFF), | |||
309 | InstNo(0xFFFFF), | |||
310 | LocNo(0xFFFFFF) { } | |||
311 | ||||
312 | ValueIDNum(uint64_t Block, uint64_t Inst, uint64_t Loc) | |||
313 | : BlockNo(Block), InstNo(Inst), LocNo(Loc) { } | |||
314 | ||||
315 | ValueIDNum(uint64_t Block, uint64_t Inst, LocIdx Loc) | |||
316 | : BlockNo(Block), InstNo(Inst), LocNo(Loc.asU64()) { } | |||
317 | ||||
318 | uint64_t getBlock() const { return BlockNo; } | |||
319 | uint64_t getInst() const { return InstNo; } | |||
320 | uint64_t getLoc() const { return LocNo; } | |||
321 | bool isPHI() const { return InstNo == 0; } | |||
322 | ||||
323 | uint64_t asU64() const { | |||
324 | uint64_t TmpBlock = BlockNo; | |||
325 | uint64_t TmpInst = InstNo; | |||
326 | return TmpBlock << 44ull | TmpInst << NUM_LOC_BITS24 | LocNo; | |||
327 | } | |||
328 | ||||
329 | static ValueIDNum fromU64(uint64_t v) { | |||
330 | uint64_t L = (v & 0x3FFF); | |||
331 | return {v >> 44ull, ((v >> NUM_LOC_BITS24) & 0xFFFFF), L}; | |||
332 | } | |||
333 | ||||
334 | bool operator<(const ValueIDNum &Other) const { | |||
335 | return asU64() < Other.asU64(); | |||
336 | } | |||
337 | ||||
338 | bool operator==(const ValueIDNum &Other) const { | |||
339 | return std::tie(BlockNo, InstNo, LocNo) == | |||
340 | std::tie(Other.BlockNo, Other.InstNo, Other.LocNo); | |||
341 | } | |||
342 | ||||
343 | bool operator!=(const ValueIDNum &Other) const { return !(*this == Other); } | |||
344 | ||||
345 | std::string asString(const std::string &mlocname) const { | |||
346 | return Twine("Value{bb: ") | |||
347 | .concat(Twine(BlockNo).concat( | |||
348 | Twine(", inst: ") | |||
349 | .concat((InstNo ? Twine(InstNo) : Twine("live-in")) | |||
350 | .concat(Twine(", loc: ").concat(Twine(mlocname))) | |||
351 | .concat(Twine("}"))))) | |||
352 | .str(); | |||
353 | } | |||
354 | ||||
355 | static ValueIDNum EmptyValue; | |||
356 | }; | |||
357 | ||||
358 | } // end anonymous namespace | |||
359 | ||||
360 | namespace { | |||
361 | ||||
362 | /// Meta qualifiers for a value. Pair of whatever expression is used to qualify | |||
363 | /// the the value, and Boolean of whether or not it's indirect. | |||
364 | class DbgValueProperties { | |||
365 | public: | |||
366 | DbgValueProperties(const DIExpression *DIExpr, bool Indirect) | |||
367 | : DIExpr(DIExpr), Indirect(Indirect) {} | |||
368 | ||||
369 | /// Extract properties from an existing DBG_VALUE instruction. | |||
370 | DbgValueProperties(const MachineInstr &MI) { | |||
371 | assert(MI.isDebugValue())(static_cast <bool> (MI.isDebugValue()) ? void (0) : __assert_fail ("MI.isDebugValue()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 371, __extension__ __PRETTY_FUNCTION__)); | |||
372 | DIExpr = MI.getDebugExpression(); | |||
373 | Indirect = MI.getOperand(1).isImm(); | |||
374 | } | |||
375 | ||||
376 | bool operator==(const DbgValueProperties &Other) const { | |||
377 | return std::tie(DIExpr, Indirect) == std::tie(Other.DIExpr, Other.Indirect); | |||
378 | } | |||
379 | ||||
380 | bool operator!=(const DbgValueProperties &Other) const { | |||
381 | return !(*this == Other); | |||
382 | } | |||
383 | ||||
384 | const DIExpression *DIExpr; | |||
385 | bool Indirect; | |||
386 | }; | |||
387 | ||||
388 | /// Tracker for what values are in machine locations. Listens to the Things | |||
389 | /// being Done by various instructions, and maintains a table of what machine | |||
390 | /// locations have what values (as defined by a ValueIDNum). | |||
391 | /// | |||
392 | /// There are potentially a much larger number of machine locations on the | |||
393 | /// target machine than the actual working-set size of the function. On x86 for | |||
394 | /// example, we're extremely unlikely to want to track values through control | |||
395 | /// or debug registers. To avoid doing so, MLocTracker has several layers of | |||
396 | /// indirection going on, with two kinds of ``location'': | |||
397 | /// * A LocID uniquely identifies a register or spill location, with a | |||
398 | /// predictable value. | |||
399 | /// * A LocIdx is a key (in the database sense) for a LocID and a ValueIDNum. | |||
400 | /// Whenever a location is def'd or used by a MachineInstr, we automagically | |||
401 | /// create a new LocIdx for a location, but not otherwise. This ensures we only | |||
402 | /// account for locations that are actually used or defined. The cost is another | |||
403 | /// vector lookup (of LocID -> LocIdx) over any other implementation. This is | |||
404 | /// fairly cheap, and the compiler tries to reduce the working-set at any one | |||
405 | /// time in the function anyway. | |||
406 | /// | |||
407 | /// Register mask operands completely blow this out of the water; I've just | |||
408 | /// piled hacks on top of hacks to get around that. | |||
409 | class MLocTracker { | |||
410 | public: | |||
411 | MachineFunction &MF; | |||
412 | const TargetInstrInfo &TII; | |||
413 | const TargetRegisterInfo &TRI; | |||
414 | const TargetLowering &TLI; | |||
415 | ||||
416 | /// IndexedMap type, mapping from LocIdx to ValueIDNum. | |||
417 | using LocToValueType = IndexedMap<ValueIDNum, LocIdxToIndexFunctor>; | |||
418 | ||||
419 | /// Map of LocIdxes to the ValueIDNums that they store. This is tightly | |||
420 | /// packed, entries only exist for locations that are being tracked. | |||
421 | LocToValueType LocIdxToIDNum; | |||
422 | ||||
423 | /// "Map" of machine location IDs (i.e., raw register or spill number) to the | |||
424 | /// LocIdx key / number for that location. There are always at least as many | |||
425 | /// as the number of registers on the target -- if the value in the register | |||
426 | /// is not being tracked, then the LocIdx value will be zero. New entries are | |||
427 | /// appended if a new spill slot begins being tracked. | |||
428 | /// This, and the corresponding reverse map persist for the analysis of the | |||
/// whole function, and is necessary for decoding various vectors of
430 | /// values. | |||
431 | std::vector<LocIdx> LocIDToLocIdx; | |||
432 | ||||
433 | /// Inverse map of LocIDToLocIdx. | |||
434 | IndexedMap<unsigned, LocIdxToIndexFunctor> LocIdxToLocID; | |||
435 | ||||
436 | /// Unique-ification of spill slots. Used to number them -- their LocID | |||
437 | /// number is the index in SpillLocs minus one plus NumRegs. | |||
438 | UniqueVector<SpillLoc> SpillLocs; | |||
439 | ||||
440 | // If we discover a new machine location, assign it an mphi with this | |||
441 | // block number. | |||
442 | unsigned CurBB; | |||
443 | ||||
444 | /// Cached local copy of the number of registers the target has. | |||
445 | unsigned NumRegs; | |||
446 | ||||
447 | /// Collection of register mask operands that have been observed. Second part | |||
448 | /// of pair indicates the instruction that they happened in. Used to | |||
449 | /// reconstruct where defs happened if we start tracking a location later | |||
450 | /// on. | |||
451 | SmallVector<std::pair<const MachineOperand *, unsigned>, 32> Masks; | |||
452 | ||||
453 | /// Iterator for locations and the values they contain. Dereferencing | |||
454 | /// produces a struct/pair containing the LocIdx key for this location, | |||
455 | /// and a reference to the value currently stored. Simplifies the process | |||
456 | /// of seeking a particular location. | |||
457 | class MLocIterator { | |||
458 | LocToValueType &ValueMap; | |||
459 | LocIdx Idx; | |||
460 | ||||
461 | public: | |||
462 | class value_type { | |||
463 | public: | |||
464 | value_type(LocIdx Idx, ValueIDNum &Value) : Idx(Idx), Value(Value) { } | |||
465 | const LocIdx Idx; /// Read-only index of this location. | |||
466 | ValueIDNum &Value; /// Reference to the stored value at this location. | |||
467 | }; | |||
468 | ||||
469 | MLocIterator(LocToValueType &ValueMap, LocIdx Idx) | |||
470 | : ValueMap(ValueMap), Idx(Idx) { } | |||
471 | ||||
472 | bool operator==(const MLocIterator &Other) const { | |||
473 | assert(&ValueMap == &Other.ValueMap)(static_cast <bool> (&ValueMap == &Other.ValueMap ) ? void (0) : __assert_fail ("&ValueMap == &Other.ValueMap" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 473, __extension__ __PRETTY_FUNCTION__)); | |||
474 | return Idx == Other.Idx; | |||
475 | } | |||
476 | ||||
477 | bool operator!=(const MLocIterator &Other) const { | |||
478 | return !(*this == Other); | |||
479 | } | |||
480 | ||||
481 | void operator++() { | |||
482 | Idx = LocIdx(Idx.asU64() + 1); | |||
483 | } | |||
484 | ||||
485 | value_type operator*() { | |||
486 | return value_type(Idx, ValueMap[LocIdx(Idx)]); | |||
487 | } | |||
488 | }; | |||
489 | ||||
490 | MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII, | |||
491 | const TargetRegisterInfo &TRI, const TargetLowering &TLI) | |||
492 | : MF(MF), TII(TII), TRI(TRI), TLI(TLI), | |||
493 | LocIdxToIDNum(ValueIDNum::EmptyValue), | |||
494 | LocIdxToLocID(0) { | |||
495 | NumRegs = TRI.getNumRegs(); | |||
496 | reset(); | |||
497 | LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc()); | |||
498 | assert(NumRegs < (1u << NUM_LOC_BITS))(static_cast <bool> (NumRegs < (1u << 24)) ? void (0) : __assert_fail ("NumRegs < (1u << NUM_LOC_BITS)" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 498, __extension__ __PRETTY_FUNCTION__)); // Detect bit packing failure | |||
499 | ||||
500 | // Always track SP. This avoids the implicit clobbering caused by regmasks | |||
501 | // from affectings its values. (LiveDebugValues disbelieves calls and | |||
502 | // regmasks that claim to clobber SP). | |||
503 | Register SP = TLI.getStackPointerRegisterToSaveRestore(); | |||
504 | if (SP) { | |||
505 | unsigned ID = getLocID(SP, false); | |||
506 | (void)lookupOrTrackRegister(ID); | |||
507 | } | |||
508 | } | |||
509 | ||||
510 | /// Produce location ID number for indexing LocIDToLocIdx. Takes the register | |||
511 | /// or spill number, and flag for whether it's a spill or not. | |||
512 | unsigned getLocID(Register RegOrSpill, bool isSpill) { | |||
513 | return (isSpill) ? RegOrSpill.id() + NumRegs - 1 : RegOrSpill.id(); | |||
514 | } | |||
515 | ||||
516 | /// Accessor for reading the value at Idx. | |||
517 | ValueIDNum getNumAtPos(LocIdx Idx) const { | |||
518 | assert(Idx.asU64() < LocIdxToIDNum.size())(static_cast <bool> (Idx.asU64() < LocIdxToIDNum.size ()) ? void (0) : __assert_fail ("Idx.asU64() < LocIdxToIDNum.size()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 518, __extension__ __PRETTY_FUNCTION__)); | |||
519 | return LocIdxToIDNum[Idx]; | |||
520 | } | |||
521 | ||||
522 | unsigned getNumLocs(void) const { return LocIdxToIDNum.size(); } | |||
523 | ||||
524 | /// Reset all locations to contain a PHI value at the designated block. Used | |||
525 | /// sometimes for actual PHI values, othertimes to indicate the block entry | |||
526 | /// value (before any more information is known). | |||
527 | void setMPhis(unsigned NewCurBB) { | |||
528 | CurBB = NewCurBB; | |||
529 | for (auto Location : locations()) | |||
530 | Location.Value = {CurBB, 0, Location.Idx}; | |||
531 | } | |||
532 | ||||
533 | /// Load values for each location from array of ValueIDNums. Take current | |||
534 | /// bbnum just in case we read a value from a hitherto untouched register. | |||
535 | void loadFromArray(ValueIDNum *Locs, unsigned NewCurBB) { | |||
536 | CurBB = NewCurBB; | |||
537 | // Iterate over all tracked locations, and load each locations live-in | |||
538 | // value into our local index. | |||
539 | for (auto Location : locations()) | |||
540 | Location.Value = Locs[Location.Idx.asU64()]; | |||
541 | } | |||
542 | ||||
543 | /// Wipe any un-necessary location records after traversing a block. | |||
544 | void reset(void) { | |||
545 | // We could reset all the location values too; however either loadFromArray | |||
546 | // or setMPhis should be called before this object is re-used. Just | |||
547 | // clear Masks, they're definitely not needed. | |||
548 | Masks.clear(); | |||
549 | } | |||
550 | ||||
551 | /// Clear all data. Destroys the LocID <=> LocIdx map, which makes most of | |||
552 | /// the information in this pass uninterpretable. | |||
553 | void clear(void) { | |||
554 | reset(); | |||
555 | LocIDToLocIdx.clear(); | |||
556 | LocIdxToLocID.clear(); | |||
557 | LocIdxToIDNum.clear(); | |||
558 | //SpillLocs.reset(); XXX UniqueVector::reset assumes a SpillLoc casts from 0 | |||
559 | SpillLocs = decltype(SpillLocs)(); | |||
560 | ||||
561 | LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc()); | |||
562 | } | |||
563 | ||||
564 | /// Set a locaiton to a certain value. | |||
565 | void setMLoc(LocIdx L, ValueIDNum Num) { | |||
566 | assert(L.asU64() < LocIdxToIDNum.size())(static_cast <bool> (L.asU64() < LocIdxToIDNum.size( )) ? void (0) : __assert_fail ("L.asU64() < LocIdxToIDNum.size()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 566, __extension__ __PRETTY_FUNCTION__)); | |||
567 | LocIdxToIDNum[L] = Num; | |||
568 | } | |||
569 | ||||
570 | /// Create a LocIdx for an untracked register ID. Initialize it to either an | |||
571 | /// mphi value representing a live-in, or a recent register mask clobber. | |||
572 | LocIdx trackRegister(unsigned ID) { | |||
573 | assert(ID != 0)(static_cast <bool> (ID != 0) ? void (0) : __assert_fail ("ID != 0", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 573, __extension__ __PRETTY_FUNCTION__)); | |||
574 | LocIdx NewIdx = LocIdx(LocIdxToIDNum.size()); | |||
575 | LocIdxToIDNum.grow(NewIdx); | |||
576 | LocIdxToLocID.grow(NewIdx); | |||
577 | ||||
578 | // Default: it's an mphi. | |||
579 | ValueIDNum ValNum = {CurBB, 0, NewIdx}; | |||
580 | // Was this reg ever touched by a regmask? | |||
581 | for (const auto &MaskPair : reverse(Masks)) { | |||
582 | if (MaskPair.first->clobbersPhysReg(ID)) { | |||
583 | // There was an earlier def we skipped. | |||
584 | ValNum = {CurBB, MaskPair.second, NewIdx}; | |||
585 | break; | |||
586 | } | |||
587 | } | |||
588 | ||||
589 | LocIdxToIDNum[NewIdx] = ValNum; | |||
590 | LocIdxToLocID[NewIdx] = ID; | |||
591 | return NewIdx; | |||
592 | } | |||
593 | ||||
594 | LocIdx lookupOrTrackRegister(unsigned ID) { | |||
595 | LocIdx &Index = LocIDToLocIdx[ID]; | |||
596 | if (Index.isIllegal()) | |||
597 | Index = trackRegister(ID); | |||
598 | return Index; | |||
599 | } | |||
600 | ||||
601 | /// Record a definition of the specified register at the given block / inst. | |||
602 | /// This doesn't take a ValueIDNum, because the definition and its location | |||
603 | /// are synonymous. | |||
604 | void defReg(Register R, unsigned BB, unsigned Inst) { | |||
605 | unsigned ID = getLocID(R, false); | |||
606 | LocIdx Idx = lookupOrTrackRegister(ID); | |||
607 | ValueIDNum ValueID = {BB, Inst, Idx}; | |||
608 | LocIdxToIDNum[Idx] = ValueID; | |||
609 | } | |||
610 | ||||
611 | /// Set a register to a value number. To be used if the value number is | |||
612 | /// known in advance. | |||
613 | void setReg(Register R, ValueIDNum ValueID) { | |||
614 | unsigned ID = getLocID(R, false); | |||
615 | LocIdx Idx = lookupOrTrackRegister(ID); | |||
616 | LocIdxToIDNum[Idx] = ValueID; | |||
617 | } | |||
618 | ||||
619 | ValueIDNum readReg(Register R) { | |||
620 | unsigned ID = getLocID(R, false); | |||
621 | LocIdx Idx = lookupOrTrackRegister(ID); | |||
622 | return LocIdxToIDNum[Idx]; | |||
623 | } | |||
624 | ||||
/// Reset a register value to zero / empty. Needed to replicate the
/// VarLoc implementation where a copy to/from a register effectively
/// clears the contents of the source register. (Values can only have one
/// machine location in VarLocBasedImpl).
void wipeRegister(Register R) {
  unsigned ID = getLocID(R, false);
  // NOTE(review): unlike readReg/setReg this does not lookupOrTrackRegister
  // and does not check Idx.isIllegal() -- callers appear expected to only
  // wipe registers that are already tracked. Confirm before reusing.
  LocIdx Idx = LocIDToLocIdx[ID];
  LocIdxToIDNum[Idx] = ValueIDNum::EmptyValue;
}
634 | ||||
635 | /// Determine the LocIdx of an existing register. | |||
636 | LocIdx getRegMLoc(Register R) { | |||
637 | unsigned ID = getLocID(R, false); | |||
638 | return LocIDToLocIdx[ID]; | |||
639 | } | |||
640 | ||||
/// Record a RegMask operand being executed. Defs any register we currently
/// track, stores a pointer to the mask in case we have to account for it
/// later.
void writeRegMask(const MachineOperand *MO, unsigned CurBB, unsigned InstID) {
  // Fetch SP up front so the loop below can exempt it from clobbering.
  Register SP = TLI.getStackPointerRegisterToSaveRestore();

  // Def any register we track that isn't preserved. The regmask
  // terminates the liveness of a register, meaning its value can't be
  // relied upon -- we represent this by giving it a new value.
  for (auto Location : locations()) {
    unsigned ID = LocIdxToLocID[Location.Idx];
    // Don't clobber SP, even if the mask says it's clobbered.
    if (ID < NumRegs && ID != SP && MO->clobbersPhysReg(ID))
      defReg(ID, CurBB, InstID);
  }
  // Remember the mask: registers that begin being tracked later use it to
  // pick their initial value (see trackRegister).
  Masks.push_back(std::make_pair(MO, InstID));
}
659 | ||||
660 | /// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked. | |||
661 | LocIdx getOrTrackSpillLoc(SpillLoc L) { | |||
662 | unsigned SpillID = SpillLocs.idFor(L); | |||
663 | if (SpillID == 0) { | |||
664 | SpillID = SpillLocs.insert(L); | |||
665 | unsigned L = getLocID(SpillID, true); | |||
666 | LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx | |||
667 | LocIdxToIDNum.grow(Idx); | |||
668 | LocIdxToLocID.grow(Idx); | |||
669 | LocIDToLocIdx.push_back(Idx); | |||
670 | LocIdxToLocID[Idx] = L; | |||
671 | return Idx; | |||
672 | } else { | |||
673 | unsigned L = getLocID(SpillID, true); | |||
674 | LocIdx Idx = LocIDToLocIdx[L]; | |||
675 | return Idx; | |||
676 | } | |||
677 | } | |||
678 | ||||
679 | /// Set the value stored in a spill slot. | |||
680 | void setSpill(SpillLoc L, ValueIDNum ValueID) { | |||
681 | LocIdx Idx = getOrTrackSpillLoc(L); | |||
682 | LocIdxToIDNum[Idx] = ValueID; | |||
683 | } | |||
684 | ||||
685 | /// Read whatever value is in a spill slot, or None if it isn't tracked. | |||
686 | Optional<ValueIDNum> readSpill(SpillLoc L) { | |||
687 | unsigned SpillID = SpillLocs.idFor(L); | |||
688 | if (SpillID == 0) | |||
689 | return None; | |||
690 | ||||
691 | unsigned LocID = getLocID(SpillID, true); | |||
692 | LocIdx Idx = LocIDToLocIdx[LocID]; | |||
693 | return LocIdxToIDNum[Idx]; | |||
694 | } | |||
695 | ||||
696 | /// Determine the LocIdx of a spill slot. Return None if it previously | |||
697 | /// hasn't had a value assigned. | |||
698 | Optional<LocIdx> getSpillMLoc(SpillLoc L) { | |||
699 | unsigned SpillID = SpillLocs.idFor(L); | |||
700 | if (SpillID == 0) | |||
701 | return None; | |||
702 | unsigned LocNo = getLocID(SpillID, true); | |||
703 | return LocIDToLocIdx[LocNo]; | |||
704 | } | |||
705 | ||||
/// Return true if Idx is a spill machine location. Spill LocIDs are
/// numbered after all register IDs, so any LocID >= NumRegs is a slot.
bool isSpill(LocIdx Idx) const {
  return LocIdxToLocID[Idx] >= NumRegs;
}
710 | ||||
/// Iterator over the first tracked machine location.
MLocIterator begin() {
  return MLocIterator(LocIdxToIDNum, 0);
}
714 | ||||
/// Iterator one past the last tracked machine location.
MLocIterator end() {
  return MLocIterator(LocIdxToIDNum, LocIdxToIDNum.size());
}
718 | ||||
/// Return a range over all locations currently tracked, suitable for
/// range-for iteration.
iterator_range<MLocIterator> locations() {
  return llvm::make_range(begin(), end());
}
723 | ||||
724 | std::string LocIdxToName(LocIdx Idx) const { | |||
725 | unsigned ID = LocIdxToLocID[Idx]; | |||
726 | if (ID >= NumRegs) | |||
727 | return Twine("slot ").concat(Twine(ID - NumRegs)).str(); | |||
728 | else | |||
729 | return TRI.getRegAsmName(ID).str(); | |||
730 | } | |||
731 | ||||
732 | std::string IDAsString(const ValueIDNum &Num) const { | |||
733 | std::string DefName = LocIdxToName(Num.getLoc()); | |||
734 | return Num.asString(DefName); | |||
735 | } | |||
736 | ||||
737 | LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) | |||
738 | void dump() { | |||
739 | for (auto Location : locations()) { | |||
740 | std::string MLocName = LocIdxToName(Location.Value.getLoc()); | |||
741 | std::string DefName = Location.Value.asString(MLocName); | |||
742 | dbgs() << LocIdxToName(Location.Idx) << " --> " << DefName << "\n"; | |||
743 | } | |||
744 | } | |||
745 | ||||
746 | LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) | |||
747 | void dump_mloc_map() { | |||
748 | for (auto Location : locations()) { | |||
749 | std::string foo = LocIdxToName(Location.Idx); | |||
750 | dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n"; | |||
751 | } | |||
752 | } | |||
753 | ||||
/// Create a DBG_VALUE based on machine location \p MLoc. Qualify it with the
/// information in \p Properties, for variable Var. Don't insert it anywhere,
/// just return the builder for it.
MachineInstrBuilder emitLoc(Optional<LocIdx> MLoc, const DebugVariable &Var,
                            const DbgValueProperties &Properties) {
  // Fabricate a DebugLoc from the variable's scope / inlining information;
  // line and column are deliberately zeroed.
  DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
                                Var.getVariable()->getScope(),
                                const_cast<DILocation *>(Var.getInlinedAt()));
  auto MIB = BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE));

  const DIExpression *Expr = Properties.DIExpr;
  if (!MLoc) {
    // No location -> DBG_VALUE $noreg
    MIB.addReg(0, RegState::Debug);
    MIB.addReg(0, RegState::Debug);
  } else if (LocIdxToLocID[*MLoc] >= NumRegs) {
    // Spill slot: emit as an indirect location off the spill base register.
    unsigned LocID = LocIdxToLocID[*MLoc];
    // SpillLocs IDs are 1-based (idFor returns 0 for "absent"), hence the +1.
    const SpillLoc &Spill = SpillLocs[LocID - NumRegs + 1];

    auto *TRI = MF.getSubtarget().getRegisterInfo();
    Expr = TRI->prependOffsetExpression(Expr, DIExpression::ApplyOffset,
                                        Spill.SpillOffset);
    unsigned Base = Spill.SpillBase;
    MIB.addReg(Base, RegState::Debug);
    MIB.addImm(0);
  } else {
    // Plain register location; the Indirect property picks the second op.
    unsigned LocID = LocIdxToLocID[*MLoc];
    MIB.addReg(LocID, RegState::Debug);
    if (Properties.Indirect)
      MIB.addImm(0);
    else
      MIB.addReg(0, RegState::Debug);
  }

  MIB.addMetadata(Var.getVariable());
  MIB.addMetadata(Expr);
  return MIB;
}
792 | }; | |||
793 | ||||
/// Class recording the (high level) _value_ of a variable. Identifies either
/// the value of the variable as a ValueIDNum, or a constant MachineOperand.
/// This class also stores meta-information about how the value is qualified.
/// Used to reason about variable values when performing the second
/// (DebugVariable specific) dataflow analysis.
class DbgValue {
public:
  // Discriminated by the Kind field below; exactly one member is active.
  union {
    /// If Kind is Def, the value number that this value is based on.
    ValueIDNum ID;
    /// If Kind is Const, the MachineOperand defining this value.
    MachineOperand MO;
    /// For a NoVal DbgValue, which block it was generated in.
    unsigned BlockNo;
  };
  /// Qualifiers for the ValueIDNum above.
  DbgValueProperties Properties;

  typedef enum {
    Undef,    // Represents a DBG_VALUE $noreg in the transfer function only.
    Def,      // This value is defined by an inst, or is a PHI value.
    Const,    // A constant value contained in the MachineOperand field.
    Proposed, // This is a tentative PHI value, which may be confirmed or
              // invalidated later.
    NoVal     // Empty DbgValue, generated during dataflow. BlockNo stores
              // which block this was generated in.
  } KindT;
  /// Discriminator for whether this is a constant or an in-program value.
  KindT Kind;

  // Constructor for Def / Proposed values based on a ValueIDNum.
  DbgValue(const ValueIDNum &Val, const DbgValueProperties &Prop, KindT Kind)
      : ID(Val), Properties(Prop), Kind(Kind) {
    assert(Kind == Def || Kind == Proposed)(static_cast <bool> (Kind == Def || Kind == Proposed) ? void (0) : __assert_fail ("Kind == Def || Kind == Proposed", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 826, __extension__ __PRETTY_FUNCTION__));
  }

  // Constructor for NoVal values: records only the originating block.
  DbgValue(unsigned BlockNo, const DbgValueProperties &Prop, KindT Kind)
      : BlockNo(BlockNo), Properties(Prop), Kind(Kind) {
    assert(Kind == NoVal)(static_cast <bool> (Kind == NoVal) ? void (0) : __assert_fail ("Kind == NoVal", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 831, __extension__ __PRETTY_FUNCTION__));
  }

  // Constructor for Const values wrapping a MachineOperand.
  DbgValue(const MachineOperand &MO, const DbgValueProperties &Prop, KindT Kind)
      : MO(MO), Properties(Prop), Kind(Kind) {
    assert(Kind == Const)(static_cast <bool> (Kind == Const) ? void (0) : __assert_fail ("Kind == Const", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 836, __extension__ __PRETTY_FUNCTION__));
  }

  // Constructor for Undef values; no union member is initialized.
  DbgValue(const DbgValueProperties &Prop, KindT Kind)
      : Properties(Prop), Kind(Kind) {
    assert(Kind == Undef &&(static_cast <bool> (Kind == Undef && "Empty DbgValue constructor must pass in Undef kind" ) ? void (0) : __assert_fail ("Kind == Undef && \"Empty DbgValue constructor must pass in Undef kind\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 842, __extension__ __PRETTY_FUNCTION__))
           "Empty DbgValue constructor must pass in Undef kind")(static_cast <bool> (Kind == Undef && "Empty DbgValue constructor must pass in Undef kind" ) ? void (0) : __assert_fail ("Kind == Undef && \"Empty DbgValue constructor must pass in Undef kind\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 842, __extension__ __PRETTY_FUNCTION__));
  }

  /// Print this value; needs an MLocTracker to name the location that Def /
  /// Proposed value numbers refer to.
  void dump(const MLocTracker *MTrack) const {
    if (Kind == Const) {
      MO.dump();
    } else if (Kind == NoVal) {
      dbgs() << "NoVal(" << BlockNo << ")";
    } else if (Kind == Proposed) {
      dbgs() << "VPHI(" << MTrack->IDAsString(ID) << ")";
    } else {
      assert(Kind == Def)(static_cast <bool> (Kind == Def) ? void (0) : __assert_fail ("Kind == Def", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 853, __extension__ __PRETTY_FUNCTION__));
      dbgs() << MTrack->IDAsString(ID);
    }
    if (Properties.Indirect)
      dbgs() << " indir";
    if (Properties.DIExpr)
      dbgs() << " " << *Properties.DIExpr;
  }

  // Equality compares Kind and Properties first, then only the union member
  // that is active for this Kind.
  bool operator==(const DbgValue &Other) const {
    if (std::tie(Kind, Properties) != std::tie(Other.Kind, Other.Properties))
      return false;
    else if (Kind == Proposed && ID != Other.ID)
      return false;
    else if (Kind == Def && ID != Other.ID)
      return false;
    else if (Kind == NoVal && BlockNo != Other.BlockNo)
      return false;
    else if (Kind == Const)
      return MO.isIdenticalTo(Other.MO);

    return true;
  }

  bool operator!=(const DbgValue &Other) const { return !(*this == Other); }
};
879 | ||||
/// Types for recording sets of variable fragments that overlap. For a given
/// local variable, we record all other fragments of that variable that could
/// overlap it, to reduce search time.
/// A (variable, fragment) pair naming one fragment of one local variable.
using FragmentOfVar =
    std::pair<const DILocalVariable *, DIExpression::FragmentInfo>;
/// Maps a fragment to the other fragments of the same variable that may
/// overlap it.
using OverlapMap =
    DenseMap<FragmentOfVar, SmallVector<DIExpression::FragmentInfo, 1>>;
887 | ||||
888 | /// Collection of DBG_VALUEs observed when traversing a block. Records each | |||
889 | /// variable and the value the DBG_VALUE refers to. Requires the machine value | |||
890 | /// location dataflow algorithm to have run already, so that values can be | |||
891 | /// identified. | |||
892 | class VLocTracker { | |||
893 | public: | |||
894 | /// Map DebugVariable to the latest Value it's defined to have. | |||
895 | /// Needs to be a MapVector because we determine order-in-the-input-MIR from | |||
896 | /// the order in this container. | |||
897 | /// We only retain the last DbgValue in each block for each variable, to | |||
898 | /// determine the blocks live-out variable value. The Vars container forms the | |||
899 | /// transfer function for this block, as part of the dataflow analysis. The | |||
900 | /// movement of values between locations inside of a block is handled at a | |||
901 | /// much later stage, in the TransferTracker class. | |||
902 | MapVector<DebugVariable, DbgValue> Vars; | |||
903 | DenseMap<DebugVariable, const DILocation *> Scopes; | |||
904 | MachineBasicBlock *MBB; | |||
905 | ||||
906 | public: | |||
907 | VLocTracker() {} | |||
908 | ||||
909 | void defVar(const MachineInstr &MI, const DbgValueProperties &Properties, | |||
910 | Optional<ValueIDNum> ID) { | |||
911 | assert(MI.isDebugValue() || MI.isDebugRef())(static_cast <bool> (MI.isDebugValue() || MI.isDebugRef ()) ? void (0) : __assert_fail ("MI.isDebugValue() || MI.isDebugRef()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 911, __extension__ __PRETTY_FUNCTION__)); | |||
912 | DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(), | |||
913 | MI.getDebugLoc()->getInlinedAt()); | |||
914 | DbgValue Rec = (ID) ? DbgValue(*ID, Properties, DbgValue::Def) | |||
915 | : DbgValue(Properties, DbgValue::Undef); | |||
916 | ||||
917 | // Attempt insertion; overwrite if it's already mapped. | |||
918 | auto Result = Vars.insert(std::make_pair(Var, Rec)); | |||
919 | if (!Result.second) | |||
920 | Result.first->second = Rec; | |||
921 | Scopes[Var] = MI.getDebugLoc().get(); | |||
922 | } | |||
923 | ||||
924 | void defVar(const MachineInstr &MI, const MachineOperand &MO) { | |||
925 | // Only DBG_VALUEs can define constant-valued variables. | |||
926 | assert(MI.isDebugValue())(static_cast <bool> (MI.isDebugValue()) ? void (0) : __assert_fail ("MI.isDebugValue()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 926, __extension__ __PRETTY_FUNCTION__)); | |||
927 | DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(), | |||
928 | MI.getDebugLoc()->getInlinedAt()); | |||
929 | DbgValueProperties Properties(MI); | |||
930 | DbgValue Rec = DbgValue(MO, Properties, DbgValue::Const); | |||
931 | ||||
932 | // Attempt insertion; overwrite if it's already mapped. | |||
933 | auto Result = Vars.insert(std::make_pair(Var, Rec)); | |||
934 | if (!Result.second) | |||
935 | Result.first->second = Rec; | |||
936 | Scopes[Var] = MI.getDebugLoc().get(); | |||
937 | } | |||
938 | }; | |||
939 | ||||
940 | /// Tracker for converting machine value locations and variable values into | |||
941 | /// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs | |||
942 | /// specifying block live-in locations and transfers within blocks. | |||
943 | /// | |||
944 | /// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker | |||
945 | /// and must be initialized with the set of variable values that are live-in to | |||
946 | /// the block. The caller then repeatedly calls process(). TransferTracker picks | |||
947 | /// out variable locations for the live-in variable values (if there _is_ a | |||
948 | /// location) and creates the corresponding DBG_VALUEs. Then, as the block is | |||
949 | /// stepped through, transfers of values between machine locations are | |||
950 | /// identified and if profitable, a DBG_VALUE created. | |||
951 | /// | |||
952 | /// This is where debug use-before-defs would be resolved: a variable with an | |||
953 | /// unavailable value could materialize in the middle of a block, when the | |||
954 | /// value becomes available. Or, we could detect clobbers and re-specify the | |||
955 | /// variable in a backup location. (XXX these are unimplemented). | |||
956 | class TransferTracker { | |||
957 | public: | |||
const TargetInstrInfo *TII;
const TargetLowering *TLI;
/// This machine location tracker is assumed to always contain the up-to-date
/// value mapping for all machine locations. TransferTracker only reads
/// information from it. (XXX make it const?)
MLocTracker *MTracker;
MachineFunction &MF;
/// Whether we should emit entry-value expressions; read from TargetOptions
/// in the constructor.
bool ShouldEmitDebugEntryValues;

/// Record of all changes in variable locations at a block position. Awkwardly
/// we allow inserting either before or after the point: MBB != nullptr
/// indicates it's before, otherwise after.
struct Transfer {
  MachineBasicBlock::instr_iterator Pos; /// Position to insert DBG_VALUEs
  MachineBasicBlock *MBB; /// non-null if we should insert after.
  SmallVector<MachineInstr *, 4> Insts; /// Vector of DBG_VALUEs to insert.
};

/// A variable's current machine location plus the qualifying properties
/// (expression, indirectness) it was last defined with.
struct LocAndProperties {
  LocIdx Loc;
  DbgValueProperties Properties;
};

/// Collection of transfers (DBG_VALUEs) to be inserted.
SmallVector<Transfer, 32> Transfers;

/// Local cache of what-value-is-in-what-LocIdx. Used to identify differences
/// between TransferTrackers view of variable locations and MLocTrackers. For
/// example, MLocTracker observes all clobbers, but TransferTracker lazily
/// does not.
std::vector<ValueIDNum> VarLocs;

/// Map from LocIdxes to which DebugVariables are based on that location.
/// Maintained while stepping through the block. Not accurate if
/// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
std::map<LocIdx, SmallSet<DebugVariable, 4>> ActiveMLocs;

/// Map from DebugVariable to its current location and qualifying meta
/// information. To be used in conjunction with ActiveMLocs to construct
/// enough information for the DBG_VALUEs for a particular LocIdx.
DenseMap<DebugVariable, LocAndProperties> ActiveVLocs;

/// Temporary cache of DBG_VALUEs to be entered into the Transfers collection.
SmallVector<MachineInstr *, 4> PendingDbgValues;

/// Record of a use-before-def: created when a value that's live-in to the
/// current block isn't available in any machine location, but it will be
/// defined in this block.
struct UseBeforeDef {
  /// Value of this variable, def'd in block.
  ValueIDNum ID;
  /// Identity of this variable.
  DebugVariable Var;
  /// Additional variable properties.
  DbgValueProperties Properties;
};

/// Map from instruction index (within the block) to the set of UseBeforeDefs
/// that become defined at that instruction.
DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs;

/// The set of variables that are in UseBeforeDefs and can become a location
/// once the relevant value is defined. An element being erased from this
/// collection prevents the use-before-def materializing.
DenseSet<DebugVariable> UseBeforeDefVariables;

const TargetRegisterInfo &TRI;
const BitVector &CalleeSavedRegs;
1026 | ||||
1027 | TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker, | |||
1028 | MachineFunction &MF, const TargetRegisterInfo &TRI, | |||
1029 | const BitVector &CalleeSavedRegs, const TargetPassConfig &TPC) | |||
1030 | : TII(TII), MTracker(MTracker), MF(MF), TRI(TRI), | |||
1031 | CalleeSavedRegs(CalleeSavedRegs) { | |||
1032 | TLI = MF.getSubtarget().getTargetLowering(); | |||
1033 | auto &TM = TPC.getTM<TargetMachine>(); | |||
1034 | ShouldEmitDebugEntryValues = TM.Options.ShouldEmitDebugEntryValues(); | |||
1035 | } | |||
1036 | ||||
/// Load object with live-in variable values. \p mlocs contains the live-in
/// values in each machine location, while \p vlocs the live-in variable
/// values. This method picks variable locations for the live-in variables,
/// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
/// object fields to track variable locations as we step through the block.
/// FIXME: could just examine mloctracker instead of passing in \p mlocs?
/// NOTE(review): MLocs is indexed without a null check; callers must pass a
/// non-null array covering all NumLocs locations -- the static analyzer
/// reports a possible null-pointer dereference through such live-in arrays.
void loadInlocs(MachineBasicBlock &MBB, ValueIDNum *MLocs,
                SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
                unsigned NumLocs) {
  // Reset all per-block state.
  ActiveMLocs.clear();
  ActiveVLocs.clear();
  VarLocs.clear();
  VarLocs.reserve(NumLocs);
  UseBeforeDefs.clear();
  UseBeforeDefVariables.clear();

  // True if location L is a register aliasing any callee-saved register.
  // LocIDs >= NumRegs are spill slots, which are never callee-saved.
  auto isCalleeSaved = [&](LocIdx L) {
    unsigned Reg = MTracker->LocIdxToLocID[L];
    if (Reg >= MTracker->NumRegs)
      return false;
    for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
      if (CalleeSavedRegs.test(*RAI))
        return true;
    return false;
  };

  // Map of the preferred location for each value.
  std::map<ValueIDNum, LocIdx> ValueToLoc;

  // Produce a map of value numbers to the current machine locs they live
  // in. When emulating VarLocBasedImpl, there should only be one
  // location; when not, we get to pick.
  for (auto Location : MTracker->locations()) {
    LocIdx Idx = Location.Idx;
    ValueIDNum &VNum = MLocs[Idx.asU64()];
    VarLocs.push_back(VNum);
    auto it = ValueToLoc.find(VNum);
    // In order of preference, pick:
    //  * Callee saved registers,
    //  * Other registers,
    //  * Spill slots.
    if (it == ValueToLoc.end() || MTracker->isSpill(it->second) ||
        (!isCalleeSaved(it->second) && isCalleeSaved(Idx.asU64()))) {
      // Insert, or overwrite if insertion failed.
      auto PrefLocRes = ValueToLoc.insert(std::make_pair(VNum, Idx));
      if (!PrefLocRes.second)
        PrefLocRes.first->second = Idx;
    }
  }

  // Now map variables to their picked LocIdxes.
  for (auto Var : VLocs) {
    // Constant-valued variables need no machine location; emit directly.
    if (Var.second.Kind == DbgValue::Const) {
      PendingDbgValues.push_back(
          emitMOLoc(Var.second.MO, Var.first, Var.second.Properties));
      continue;
    }

    // If the value has no location, we can't make a variable location.
    const ValueIDNum &Num = Var.second.ID;
    auto ValuesPreferredLoc = ValueToLoc.find(Num);
    if (ValuesPreferredLoc == ValueToLoc.end()) {
      // If it's a def that occurs in this block, register it as a
      // use-before-def to be resolved as we step through the block.
      if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI())
        addUseBeforeDef(Var.first, Var.second.Properties, Num);
      else
        recoverAsEntryValue(Var.first, Var.second.Properties, Num);
      continue;
    }

    LocIdx M = ValuesPreferredLoc->second;
    auto NewValue = LocAndProperties{M, Var.second.Properties};
    // Insert, or overwrite if the variable was already active.
    auto Result = ActiveVLocs.insert(std::make_pair(Var.first, NewValue));
    if (!Result.second)
      Result.first->second = NewValue;
    ActiveMLocs[M].insert(Var.first);
    PendingDbgValues.push_back(
        MTracker->emitLoc(M, Var.first, Var.second.Properties));
  }
  // Emit all live-in DBG_VALUEs at the top of the block.
  flushDbgValues(MBB.begin(), &MBB);
}
1119 | ||||
1120 | /// Record that \p Var has value \p ID, a value that becomes available | |||
1121 | /// later in the function. | |||
1122 | void addUseBeforeDef(const DebugVariable &Var, | |||
1123 | const DbgValueProperties &Properties, ValueIDNum ID) { | |||
1124 | UseBeforeDef UBD = {ID, Var, Properties}; | |||
1125 | UseBeforeDefs[ID.getInst()].push_back(UBD); | |||
1126 | UseBeforeDefVariables.insert(Var); | |||
1127 | } | |||
1128 | ||||
/// After the instruction at index \p Inst and position \p pos has been
/// processed, check whether it defines a variable value in a use-before-def.
/// If so, and the variable value hasn't changed since the start of the
/// block, create a DBG_VALUE.
void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) {
  // Any use-before-defs waiting on this instruction?
  auto MIt = UseBeforeDefs.find(Inst);
  if (MIt == UseBeforeDefs.end())
    return;

  for (auto &Use : MIt->second) {
    LocIdx L = Use.ID.getLoc();

    // If something goes very wrong, we might end up labelling a COPY
    // instruction or similar with an instruction number, where it doesn't
    // actually define a new value, instead it moves a value. In case this
    // happens, discard.
    if (MTracker->LocIdxToIDNum[L] != Use.ID)
      continue;

    // If a different debug instruction defined the variable value / location
    // since the start of the block, don't materialize this use-before-def.
    if (!UseBeforeDefVariables.count(Use.Var))
      continue;

    PendingDbgValues.push_back(MTracker->emitLoc(L, Use.Var, Use.Properties));
  }
  // Insert the new DBG_VALUEs immediately after the defining instruction.
  flushDbgValues(pos, nullptr);
}
1157 | ||||
1158 | /// Helper to move created DBG_VALUEs into Transfers collection. | |||
1159 | void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) { | |||
1160 | if (PendingDbgValues.size() == 0) | |||
1161 | return; | |||
1162 | ||||
1163 | // Pick out the instruction start position. | |||
1164 | MachineBasicBlock::instr_iterator BundleStart; | |||
1165 | if (MBB && Pos == MBB->begin()) | |||
1166 | BundleStart = MBB->instr_begin(); | |||
1167 | else | |||
1168 | BundleStart = getBundleStart(Pos->getIterator()); | |||
1169 | ||||
1170 | Transfers.push_back({BundleStart, MBB, PendingDbgValues}); | |||
1171 | PendingDbgValues.clear(); | |||
1172 | } | |||
1173 | ||||
1174 | bool isEntryValueVariable(const DebugVariable &Var, | |||
1175 | const DIExpression *Expr) const { | |||
1176 | if (!Var.getVariable()->isParameter()) | |||
1177 | return false; | |||
1178 | ||||
1179 | if (Var.getInlinedAt()) | |||
1180 | return false; | |||
1181 | ||||
1182 | if (Expr->getNumElements() > 0) | |||
1183 | return false; | |||
1184 | ||||
1185 | return true; | |||
1186 | } | |||
1187 | ||||
1188 | bool isEntryValueValue(const ValueIDNum &Val) const { | |||
1189 | // Must be in entry block (block number zero), and be a PHI / live-in value. | |||
1190 | if (Val.getBlock() || !Val.isPHI()) | |||
1191 | return false; | |||
1192 | ||||
1193 | // Entry values must enter in a register. | |||
1194 | if (MTracker->isSpill(Val.getLoc())) | |||
1195 | return false; | |||
1196 | ||||
1197 | Register SP = TLI->getStackPointerRegisterToSaveRestore(); | |||
1198 | Register FP = TRI.getFrameRegister(MF); | |||
1199 | Register Reg = MTracker->LocIdxToLocID[Val.getLoc()]; | |||
1200 | return Reg != SP && Reg != FP; | |||
1201 | } | |||
1202 | ||||
/// Try to salvage variable \p Var, whose value \p Num has no machine
/// location, by emitting a DW_OP_entry_value DBG_VALUE instead. Returns true
/// if such a DBG_VALUE was queued in PendingDbgValues.
bool recoverAsEntryValue(const DebugVariable &Var, DbgValueProperties &Prop,
                         const ValueIDNum &Num) {
  // Is this variable location a candidate to be an entry value. First,
  // should we be trying this at all?
  if (!ShouldEmitDebugEntryValues)
    return false;

  // Is the variable appropriate for entry values (i.e., is a parameter).
  if (!isEntryValueVariable(Var, Prop.DIExpr))
    return false;

  // Is the value assigned to this variable still the entry value?
  if (!isEntryValueValue(Num))
    return false;

  // Emit a variable location using an entry value expression.
  DIExpression *NewExpr =
      DIExpression::prepend(Prop.DIExpr, DIExpression::EntryValue);
  Register Reg = MTracker->LocIdxToLocID[Num.getLoc()];
  MachineOperand MO = MachineOperand::CreateReg(Reg, false);
  MO.setIsDebug(true);

  PendingDbgValues.push_back(emitMOLoc(MO, Var, {NewExpr, Prop.Indirect}));
  return true;
}
1228 | ||||
/// Change a variable value after encountering a DBG_VALUE inside a block.
void redefVar(const MachineInstr &MI) {
  DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                    MI.getDebugLoc()->getInlinedAt());
  DbgValueProperties Properties(MI);

  const MachineOperand &MO = MI.getOperand(0);

  // Ignore non-register locations, we don't transfer those.
  if (!MO.isReg() || MO.getReg() == 0) {
    // Terminate any open location for this variable.
    auto It = ActiveVLocs.find(Var);
    if (It != ActiveVLocs.end()) {
      ActiveMLocs[It->second.Loc].erase(Var);
      ActiveVLocs.erase(It);
    }
    // Any use-before-defs no longer apply.
    UseBeforeDefVariables.erase(Var);
    return;
  }

  // Register location: delegate to the LocIdx-based overload.
  Register Reg = MO.getReg();
  LocIdx NewLoc = MTracker->getRegMLoc(Reg);
  redefVar(MI, Properties, NewLoc);
}
1253 | ||||
  /// Handle a change in variable location within a block. Terminate the
  /// variables current location, and record the value it now refers to, so
  /// that we can detect location transfers later on.
  /// \p OptNewLoc The machine location now describing the variable, or None
  ///              if the variable simply ceases to occupy its old location.
  void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties,
                Optional<LocIdx> OptNewLoc) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    // Any use-before-defs no longer apply.
    UseBeforeDefVariables.erase(Var);

    // Erase any previous location,
    auto It = ActiveVLocs.find(Var);
    if (It != ActiveVLocs.end())
      ActiveMLocs[It->second.Loc].erase(Var);

    // If there _is_ no new location, all we had to do was erase.
    // NOTE(review): on this path the ActiveVLocs entry for Var is left in
    // place (only the ActiveMLocs membership is removed), unlike the
    // one-argument redefVar which erases both -- confirm this is intended.
    if (!OptNewLoc)
      return;
    LocIdx NewLoc = *OptNewLoc;

    // Check whether our local copy of values-by-location in #VarLocs is out of
    // date. Wipe old tracking data for the location if it's been clobbered in
    // the meantime.
    if (MTracker->getNumAtPos(NewLoc) != VarLocs[NewLoc.asU64()]) {
      // Every variable thought to live in NewLoc is stale: forget them.
      // NOTE(review): 'It' is reused after these erasures; this relies on
      // DenseMap::erase not invalidating iterators to other elements.
      for (auto &P : ActiveMLocs[NewLoc]) {
        ActiveVLocs.erase(P);
      }
      ActiveMLocs[NewLoc.asU64()].clear();
      VarLocs[NewLoc.asU64()] = MTracker->getNumAtPos(NewLoc);
    }

    // Record that Var now lives in NewLoc, creating or updating its
    // variable-location record as appropriate.
    ActiveMLocs[NewLoc].insert(Var);
    if (It == ActiveVLocs.end()) {
      ActiveVLocs.insert(
          std::make_pair(Var, LocAndProperties{NewLoc, Properties}));
    } else {
      It->second.Loc = NewLoc;
      It->second.Properties = Properties;
    }
  }
1294 | ||||
  /// Account for a location \p mloc being clobbered. Examine the variable
  /// locations that will be terminated: and try to recover them by using
  /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to
  /// explicitly terminate a location if it can't be recovered.
  void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos,
                   bool MakeUndef = true) {
    // No variables based on this location means nothing to do.
    auto ActiveMLocIt = ActiveMLocs.find(MLoc);
    if (ActiveMLocIt == ActiveMLocs.end())
      return;

    // What was the old variable value?
    ValueIDNum OldValue = VarLocs[MLoc.asU64()];
    VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue;

    // Examine the remaining variable locations: if we can find the same value
    // again, we can recover the location.
    Optional<LocIdx> NewLoc = None;
    for (auto Loc : MTracker->locations())
      if (Loc.Value == OldValue)
        NewLoc = Loc.Idx;

    // If there is no location, and we weren't asked to make the variable
    // explicitly undef, then stop here.
    if (!NewLoc && !MakeUndef) {
      // Try and recover a few more locations with entry values.
      // NOTE(review): ActiveVLocs.find(Var) is dereferenced unchecked; this
      // relies on the invariant that every variable in ActiveMLocs has a
      // corresponding ActiveVLocs entry -- confirm it always holds here.
      for (auto &Var : ActiveMLocIt->second) {
        auto &Prop = ActiveVLocs.find(Var)->second.Properties;
        recoverAsEntryValue(Var, Prop, OldValue);
      }
      flushDbgValues(Pos, nullptr);
      return;
    }

    // Examine all the variables based on this location.
    DenseSet<DebugVariable> NewMLocs;
    for (auto &Var : ActiveMLocIt->second) {
      auto ActiveVLocIt = ActiveVLocs.find(Var);
      // Re-state the variable location: if there's no replacement then NewLoc
      // is None and a $noreg DBG_VALUE will be created. Otherwise, a DBG_VALUE
      // identifying the alternative location will be emitted.
      const DIExpression *Expr = ActiveVLocIt->second.Properties.DIExpr;
      DbgValueProperties Properties(Expr, false);
      PendingDbgValues.push_back(MTracker->emitLoc(NewLoc, Var, Properties));

      // Update machine locations <=> variable locations maps. Defer updating
      // ActiveMLocs to avoid invalidaing the ActiveMLocIt iterator.
      if (!NewLoc) {
        ActiveVLocs.erase(ActiveVLocIt);
      } else {
        ActiveVLocIt->second.Loc = *NewLoc;
        NewMLocs.insert(Var);
      }
    }

    // Commit any deferred ActiveMLoc changes.
    if (!NewMLocs.empty())
      for (auto &Var : NewMLocs)
        ActiveMLocs[*NewLoc].insert(Var);

    // We lazily track what locations have which values; if we've found a new
    // location for the clobbered value, remember it.
    if (NewLoc)
      VarLocs[NewLoc->asU64()] = OldValue;

    flushDbgValues(Pos, nullptr);

    // The clobbered location no longer describes any variable.
    ActiveMLocIt->second.clear();
  }
1363 | ||||
1364 | /// Transfer variables based on \p Src to be based on \p Dst. This handles | |||
1365 | /// both register copies as well as spills and restores. Creates DBG_VALUEs | |||
1366 | /// describing the movement. | |||
1367 | void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) { | |||
1368 | // Does Src still contain the value num we expect? If not, it's been | |||
1369 | // clobbered in the meantime, and our variable locations are stale. | |||
1370 | if (VarLocs[Src.asU64()] != MTracker->getNumAtPos(Src)) | |||
1371 | return; | |||
1372 | ||||
1373 | // assert(ActiveMLocs[Dst].size() == 0); | |||
1374 | //^^^ Legitimate scenario on account of un-clobbered slot being assigned to? | |||
1375 | ActiveMLocs[Dst] = ActiveMLocs[Src]; | |||
1376 | VarLocs[Dst.asU64()] = VarLocs[Src.asU64()]; | |||
1377 | ||||
1378 | // For each variable based on Src; create a location at Dst. | |||
1379 | for (auto &Var : ActiveMLocs[Src]) { | |||
1380 | auto ActiveVLocIt = ActiveVLocs.find(Var); | |||
1381 | assert(ActiveVLocIt != ActiveVLocs.end())(static_cast <bool> (ActiveVLocIt != ActiveVLocs.end()) ? void (0) : __assert_fail ("ActiveVLocIt != ActiveVLocs.end()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1381, __extension__ __PRETTY_FUNCTION__)); | |||
1382 | ActiveVLocIt->second.Loc = Dst; | |||
1383 | ||||
1384 | assert(Dst != 0)(static_cast <bool> (Dst != 0) ? void (0) : __assert_fail ("Dst != 0", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1384, __extension__ __PRETTY_FUNCTION__)); | |||
1385 | MachineInstr *MI = | |||
1386 | MTracker->emitLoc(Dst, Var, ActiveVLocIt->second.Properties); | |||
1387 | PendingDbgValues.push_back(MI); | |||
1388 | } | |||
1389 | ActiveMLocs[Src].clear(); | |||
1390 | flushDbgValues(Pos, nullptr); | |||
1391 | ||||
1392 | // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data | |||
1393 | // about the old location. | |||
1394 | if (EmulateOldLDV) | |||
1395 | VarLocs[Src.asU64()] = ValueIDNum::EmptyValue; | |||
1396 | } | |||
1397 | ||||
1398 | MachineInstrBuilder emitMOLoc(const MachineOperand &MO, | |||
1399 | const DebugVariable &Var, | |||
1400 | const DbgValueProperties &Properties) { | |||
1401 | DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0, | |||
1402 | Var.getVariable()->getScope(), | |||
1403 | const_cast<DILocation *>(Var.getInlinedAt())); | |||
1404 | auto MIB = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE)); | |||
1405 | MIB.add(MO); | |||
1406 | if (Properties.Indirect) | |||
1407 | MIB.addImm(0); | |||
1408 | else | |||
1409 | MIB.addReg(0); | |||
1410 | MIB.addMetadata(Var.getVariable()); | |||
1411 | MIB.addMetadata(Properties.DIExpr); | |||
1412 | return MIB; | |||
1413 | } | |||
1414 | }; | |||
1415 | ||||
class InstrRefBasedLDV : public LDVImpl {
private:
  using FragmentInfo = DIExpression::FragmentInfo;
  using OptFragmentInfo = Optional<DIExpression::FragmentInfo>;

  // Helper while building OverlapMap, a map of all fragments seen for a given
  // DILocalVariable.
  using VarToFragments =
      DenseMap<const DILocalVariable *, SmallSet<FragmentInfo, 4>>;

  /// Machine location/value transfer function, a mapping of which locations
  /// are assigned which new values.
  using MLocTransferMap = std::map<LocIdx, ValueIDNum>;

  /// Live in/out structure for the variable values: a per-block map of
  /// variables to their values. XXX, better name?
  using LiveIdxT =
      DenseMap<const MachineBasicBlock *, DenseMap<DebugVariable, DbgValue> *>;

  /// A single variable paired with the value describing it.
  using VarAndLoc = std::pair<DebugVariable, DbgValue>;

  /// Type for a live-in value: the predecessor block, and its value.
  using InValueT = std::pair<MachineBasicBlock *, DbgValue *>;

  /// Vector (per block) of a collection (inner smallvector) of live-ins.
  /// Used as the result type for the variable value dataflow problem.
  using LiveInsT = SmallVector<SmallVector<VarAndLoc, 8>, 8>;

  // Per-function target/frame information and analyses, cached for the
  // duration of a run over one MachineFunction.
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  const TargetFrameLowering *TFI;
  const MachineFrameInfo *MFI;
  BitVector CalleeSavedRegs;
  LexicalScopes LS;
  TargetPassConfig *TPC;

  /// Object to track machine locations as we step through a block. Could
  /// probably be a field rather than a pointer, as it's always used.
  MLocTracker *MTracker;

  /// Number of the current block LiveDebugValues is stepping through.
  unsigned CurBB;

  /// Number of the current instruction LiveDebugValues is evaluating.
  unsigned CurInst;

  /// Variable tracker -- listens to DBG_VALUEs occurring as InstrRefBasedImpl
  /// steps through a block. Reads the values at each location from the
  /// MLocTracker object.
  VLocTracker *VTracker;

  /// Tracker for transfers, listens to DBG_VALUEs and transfers of values
  /// between locations during stepping, creates new DBG_VALUEs when values move
  /// location.
  TransferTracker *TTracker;

  /// Blocks which are artificial, i.e. blocks which exclusively contain
  /// instructions without DebugLocs, or with line 0 locations.
  SmallPtrSet<const MachineBasicBlock *, 16> ArtificialBlocks;

  // Mapping of blocks to and from their RPOT order.
  DenseMap<unsigned int, MachineBasicBlock *> OrderToBB;
  DenseMap<MachineBasicBlock *, unsigned int> BBToOrder;
  DenseMap<unsigned, unsigned> BBNumToRPO;

  /// Pair of MachineInstr, and its 1-based offset into the containing block.
  using InstAndNum = std::pair<const MachineInstr *, unsigned>;
  /// Map from debug instruction number to the MachineInstr labelled with that
  /// number, and its location within the function. Used to transform
  /// instruction numbers in DBG_INSTR_REFs into machine value numbers.
  std::map<uint64_t, InstAndNum> DebugInstrNumToInstr;

  /// Record of where we observed a DBG_PHI instruction.
  class DebugPHIRecord {
  public:
    uint64_t InstrNum; ///< Instruction number of this DBG_PHI.
    MachineBasicBlock *MBB; ///< Block where DBG_PHI occurred.
    ValueIDNum ValueRead; ///< The value number read by the DBG_PHI.
    LocIdx ReadLoc; ///< Register/Stack location the DBG_PHI reads.

    operator unsigned() const { return InstrNum; }
  };

  /// Map from instruction numbers defined by DBG_PHIs to a record of what that
  /// DBG_PHI read and where. Populated and edited during the machine value
  /// location problem -- we use LLVMs SSA Updater to fix changes by
  /// optimizations that destroy PHI instructions.
  SmallVector<DebugPHIRecord, 32> DebugPHINumToValue;

  // Map of overlapping variable fragments.
  OverlapMap OverlapFragments;
  VarToFragments SeenFragments;

  /// Tests whether this instruction is a spill to a stack slot.
  bool isSpillInstruction(const MachineInstr &MI, MachineFunction *MF);

  /// Decide if @MI is a spill instruction and return true if it is. We use 2
  /// criteria to make this decision:
  /// - Is this instruction a store to a spill slot?
  /// - Is there a register operand that is both used and killed?
  /// TODO: Store optimization can fold spills into other stores (including
  /// other spills). We do not handle this yet (more than one memory operand).
  bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF,
                       unsigned &Reg);

  /// If a given instruction is identified as a spill, return the spill slot
  /// and set \p Reg to the spilled register.
  Optional<SpillLoc> isRestoreInstruction(const MachineInstr &MI,
                                          MachineFunction *MF, unsigned &Reg);

  /// Given a spill instruction, extract the register and offset used to
  /// address the spill slot in a target independent way.
  SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);

  /// Observe a single instruction while stepping through a block.
  /// NOTE(review): \p MLiveOuts / \p MLiveIns default to nullptr and are
  /// forwarded to handlers such as transferDebugInstrRef; static analysis
  /// reports a null dereference of MLiveIns downstream. Confirm that callers
  /// pass real live-in/live-out tables whenever the instruction stream can
  /// contain DBG_INSTR_REFs.
  void process(MachineInstr &MI, ValueIDNum **MLiveOuts = nullptr,
               ValueIDNum **MLiveIns = nullptr);

  /// Examines whether \p MI is a DBG_VALUE and notifies trackers.
  /// \returns true if MI was recognized and processed.
  bool transferDebugValue(const MachineInstr &MI);

  /// Examines whether \p MI is a DBG_INSTR_REF and notifies trackers.
  /// \returns true if MI was recognized and processed.
  /// NOTE(review): see the caution on process() about \p MLiveOuts and
  /// \p MLiveIns potentially being null when forwarded from its defaults.
  bool transferDebugInstrRef(MachineInstr &MI, ValueIDNum **MLiveOuts,
                             ValueIDNum **MLiveIns);

  /// Stores value-information about where this PHI occurred, and what
  /// instruction number is associated with it.
  /// \returns true if MI was recognized and processed.
  bool transferDebugPHI(MachineInstr &MI);

  /// Examines whether \p MI is copy instruction, and notifies trackers.
  /// \returns true if MI was recognized and processed.
  bool transferRegisterCopy(MachineInstr &MI);

  /// Examines whether \p MI is stack spill or restore  instruction, and
  /// notifies trackers. \returns true if MI was recognized and processed.
  bool transferSpillOrRestoreInst(MachineInstr &MI);

  /// Examines \p MI for any registers that it defines, and notifies trackers.
  void transferRegisterDef(MachineInstr &MI);

  /// Copy one location to the other, accounting for movement of subregisters
  /// too.
  void performCopy(Register Src, Register Dst);

  /// Record the fragment (if any) that \p MI's variable covers, building up
  /// OverlapFragments / SeenFragments.
  void accumulateFragmentMap(MachineInstr &MI);

  /// Determine the machine value number referred to by (potentially several)
  /// DBG_PHI instructions. Block duplication and tail folding can duplicate
  /// DBG_PHIs, shifting the position where values in registers merge, and
  /// forming another mini-ssa problem to solve.
  /// \p Here the position of a DBG_INSTR_REF seeking a machine value number
  /// \p InstrNum Debug instruction number defined by DBG_PHI instructions.
  /// \returns The machine value number at position Here, or None.
  Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
                                      ValueIDNum **MLiveOuts,
                                      ValueIDNum **MLiveIns, MachineInstr &Here,
                                      uint64_t InstrNum);

  /// Step through the function, recording register definitions and movements
  /// in an MLocTracker. Convert the observations into a per-block transfer
  /// function in \p MLocTransfer, suitable for using with the machine value
  /// location dataflow problem.
  void
  produceMLocTransferFunction(MachineFunction &MF,
                              SmallVectorImpl<MLocTransferMap> &MLocTransfer,
                              unsigned MaxNumBlocks);

  /// Solve the machine value location dataflow problem. Takes as input the
  /// transfer functions in \p MLocTransfer. Writes the output live-in and
  /// live-out arrays to the (initialized to zero) multidimensional arrays in
  /// \p MInLocs and \p MOutLocs. The outer dimension is indexed by block
  /// number, the inner by LocIdx.
  void mlocDataflow(ValueIDNum **MInLocs, ValueIDNum **MOutLocs,
                    SmallVectorImpl<MLocTransferMap> &MLocTransfer);

  /// Perform a control flow join (lattice value meet) of the values in machine
  /// locations at \p MBB. Follows the algorithm described in the file-comment,
  /// reading live-outs of predecessors from \p OutLocs, the current live ins
  /// from \p InLocs, and assigning the newly computed live ins back into
  /// \p InLocs. \returns two bools -- the first indicates whether a change
  /// was made, the second whether a lattice downgrade occurred. If the latter
  /// is true, revisiting this block is necessary.
  std::tuple<bool, bool>
  mlocJoin(MachineBasicBlock &MBB,
           SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
           ValueIDNum **OutLocs, ValueIDNum *InLocs);

  /// Solve the variable value dataflow problem, for a single lexical scope.
  /// Uses the algorithm from the file comment to resolve control flow joins,
  /// although there are extra hacks, see vlocJoin. Reads the
  /// locations of values from the \p MInLocs and \p MOutLocs arrays (see
  /// mlocDataflow) and reads the variable values transfer function from
  /// \p AllTheVlocs. Live-in and Live-out variable values are stored locally,
  /// with the live-ins permanently stored to \p Output once the fixedpoint is
  /// reached.
  /// \p VarsWeCareAbout contains a collection of the variables in \p Scope
  /// that we should be tracking.
  /// \p AssignBlocks contains the set of blocks that aren't in \p Scope, but
  /// which do contain DBG_VALUEs, which VarLocBasedImpl tracks locations
  /// through.
  void vlocDataflow(const LexicalScope *Scope, const DILocation *DILoc,
                    const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
                    SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks,
                    LiveInsT &Output, ValueIDNum **MOutLocs,
                    ValueIDNum **MInLocs,
                    SmallVectorImpl<VLocTracker> &AllTheVLocs);

  /// Compute the live-ins to a block, considering control flow merges according
  /// to the method in the file comment. Live out and live in variable values
  /// are stored in \p VLOCOutLocs and \p VLOCInLocs. The live-ins for \p MBB
  /// are computed and stored into \p VLOCInLocs. \returns true if the live-ins
  /// are modified.
  /// \p InLocsT Output argument, storage for calculated live-ins.
  /// \returns two bools -- the first indicates whether a change
  /// was made, the second whether a lattice downgrade occurred. If the latter
  /// is true, revisiting this block is necessary.
  std::tuple<bool, bool>
  vlocJoin(MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, LiveIdxT &VLOCInLocs,
           SmallPtrSet<const MachineBasicBlock *, 16> *VLOCVisited,
           unsigned BBNum, const SmallSet<DebugVariable, 4> &AllVars,
           ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
           SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks,
           SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
           DenseMap<DebugVariable, DbgValue> &InLocsT);

  /// Continue exploration of the variable-value lattice, as explained in the
  /// file-level comment. \p OldLiveInLocation contains the current
  /// exploration position, from which we need to descend further. \p Values
  /// contains the set of live-in values, \p CurBlockRPONum the RPO number of
  /// the current block, and \p CandidateLocations a set of locations that
  /// should be considered as PHI locations, if we reach the bottom of the
  /// lattice. \returns true if we should downgrade; the value is the agreeing
  /// value number in a non-backedge predecessor.
  bool vlocDowngradeLattice(const MachineBasicBlock &MBB,
                            const DbgValue &OldLiveInLocation,
                            const SmallVectorImpl<InValueT> &Values,
                            unsigned CurBlockRPONum);

  /// For the given block and live-outs feeding into it, try to find a
  /// machine location where they all join. If a solution for all predecessors
  /// can't be found, a location where all non-backedge-predecessors join
  /// will be returned instead. While this method finds a join location, this
  /// says nothing as to whether it should be used.
  /// \returns Pair of value ID if found, and true when the correct value
  /// is available on all predecessor edges, or false if it's only available
  /// for non-backedge predecessors.
  std::tuple<Optional<ValueIDNum>, bool>
  pickVPHILoc(MachineBasicBlock &MBB, const DebugVariable &Var,
              const LiveIdxT &LiveOuts, ValueIDNum **MOutLocs,
              ValueIDNum **MInLocs,
              const SmallVectorImpl<MachineBasicBlock *> &BlockOrders);

  /// Given the solutions to the two dataflow problems, machine value locations
  /// in \p MInLocs and live-in variable values in \p SavedLiveIns, runs the
  /// TransferTracker class over the function to produce live-in and transfer
  /// DBG_VALUEs, then inserts them. Groups of DBG_VALUEs are inserted in the
  /// order given by AllVarsNumbering -- this could be any stable order, but
  /// right now "order of appearence in function, when explored in RPO", so
  /// that we can compare explictly against VarLocBasedImpl.
  void emitLocations(MachineFunction &MF, LiveInsT SavedLiveIns,
                     ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
                     DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
                     const TargetPassConfig &TPC);

  /// Boilerplate computation of some initial sets, artifical blocks and
  /// RPOT block ordering.
  void initialSetup(MachineFunction &MF);

  /// Main entry point from LDVImpl: run the whole analysis over \p MF.
  bool ExtendRanges(MachineFunction &MF, TargetPassConfig *TPC) override;

public:
  /// Default construct and initialize the pass.
  InstrRefBasedLDV();

  LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__))
  void dump_mloc_transfer(const MLocTransferMap &mloc_transfer) const;

  /// Returns true if location \p L is (or aliases) a callee-saved register.
  bool isCalleeSaved(LocIdx L) {
    unsigned Reg = MTracker->LocIdxToLocID[L];
    // Check Reg and all of its register aliases (IncludeSelf == true)
    // against the callee-saved set computed for this function.
    for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
      if (CalleeSavedRegs.test(*RAI))
        return true;
    return false;
  }
};
1704 | ||||
1705 | } // end anonymous namespace | |||
1706 | ||||
1707 | //===----------------------------------------------------------------------===// | |||
1708 | // Implementation | |||
1709 | //===----------------------------------------------------------------------===// | |||
1710 | ||||
// Sentinel "empty/invalid value" marker: every field saturated to UINT_MAX.
ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX(2147483647 *2U +1U), UINT_MAX(2147483647 *2U +1U), UINT_MAX(2147483647 *2U +1U)};
1712 | ||||
1713 | /// Default construct and initialize the pass. | |||
1714 | InstrRefBasedLDV::InstrRefBasedLDV() {} | |||
1715 | ||||
1716 | //===----------------------------------------------------------------------===// | |||
1717 | // Debug Range Extension Implementation | |||
1718 | //===----------------------------------------------------------------------===// | |||
1719 | ||||
1720 | #ifndef NDEBUG | |||
1721 | // Something to restore in the future. | |||
1722 | // void InstrRefBasedLDV::printVarLocInMBB(..) | |||
1723 | #endif | |||
1724 | ||||
1725 | SpillLoc | |||
1726 | InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { | |||
1727 | assert(MI.hasOneMemOperand() &&(static_cast <bool> (MI.hasOneMemOperand() && "Spill instruction does not have exactly one memory operand?" ) ? void (0) : __assert_fail ("MI.hasOneMemOperand() && \"Spill instruction does not have exactly one memory operand?\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1728, __extension__ __PRETTY_FUNCTION__)) | |||
1728 | "Spill instruction does not have exactly one memory operand?")(static_cast <bool> (MI.hasOneMemOperand() && "Spill instruction does not have exactly one memory operand?" ) ? void (0) : __assert_fail ("MI.hasOneMemOperand() && \"Spill instruction does not have exactly one memory operand?\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1728, __extension__ __PRETTY_FUNCTION__)); | |||
1729 | auto MMOI = MI.memoperands_begin(); | |||
1730 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); | |||
1731 | assert(PVal->kind() == PseudoSourceValue::FixedStack &&(static_cast <bool> (PVal->kind() == PseudoSourceValue ::FixedStack && "Inconsistent memory operand in spill instruction" ) ? void (0) : __assert_fail ("PVal->kind() == PseudoSourceValue::FixedStack && \"Inconsistent memory operand in spill instruction\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1732, __extension__ __PRETTY_FUNCTION__)) | |||
1732 | "Inconsistent memory operand in spill instruction")(static_cast <bool> (PVal->kind() == PseudoSourceValue ::FixedStack && "Inconsistent memory operand in spill instruction" ) ? void (0) : __assert_fail ("PVal->kind() == PseudoSourceValue::FixedStack && \"Inconsistent memory operand in spill instruction\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1732, __extension__ __PRETTY_FUNCTION__)); | |||
1733 | int FI = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex(); | |||
1734 | const MachineBasicBlock *MBB = MI.getParent(); | |||
1735 | Register Reg; | |||
1736 | StackOffset Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg); | |||
1737 | return {Reg, Offset}; | |||
1738 | } | |||
1739 | ||||
/// End all previous ranges related to @MI and start a new range from @MI
/// if it is a DBG_VALUE instr.
/// \returns true when MI is a DBG_VALUE (whether or not anything was
/// recorded); false so the caller tries other handlers.
bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) {
  if (!MI.isDebugValue())
    return false;

  const DILocalVariable *Var = MI.getDebugVariable();
  const DIExpression *Expr = MI.getDebugExpression();
  const DILocation *DebugLoc = MI.getDebugLoc();
  const DILocation *InlinedAt = DebugLoc->getInlinedAt();
  assert(Var->isValidLocationForIntrinsic(DebugLoc) &&(static_cast <bool> (Var->isValidLocationForIntrinsic (DebugLoc) && "Expected inlined-at fields to agree") ? void (0) : __assert_fail ("Var->isValidLocationForIntrinsic(DebugLoc) && \"Expected inlined-at fields to agree\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1751, __extension__ __PRETTY_FUNCTION__))
         "Expected inlined-at fields to agree")(static_cast <bool> (Var->isValidLocationForIntrinsic (DebugLoc) && "Expected inlined-at fields to agree") ? void (0) : __assert_fail ("Var->isValidLocationForIntrinsic(DebugLoc) && \"Expected inlined-at fields to agree\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1751, __extension__ __PRETTY_FUNCTION__));

  // NOTE(review): V appears unused in the remainder of this function --
  // possibly vestigial; confirm before removing (it also keeps Var/Expr/
  // InlinedAt "used" in NDEBUG builds).
  DebugVariable V(Var, Expr, InlinedAt);
  DbgValueProperties Properties(MI);

  // If there are no instructions in this lexical scope, do no location tracking
  // at all, this variable shouldn't get a legitimate location range.
  auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get());
  if (Scope == nullptr)
    return true; // handled it; by doing nothing

  const MachineOperand &MO = MI.getOperand(0);

  // MLocTracker needs to know that this register is read, even if it's only
  // read by a debug inst.
  if (MO.isReg() && MO.getReg() != 0)
    (void)MTracker->readReg(MO.getReg());

  // If we're preparing for the second analysis (variables), the machine value
  // locations are already solved, and we report this DBG_VALUE and the value
  // it refers to to VLocTracker.
  if (VTracker) {
    if (MO.isReg()) {
      // Feed defVar the new variable location, or if this is a
      // DBG_VALUE $noreg, feed defVar None.
      if (MO.getReg())
        VTracker->defVar(MI, Properties, MTracker->readReg(MO.getReg()));
      else
        VTracker->defVar(MI, Properties, None);
    } else if (MI.getOperand(0).isImm() || MI.getOperand(0).isFPImm() ||
               MI.getOperand(0).isCImm()) {
      // Constant-valued DBG_VALUEs are recorded directly from the operand.
      VTracker->defVar(MI, MI.getOperand(0));
    }
  }

  // If performing final tracking of transfers, report this variable definition
  // to the TransferTracker too.
  if (TTracker)
    TTracker->redefVar(MI);
  return true;
}
1792 | ||||
1793 | bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, | |||
1794 | ValueIDNum **MLiveOuts, | |||
1795 | ValueIDNum **MLiveIns) { | |||
1796 | if (!MI.isDebugRef()) | |||
1797 | return false; | |||
1798 | ||||
1799 | // Only handle this instruction when we are building the variable value | |||
1800 | // transfer function. | |||
1801 | if (!VTracker) | |||
1802 | return false; | |||
1803 | ||||
1804 | unsigned InstNo = MI.getOperand(0).getImm(); | |||
1805 | unsigned OpNo = MI.getOperand(1).getImm(); | |||
1806 | ||||
1807 | const DILocalVariable *Var = MI.getDebugVariable(); | |||
1808 | const DIExpression *Expr = MI.getDebugExpression(); | |||
1809 | const DILocation *DebugLoc = MI.getDebugLoc(); | |||
1810 | const DILocation *InlinedAt = DebugLoc->getInlinedAt(); | |||
1811 | assert(Var->isValidLocationForIntrinsic(DebugLoc) &&(static_cast <bool> (Var->isValidLocationForIntrinsic (DebugLoc) && "Expected inlined-at fields to agree") ? void (0) : __assert_fail ("Var->isValidLocationForIntrinsic(DebugLoc) && \"Expected inlined-at fields to agree\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1812, __extension__ __PRETTY_FUNCTION__)) | |||
1812 | "Expected inlined-at fields to agree")(static_cast <bool> (Var->isValidLocationForIntrinsic (DebugLoc) && "Expected inlined-at fields to agree") ? void (0) : __assert_fail ("Var->isValidLocationForIntrinsic(DebugLoc) && \"Expected inlined-at fields to agree\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1812, __extension__ __PRETTY_FUNCTION__)); | |||
1813 | ||||
1814 | DebugVariable V(Var, Expr, InlinedAt); | |||
1815 | ||||
1816 | auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get()); | |||
1817 | if (Scope == nullptr) | |||
1818 | return true; // Handled by doing nothing. This variable is never in scope. | |||
1819 | ||||
1820 | const MachineFunction &MF = *MI.getParent()->getParent(); | |||
1821 | ||||
1822 | // Various optimizations may have happened to the value during codegen, | |||
1823 | // recorded in the value substitution table. Apply any substitutions to | |||
1824 | // the instruction / operand number in this DBG_INSTR_REF, and collect | |||
1825 | // any subregister extractions performed during optimization. | |||
1826 | ||||
1827 | // Create dummy substitution with Src set, for lookup. | |||
1828 | auto SoughtSub = | |||
1829 | MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0); | |||
1830 | ||||
1831 | SmallVector<unsigned, 4> SeenSubregs; | |||
1832 | auto LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub); | |||
1833 | while (LowerBoundIt != MF.DebugValueSubstitutions.end() && | |||
1834 | LowerBoundIt->Src == SoughtSub.Src) { | |||
1835 | std::tie(InstNo, OpNo) = LowerBoundIt->Dest; | |||
1836 | SoughtSub.Src = LowerBoundIt->Dest; | |||
1837 | if (unsigned Subreg = LowerBoundIt->Subreg) | |||
1838 | SeenSubregs.push_back(Subreg); | |||
1839 | LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub); | |||
1840 | } | |||
1841 | ||||
1842 | // Default machine value number is <None> -- if no instruction defines | |||
1843 | // the corresponding value, it must have been optimized out. | |||
1844 | Optional<ValueIDNum> NewID = None; | |||
1845 | ||||
1846 | // Try to lookup the instruction number, and find the machine value number | |||
1847 | // that it defines. It could be an instruction, or a PHI. | |||
1848 | auto InstrIt = DebugInstrNumToInstr.find(InstNo); | |||
1849 | auto PHIIt = std::lower_bound(DebugPHINumToValue.begin(), | |||
1850 | DebugPHINumToValue.end(), InstNo); | |||
1851 | if (InstrIt != DebugInstrNumToInstr.end()) { | |||
1852 | const MachineInstr &TargetInstr = *InstrIt->second.first; | |||
1853 | uint64_t BlockNo = TargetInstr.getParent()->getNumber(); | |||
1854 | ||||
1855 | // Pick out the designated operand. | |||
1856 | assert(OpNo < TargetInstr.getNumOperands())(static_cast <bool> (OpNo < TargetInstr.getNumOperands ()) ? void (0) : __assert_fail ("OpNo < TargetInstr.getNumOperands()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1856, __extension__ __PRETTY_FUNCTION__)); | |||
1857 | const MachineOperand &MO = TargetInstr.getOperand(OpNo); | |||
1858 | ||||
1859 | // Today, this can only be a register. | |||
1860 | assert(MO.isReg() && MO.isDef())(static_cast <bool> (MO.isReg() && MO.isDef()) ? void (0) : __assert_fail ("MO.isReg() && MO.isDef()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1860, __extension__ __PRETTY_FUNCTION__)); | |||
1861 | ||||
1862 | unsigned LocID = MTracker->getLocID(MO.getReg(), false); | |||
1863 | LocIdx L = MTracker->LocIDToLocIdx[LocID]; | |||
1864 | NewID = ValueIDNum(BlockNo, InstrIt->second.second, L); | |||
1865 | } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) { | |||
1866 | // It's actually a PHI value. Which value it is might not be obvious, use | |||
1867 | // the resolver helper to find out. | |||
1868 | NewID = resolveDbgPHIs(*MI.getParent()->getParent(), MLiveOuts, MLiveIns, | |||
1869 | MI, InstNo); | |||
1870 | } | |||
1871 | ||||
1872 | // Apply any subregister extractions, in reverse. We might have seen code | |||
1873 | // like this: | |||
1874 | // CALL64 @foo, implicit-def $rax | |||
1875 | // %0:gr64 = COPY $rax | |||
1876 | // %1:gr32 = COPY %0.sub_32bit | |||
1877 | // %2:gr16 = COPY %1.sub_16bit | |||
1878 | // %3:gr8 = COPY %2.sub_8bit | |||
1879 | // In which case each copy would have been recorded as a substitution with | |||
1880 | // a subregister qualifier. Apply those qualifiers now. | |||
1881 | if (NewID && !SeenSubregs.empty()) { | |||
1882 | unsigned Offset = 0; | |||
1883 | unsigned Size = 0; | |||
1884 | ||||
1885 | // Look at each subregister that we passed through, and progressively | |||
1886 | // narrow in, accumulating any offsets that occur. Substitutions should | |||
1887 | // only ever be the same or narrower width than what they read from; | |||
1888 | // iterate in reverse order so that we go from wide to small. | |||
1889 | for (unsigned Subreg : reverse(SeenSubregs)) { | |||
1890 | unsigned ThisSize = TRI->getSubRegIdxSize(Subreg); | |||
1891 | unsigned ThisOffset = TRI->getSubRegIdxOffset(Subreg); | |||
1892 | Offset += ThisOffset; | |||
1893 | Size = (Size == 0) ? ThisSize : std::min(Size, ThisSize); | |||
1894 | } | |||
1895 | ||||
1896 | // If that worked, look for an appropriate subregister with the register | |||
1897 | // where the define happens. Don't look at values that were defined during | |||
1898 | // a stack write: we can't currently express register locations within | |||
1899 | // spills. | |||
1900 | LocIdx L = NewID->getLoc(); | |||
1901 | if (NewID && !MTracker->isSpill(L)) { | |||
1902 | // Find the register class for the register where this def happened. | |||
1903 | // FIXME: no index for this? | |||
1904 | Register Reg = MTracker->LocIdxToLocID[L]; | |||
1905 | const TargetRegisterClass *TRC = nullptr; | |||
1906 | for (auto *TRCI : TRI->regclasses()) | |||
1907 | if (TRCI->contains(Reg)) | |||
1908 | TRC = TRCI; | |||
1909 | assert(TRC && "Couldn't find target register class?")(static_cast <bool> (TRC && "Couldn't find target register class?" ) ? void (0) : __assert_fail ("TRC && \"Couldn't find target register class?\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 1909, __extension__ __PRETTY_FUNCTION__)); | |||
1910 | ||||
1911 | // If the register we have isn't the right size or in the right place, | |||
1912 | // Try to find a subregister inside it. | |||
1913 | unsigned MainRegSize = TRI->getRegSizeInBits(*TRC); | |||
1914 | if (Size != MainRegSize || Offset) { | |||
1915 | // Enumerate all subregisters, searching. | |||
1916 | Register NewReg = 0; | |||
1917 | for (MCSubRegIterator SRI(Reg, TRI, false); SRI.isValid(); ++SRI) { | |||
1918 | unsigned Subreg = TRI->getSubRegIndex(Reg, *SRI); | |||
1919 | unsigned SubregSize = TRI->getSubRegIdxSize(Subreg); | |||
1920 | unsigned SubregOffset = TRI->getSubRegIdxOffset(Subreg); | |||
1921 | if (SubregSize == Size && SubregOffset == Offset) { | |||
1922 | NewReg = *SRI; | |||
1923 | break; | |||
1924 | } | |||
1925 | } | |||
1926 | ||||
1927 | // If we didn't find anything: there's no way to express our value. | |||
1928 | if (!NewReg) { | |||
1929 | NewID = None; | |||
1930 | } else { | |||
1931 | // Re-state the value as being defined within the subregister | |||
1932 | // that we found. | |||
1933 | LocIdx NewLoc = MTracker->lookupOrTrackRegister(NewReg); | |||
1934 | NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc); | |||
1935 | } | |||
1936 | } | |||
1937 | } else { | |||
1938 | // If we can't handle subregisters, unset the new value. | |||
1939 | NewID = None; | |||
1940 | } | |||
1941 | } | |||
1942 | ||||
1943 | // We, we have a value number or None. Tell the variable value tracker about | |||
1944 | // it. The rest of this LiveDebugValues implementation acts exactly the same | |||
1945 | // for DBG_INSTR_REFs as DBG_VALUEs (just, the former can refer to values that | |||
1946 | // aren't immediately available). | |||
1947 | DbgValueProperties Properties(Expr, false); | |||
1948 | VTracker->defVar(MI, Properties, NewID); | |||
1949 | ||||
1950 | // If we're on the final pass through the function, decompose this INSTR_REF | |||
1951 | // into a plain DBG_VALUE. | |||
1952 | if (!TTracker) | |||
1953 | return true; | |||
1954 | ||||
1955 | // Pick a location for the machine value number, if such a location exists. | |||
1956 | // (This information could be stored in TransferTracker to make it faster). | |||
1957 | Optional<LocIdx> FoundLoc = None; | |||
1958 | for (auto Location : MTracker->locations()) { | |||
1959 | LocIdx CurL = Location.Idx; | |||
1960 | ValueIDNum ID = MTracker->LocIdxToIDNum[CurL]; | |||
1961 | if (NewID && ID == NewID) { | |||
1962 | // If this is the first location with that value, pick it. Otherwise, | |||
1963 | // consider whether it's a "longer term" location. | |||
1964 | if (!FoundLoc) { | |||
1965 | FoundLoc = CurL; | |||
1966 | continue; | |||
1967 | } | |||
1968 | ||||
1969 | if (MTracker->isSpill(CurL)) | |||
1970 | FoundLoc = CurL; // Spills are a longer term location. | |||
1971 | else if (!MTracker->isSpill(*FoundLoc) && | |||
1972 | !MTracker->isSpill(CurL) && | |||
1973 | !isCalleeSaved(*FoundLoc) && | |||
1974 | isCalleeSaved(CurL)) | |||
1975 | FoundLoc = CurL; // Callee saved regs are longer term than normal. | |||
1976 | } | |||
1977 | } | |||
1978 | ||||
1979 | // Tell transfer tracker that the variable value has changed. | |||
1980 | TTracker->redefVar(MI, Properties, FoundLoc); | |||
1981 | ||||
1982 | // If there was a value with no location; but the value is defined in a | |||
1983 | // later instruction in this block, this is a block-local use-before-def. | |||
1984 | if (!FoundLoc && NewID && NewID->getBlock() == CurBB && | |||
1985 | NewID->getInst() > CurInst) | |||
1986 | TTracker->addUseBeforeDef(V, {MI.getDebugExpression(), false}, *NewID); | |||
1987 | ||||
1988 | // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant. | |||
1989 | // This DBG_VALUE is potentially a $noreg / undefined location, if | |||
1990 | // FoundLoc is None. | |||
1991 | // (XXX -- could morph the DBG_INSTR_REF in the future). | |||
1992 | MachineInstr *DbgMI = MTracker->emitLoc(FoundLoc, V, Properties); | |||
1993 | TTracker->PendingDbgValues.push_back(DbgMI); | |||
1994 | TTracker->flushDbgValues(MI.getIterator(), nullptr); | |||
1995 | return true; | |||
1996 | } | |||
1997 | ||||
1998 | bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { | |||
1999 | if (!MI.isDebugPHI()) | |||
2000 | return false; | |||
2001 | ||||
2002 | // Analyse these only when solving the machine value location problem. | |||
2003 | if (VTracker || TTracker) | |||
2004 | return true; | |||
2005 | ||||
2006 | // First operand is the value location, either a stack slot or register. | |||
2007 | // Second is the debug instruction number of the original PHI. | |||
2008 | const MachineOperand &MO = MI.getOperand(0); | |||
2009 | unsigned InstrNum = MI.getOperand(1).getImm(); | |||
2010 | ||||
2011 | if (MO.isReg()) { | |||
2012 | // The value is whatever's currently in the register. Read and record it, | |||
2013 | // to be analysed later. | |||
2014 | Register Reg = MO.getReg(); | |||
2015 | ValueIDNum Num = MTracker->readReg(Reg); | |||
2016 | auto PHIRec = DebugPHIRecord( | |||
2017 | {InstrNum, MI.getParent(), Num, MTracker->lookupOrTrackRegister(Reg)}); | |||
2018 | DebugPHINumToValue.push_back(PHIRec); | |||
2019 | } else { | |||
2020 | // The value is whatever's in this stack slot. | |||
2021 | assert(MO.isFI())(static_cast <bool> (MO.isFI()) ? void (0) : __assert_fail ("MO.isFI()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2021, __extension__ __PRETTY_FUNCTION__)); | |||
2022 | unsigned FI = MO.getIndex(); | |||
2023 | ||||
2024 | // If the stack slot is dead, then this was optimized away. | |||
2025 | // FIXME: stack slot colouring should account for slots that get merged. | |||
2026 | if (MFI->isDeadObjectIndex(FI)) | |||
2027 | return true; | |||
2028 | ||||
2029 | // Identify this spill slot. | |||
2030 | Register Base; | |||
2031 | StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base); | |||
2032 | SpillLoc SL = {Base, Offs}; | |||
2033 | Optional<ValueIDNum> Num = MTracker->readSpill(SL); | |||
2034 | ||||
2035 | if (!Num) | |||
2036 | // Nothing ever writes to this slot. Curious, but nothing we can do. | |||
2037 | return true; | |||
2038 | ||||
2039 | // Record this DBG_PHI for later analysis. | |||
2040 | auto DbgPHI = DebugPHIRecord( | |||
2041 | {InstrNum, MI.getParent(), *Num, *MTracker->getSpillMLoc(SL)}); | |||
2042 | DebugPHINumToValue.push_back(DbgPHI); | |||
2043 | } | |||
2044 | ||||
2045 | return true; | |||
2046 | } | |||
2047 | ||||
2048 | void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { | |||
2049 | // Meta Instructions do not affect the debug liveness of any register they | |||
2050 | // define. | |||
2051 | if (MI.isImplicitDef()) { | |||
2052 | // Except when there's an implicit def, and the location it's defining has | |||
2053 | // no value number. The whole point of an implicit def is to announce that | |||
2054 | // the register is live, without be specific about it's value. So define | |||
2055 | // a value if there isn't one already. | |||
2056 | ValueIDNum Num = MTracker->readReg(MI.getOperand(0).getReg()); | |||
2057 | // Has a legitimate value -> ignore the implicit def. | |||
2058 | if (Num.getLoc() != 0) | |||
2059 | return; | |||
2060 | // Otherwise, def it here. | |||
2061 | } else if (MI.isMetaInstruction()) | |||
2062 | return; | |||
2063 | ||||
2064 | MachineFunction *MF = MI.getMF(); | |||
2065 | const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); | |||
2066 | Register SP = TLI->getStackPointerRegisterToSaveRestore(); | |||
2067 | ||||
2068 | // Find the regs killed by MI, and find regmasks of preserved regs. | |||
2069 | // Max out the number of statically allocated elements in `DeadRegs`, as this | |||
2070 | // prevents fallback to std::set::count() operations. | |||
2071 | SmallSet<uint32_t, 32> DeadRegs; | |||
2072 | SmallVector<const uint32_t *, 4> RegMasks; | |||
2073 | SmallVector<const MachineOperand *, 4> RegMaskPtrs; | |||
2074 | for (const MachineOperand &MO : MI.operands()) { | |||
2075 | // Determine whether the operand is a register def. | |||
2076 | if (MO.isReg() && MO.isDef() && MO.getReg() && | |||
2077 | Register::isPhysicalRegister(MO.getReg()) && | |||
2078 | !(MI.isCall() && MO.getReg() == SP)) { | |||
2079 | // Remove ranges of all aliased registers. | |||
2080 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) | |||
2081 | // FIXME: Can we break out of this loop early if no insertion occurs? | |||
2082 | DeadRegs.insert(*RAI); | |||
2083 | } else if (MO.isRegMask()) { | |||
2084 | RegMasks.push_back(MO.getRegMask()); | |||
2085 | RegMaskPtrs.push_back(&MO); | |||
2086 | } | |||
2087 | } | |||
2088 | ||||
2089 | // Tell MLocTracker about all definitions, of regmasks and otherwise. | |||
2090 | for (uint32_t DeadReg : DeadRegs) | |||
2091 | MTracker->defReg(DeadReg, CurBB, CurInst); | |||
2092 | ||||
2093 | for (auto *MO : RegMaskPtrs) | |||
2094 | MTracker->writeRegMask(MO, CurBB, CurInst); | |||
2095 | ||||
2096 | if (!TTracker) | |||
2097 | return; | |||
2098 | ||||
2099 | // When committing variable values to locations: tell transfer tracker that | |||
2100 | // we've clobbered things. It may be able to recover the variable from a | |||
2101 | // different location. | |||
2102 | ||||
2103 | // Inform TTracker about any direct clobbers. | |||
2104 | for (uint32_t DeadReg : DeadRegs) { | |||
2105 | LocIdx Loc = MTracker->lookupOrTrackRegister(DeadReg); | |||
2106 | TTracker->clobberMloc(Loc, MI.getIterator(), false); | |||
2107 | } | |||
2108 | ||||
2109 | // Look for any clobbers performed by a register mask. Only test locations | |||
2110 | // that are actually being tracked. | |||
2111 | for (auto L : MTracker->locations()) { | |||
2112 | // Stack locations can't be clobbered by regmasks. | |||
2113 | if (MTracker->isSpill(L.Idx)) | |||
2114 | continue; | |||
2115 | ||||
2116 | Register Reg = MTracker->LocIdxToLocID[L.Idx]; | |||
2117 | for (auto *MO : RegMaskPtrs) | |||
2118 | if (MO->clobbersPhysReg(Reg)) | |||
2119 | TTracker->clobberMloc(L.Idx, MI.getIterator(), false); | |||
2120 | } | |||
2121 | } | |||
2122 | ||||
2123 | void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { | |||
2124 | ValueIDNum SrcValue = MTracker->readReg(SrcRegNum); | |||
2125 | ||||
2126 | MTracker->setReg(DstRegNum, SrcValue); | |||
2127 | ||||
2128 | // In all circumstances, re-def the super registers. It's definitely a new | |||
2129 | // value now. This doesn't uniquely identify the composition of subregs, for | |||
2130 | // example, two identical values in subregisters composed in different | |||
2131 | // places would not get equal value numbers. | |||
2132 | for (MCSuperRegIterator SRI(DstRegNum, TRI); SRI.isValid(); ++SRI) | |||
2133 | MTracker->defReg(*SRI, CurBB, CurInst); | |||
2134 | ||||
2135 | // If we're emulating VarLocBasedImpl, just define all the subregisters. | |||
2136 | // DBG_VALUEs of them will expect to be tracked from the DBG_VALUE, not | |||
2137 | // through prior copies. | |||
2138 | if (EmulateOldLDV) { | |||
2139 | for (MCSubRegIndexIterator DRI(DstRegNum, TRI); DRI.isValid(); ++DRI) | |||
2140 | MTracker->defReg(DRI.getSubReg(), CurBB, CurInst); | |||
2141 | return; | |||
2142 | } | |||
2143 | ||||
2144 | // Otherwise, actually copy subregisters from one location to another. | |||
2145 | // XXX: in addition, any subregisters of DstRegNum that don't line up with | |||
2146 | // the source register should be def'd. | |||
2147 | for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) { | |||
2148 | unsigned SrcSubReg = SRI.getSubReg(); | |||
2149 | unsigned SubRegIdx = SRI.getSubRegIndex(); | |||
2150 | unsigned DstSubReg = TRI->getSubReg(DstRegNum, SubRegIdx); | |||
2151 | if (!DstSubReg) | |||
2152 | continue; | |||
2153 | ||||
2154 | // Do copy. There are two matching subregisters, the source value should | |||
2155 | // have been def'd when the super-reg was, the latter might not be tracked | |||
2156 | // yet. | |||
2157 | // This will force SrcSubReg to be tracked, if it isn't yet. | |||
2158 | (void)MTracker->readReg(SrcSubReg); | |||
2159 | LocIdx SrcL = MTracker->getRegMLoc(SrcSubReg); | |||
2160 | assert(SrcL.asU64())(static_cast <bool> (SrcL.asU64()) ? void (0) : __assert_fail ("SrcL.asU64()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2160, __extension__ __PRETTY_FUNCTION__)); | |||
2161 | (void)MTracker->readReg(DstSubReg); | |||
2162 | LocIdx DstL = MTracker->getRegMLoc(DstSubReg); | |||
2163 | assert(DstL.asU64())(static_cast <bool> (DstL.asU64()) ? void (0) : __assert_fail ("DstL.asU64()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2163, __extension__ __PRETTY_FUNCTION__)); | |||
2164 | (void)DstL; | |||
2165 | ValueIDNum CpyValue = {SrcValue.getBlock(), SrcValue.getInst(), SrcL}; | |||
2166 | ||||
2167 | MTracker->setReg(DstSubReg, CpyValue); | |||
2168 | } | |||
2169 | } | |||
2170 | ||||
2171 | bool InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, | |||
2172 | MachineFunction *MF) { | |||
2173 | // TODO: Handle multiple stores folded into one. | |||
2174 | if (!MI.hasOneMemOperand()) | |||
2175 | return false; | |||
2176 | ||||
2177 | if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) | |||
2178 | return false; // This is not a spill instruction, since no valid size was | |||
2179 | // returned from either function. | |||
2180 | ||||
2181 | return true; | |||
2182 | } | |||
2183 | ||||
2184 | bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, | |||
2185 | MachineFunction *MF, unsigned &Reg) { | |||
2186 | if (!isSpillInstruction(MI, MF)) | |||
2187 | return false; | |||
2188 | ||||
2189 | int FI; | |||
2190 | Reg = TII->isStoreToStackSlotPostFE(MI, FI); | |||
2191 | return Reg != 0; | |||
2192 | } | |||
2193 | ||||
2194 | Optional<SpillLoc> | |||
2195 | InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, | |||
2196 | MachineFunction *MF, unsigned &Reg) { | |||
2197 | if (!MI.hasOneMemOperand()) | |||
2198 | return None; | |||
2199 | ||||
2200 | // FIXME: Handle folded restore instructions with more than one memory | |||
2201 | // operand. | |||
2202 | if (MI.getRestoreSize(TII)) { | |||
2203 | Reg = MI.getOperand(0).getReg(); | |||
2204 | return extractSpillBaseRegAndOffset(MI); | |||
2205 | } | |||
2206 | return None; | |||
2207 | } | |||
2208 | ||||
/// Recognise spill and restore instructions and transfer the tracked machine
/// value between register and spill-slot locations accordingly. Returns true
/// if \p MI was handled as a spill or restore. Note the ordering below is
/// deliberate: spill-slot clobbering happens before the value transfer, and
/// alias defs happen before the restored register is re-set.
bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
  // XXX -- it's too difficult to implement VarLocBasedImpl's stack location
  // limitations under the new model. Therefore, when comparing them, compare
  // versions that don't attempt spills or restores at all.
  if (EmulateOldLDV)
    return false;

  MachineFunction *MF = MI.getMF();
  unsigned Reg;
  Optional<SpillLoc> Loc;

  LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump();)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("livedebugvalues")) { dbgs() << "Examining instruction: " ; MI.dump();; } } while (false);

  // First, if there are any DBG_VALUEs pointing at a spill slot that is
  // written to, terminate that variable location. The value in memory
  // will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
  if (isSpillInstruction(MI, MF)) {
    Loc = extractSpillBaseRegAndOffset(MI);

    if (TTracker) {
      Optional<LocIdx> MLoc = MTracker->getSpillMLoc(*Loc);
      if (MLoc) {
        // Un-set this location before clobbering, so that we don't salvage
        // the variable location back to the same place.
        MTracker->setMLoc(*MLoc, ValueIDNum::EmptyValue);
        TTracker->clobberMloc(*MLoc, MI.getIterator());
      }
    }
  }

  // Try to recognise spill and restore instructions that may transfer a value.
  if (isLocationSpill(MI, MF, Reg)) {
    Loc = extractSpillBaseRegAndOffset(MI);
    auto ValueID = MTracker->readReg(Reg);

    // If the location is empty, produce a phi, signify it's the live-in value.
    if (ValueID.getLoc() == 0)
      ValueID = {CurBB, 0, MTracker->getRegMLoc(Reg)};

    // Move the register's value into the spill slot.
    MTracker->setSpill(*Loc, ValueID);
    auto OptSpillLocIdx = MTracker->getSpillMLoc(*Loc);
    assert(OptSpillLocIdx && "Spill slot set but has no LocIdx?")(static_cast <bool> (OptSpillLocIdx && "Spill slot set but has no LocIdx?" ) ? void (0) : __assert_fail ("OptSpillLocIdx && \"Spill slot set but has no LocIdx?\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2250, __extension__ __PRETTY_FUNCTION__));
    LocIdx SpillLocIdx = *OptSpillLocIdx;

    // Tell TransferTracker about this spill, produce DBG_VALUEs for it.
    if (TTracker)
      TTracker->transferMlocs(MTracker->getRegMLoc(Reg), SpillLocIdx,
                              MI.getIterator());
  } else {
    // Not a spill: only remaining possibility is a restore.
    if (!(Loc = isRestoreInstruction(MI, MF, Reg)))
      return false;

    // Is there a value to be restored?
    auto OptValueID = MTracker->readSpill(*Loc);
    if (OptValueID) {
      ValueIDNum ValueID = *OptValueID;
      LocIdx SpillLocIdx = *MTracker->getSpillMLoc(*Loc);
      // XXX -- can we recover sub-registers of this value? Until we can, first
      // overwrite all defs of the register being restored to.
      for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
        MTracker->defReg(*RAI, CurBB, CurInst);

      // Now override the reg we're restoring to.
      MTracker->setReg(Reg, ValueID);

      // Report this restore to the transfer tracker too.
      if (TTracker)
        TTracker->transferMlocs(SpillLocIdx, MTracker->getRegMLoc(Reg),
                                MI.getIterator());
    } else {
      // There isn't anything in the location; not clear if this is a code path
      // that still runs. Def this register anyway just in case.
      for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
        MTracker->defReg(*RAI, CurBB, CurInst);

      // Force the spill slot to be tracked.
      LocIdx L = MTracker->getOrTrackSpillLoc(*Loc);

      // Set the restored value to be a machine phi number, signifying that it's
      // whatever the spills live-in value is in this block. Definitely has
      // a LocIdx due to the setSpill above.
      ValueIDNum ValueID = {CurBB, 0, L};
      MTracker->setReg(Reg, ValueID);
      MTracker->setSpill(*Loc, ValueID);
    }
  }
  return true;
}
2297 | ||||
2298 | bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) { | |||
2299 | auto DestSrc = TII->isCopyInstr(MI); | |||
2300 | if (!DestSrc) | |||
2301 | return false; | |||
2302 | ||||
2303 | const MachineOperand *DestRegOp = DestSrc->Destination; | |||
2304 | const MachineOperand *SrcRegOp = DestSrc->Source; | |||
2305 | ||||
2306 | auto isCalleeSavedReg = [&](unsigned Reg) { | |||
2307 | for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) | |||
2308 | if (CalleeSavedRegs.test(*RAI)) | |||
2309 | return true; | |||
2310 | return false; | |||
2311 | }; | |||
2312 | ||||
2313 | Register SrcReg = SrcRegOp->getReg(); | |||
2314 | Register DestReg = DestRegOp->getReg(); | |||
2315 | ||||
2316 | // Ignore identity copies. Yep, these make it as far as LiveDebugValues. | |||
2317 | if (SrcReg == DestReg) | |||
2318 | return true; | |||
2319 | ||||
2320 | // For emulating VarLocBasedImpl: | |||
2321 | // We want to recognize instructions where destination register is callee | |||
2322 | // saved register. If register that could be clobbered by the call is | |||
2323 | // included, there would be a great chance that it is going to be clobbered | |||
2324 | // soon. It is more likely that previous register, which is callee saved, is | |||
2325 | // going to stay unclobbered longer, even if it is killed. | |||
2326 | // | |||
2327 | // For InstrRefBasedImpl, we can track multiple locations per value, so | |||
2328 | // ignore this condition. | |||
2329 | if (EmulateOldLDV && !isCalleeSavedReg(DestReg)) | |||
2330 | return false; | |||
2331 | ||||
2332 | // InstrRefBasedImpl only followed killing copies. | |||
2333 | if (EmulateOldLDV && !SrcRegOp->isKill()) | |||
2334 | return false; | |||
2335 | ||||
2336 | // Copy MTracker info, including subregs if available. | |||
2337 | InstrRefBasedLDV::performCopy(SrcReg, DestReg); | |||
2338 | ||||
2339 | // Only produce a transfer of DBG_VALUE within a block where old LDV | |||
2340 | // would have. We might make use of the additional value tracking in some | |||
2341 | // other way, later. | |||
2342 | if (TTracker && isCalleeSavedReg(DestReg) && SrcRegOp->isKill()) | |||
2343 | TTracker->transferMlocs(MTracker->getRegMLoc(SrcReg), | |||
2344 | MTracker->getRegMLoc(DestReg), MI.getIterator()); | |||
2345 | ||||
2346 | // VarLocBasedImpl would quit tracking the old location after copying. | |||
2347 | if (EmulateOldLDV && SrcReg != DestReg) | |||
2348 | MTracker->defReg(SrcReg, CurBB, CurInst); | |||
2349 | ||||
2350 | // Finally, the copy might have clobbered variables based on the destination | |||
2351 | // register. Tell TTracker about it, in case a backup location exists. | |||
2352 | if (TTracker) { | |||
2353 | for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) { | |||
2354 | LocIdx ClobberedLoc = MTracker->getRegMLoc(*RAI); | |||
2355 | TTracker->clobberMloc(ClobberedLoc, MI.getIterator(), false); | |||
2356 | } | |||
2357 | } | |||
2358 | ||||
2359 | return true; | |||
2360 | } | |||
2361 | ||||
2362 | /// Accumulate a mapping between each DILocalVariable fragment and other | |||
2363 | /// fragments of that DILocalVariable which overlap. This reduces work during | |||
2364 | /// the data-flow stage from "Find any overlapping fragments" to "Check if the | |||
2365 | /// known-to-overlap fragments are present". | |||
2366 | /// \param MI A previously unprocessed DEBUG_VALUE instruction to analyze for | |||
2367 | /// fragment usage. | |||
2368 | void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) { | |||
2369 | DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(), | |||
2370 | MI.getDebugLoc()->getInlinedAt()); | |||
2371 | FragmentInfo ThisFragment = MIVar.getFragmentOrDefault(); | |||
2372 | ||||
2373 | // If this is the first sighting of this variable, then we are guaranteed | |||
2374 | // there are currently no overlapping fragments either. Initialize the set | |||
2375 | // of seen fragments, record no overlaps for the current one, and return. | |||
2376 | auto SeenIt = SeenFragments.find(MIVar.getVariable()); | |||
2377 | if (SeenIt == SeenFragments.end()) { | |||
2378 | SmallSet<FragmentInfo, 4> OneFragment; | |||
2379 | OneFragment.insert(ThisFragment); | |||
2380 | SeenFragments.insert({MIVar.getVariable(), OneFragment}); | |||
2381 | ||||
2382 | OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}}); | |||
2383 | return; | |||
2384 | } | |||
2385 | ||||
2386 | // If this particular Variable/Fragment pair already exists in the overlap | |||
2387 | // map, it has already been accounted for. | |||
2388 | auto IsInOLapMap = | |||
2389 | OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}}); | |||
2390 | if (!IsInOLapMap.second) | |||
2391 | return; | |||
2392 | ||||
2393 | auto &ThisFragmentsOverlaps = IsInOLapMap.first->second; | |||
2394 | auto &AllSeenFragments = SeenIt->second; | |||
2395 | ||||
2396 | // Otherwise, examine all other seen fragments for this variable, with "this" | |||
2397 | // fragment being a previously unseen fragment. Record any pair of | |||
2398 | // overlapping fragments. | |||
2399 | for (auto &ASeenFragment : AllSeenFragments) { | |||
2400 | // Does this previously seen fragment overlap? | |||
2401 | if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) { | |||
2402 | // Yes: Mark the current fragment as being overlapped. | |||
2403 | ThisFragmentsOverlaps.push_back(ASeenFragment); | |||
2404 | // Mark the previously seen fragment as being overlapped by the current | |||
2405 | // one. | |||
2406 | auto ASeenFragmentsOverlaps = | |||
2407 | OverlapFragments.find({MIVar.getVariable(), ASeenFragment}); | |||
2408 | assert(ASeenFragmentsOverlaps != OverlapFragments.end() &&(static_cast <bool> (ASeenFragmentsOverlaps != OverlapFragments .end() && "Previously seen var fragment has no vector of overlaps" ) ? void (0) : __assert_fail ("ASeenFragmentsOverlaps != OverlapFragments.end() && \"Previously seen var fragment has no vector of overlaps\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2409, __extension__ __PRETTY_FUNCTION__)) | |||
2409 | "Previously seen var fragment has no vector of overlaps")(static_cast <bool> (ASeenFragmentsOverlaps != OverlapFragments .end() && "Previously seen var fragment has no vector of overlaps" ) ? void (0) : __assert_fail ("ASeenFragmentsOverlaps != OverlapFragments.end() && \"Previously seen var fragment has no vector of overlaps\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2409, __extension__ __PRETTY_FUNCTION__)); | |||
2410 | ASeenFragmentsOverlaps->second.push_back(ThisFragment); | |||
2411 | } | |||
2412 | } | |||
2413 | ||||
2414 | AllSeenFragments.insert(ThisFragment); | |||
2415 | } | |||
2416 | ||||
2417 | void InstrRefBasedLDV::process(MachineInstr &MI, ValueIDNum **MLiveOuts, | |||
2418 | ValueIDNum **MLiveIns) { | |||
2419 | // Try to interpret an MI as a debug or transfer instruction. Only if it's | |||
2420 | // none of these should we interpret it's register defs as new value | |||
2421 | // definitions. | |||
2422 | if (transferDebugValue(MI)) | |||
2423 | return; | |||
2424 | if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns)) | |||
2425 | return; | |||
2426 | if (transferDebugPHI(MI)) | |||
2427 | return; | |||
2428 | if (transferRegisterCopy(MI)) | |||
2429 | return; | |||
2430 | if (transferSpillOrRestoreInst(MI)) | |||
2431 | return; | |||
2432 | transferRegisterDef(MI); | |||
2433 | } | |||
2434 | ||||
/// Build the per-block machine-location transfer function for \p MF into
/// \p MLocTransfer (indexed by block number). For each block, locations that
/// are not simply live-through get an entry mapping the location to the value
/// it holds on exit. As side effects, this also accumulates the
/// fragment-overlap map (via accumulateFragmentMap), fills the
/// DebugInstrNumToInstr index, and patches regmask clobbers of later-tracked
/// registers into the transfer maps.
void InstrRefBasedLDV::produceMLocTransferFunction(
    MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer,
    unsigned MaxNumBlocks) {
  // Because we try to optimize around register mask operands by ignoring regs
  // that aren't currently tracked, we set up something ugly for later: RegMask
  // operands that are seen earlier than the first use of a register, still need
  // to clobber that register in the transfer function. But this information
  // isn't actively recorded. Instead, we track each RegMask used in each block,
  // and accumulate the clobbered but untracked registers in each block into
  // the following bitvector. Later, if new values are tracked, we can add
  // appropriate clobbers.
  SmallVector<BitVector, 32> BlockMasks;
  BlockMasks.resize(MaxNumBlocks);

  // Reserve one bit per register for the masks described above.
  unsigned BVWords = MachineOperand::getRegMaskSize(TRI->getNumRegs());
  for (auto &BV : BlockMasks)
    BV.resize(TRI->getNumRegs(), true);

  // Step through all instructions and inhale the transfer function.
  for (auto &MBB : MF) {
    // Object fields that are read by trackers to know where we are in the
    // function.
    CurBB = MBB.getNumber();
    CurInst = 1;

    // Set all machine locations to a PHI value. For transfer function
    // production only, this signifies the live-in value to the block.
    MTracker->reset();
    MTracker->setMPhis(CurBB);

    // Step through each instruction in this block.
    for (auto &MI : MBB) {
      // NOTE(review): process() is called here without the machine
      // live-in/live-out value arrays; anything downstream (e.g.
      // transferDebugInstrRef) must tolerate those pointers being null -- a
      // static-analysis report flags a possible null dereference of MLiveIns
      // on this path. Confirm.
      process(MI);
      // Also accumulate fragment map.
      if (MI.isDebugValue())
        accumulateFragmentMap(MI);

      // Create a map from the instruction number (if present) to the
      // MachineInstr and its position.
      if (uint64_t InstrNo = MI.peekDebugInstrNum()) {
        auto InstrAndPos = std::make_pair(&MI, CurInst);
        auto InsertResult =
            DebugInstrNumToInstr.insert(std::make_pair(InstrNo, InstrAndPos));

        // There should never be duplicate instruction numbers.
        assert(InsertResult.second);
        (void)InsertResult;
      }

      ++CurInst;
    }

    // Produce the transfer function, a map of machine location to new value. If
    // any machine location has the live-in phi value from the start of the
    // block, it's live-through and doesn't need recording in the transfer
    // function.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &P = Location.Value;
      if (P.isPHI() && P.getLoc() == Idx.asU64())
        continue;

      // Insert-or-update.
      auto &TransferMap = MLocTransfer[CurBB];
      auto Result = TransferMap.insert(std::make_pair(Idx.asU64(), P));
      if (!Result.second)
        Result.first->second = P;
    }

    // Accumulate any bitmask operands into the clobbered reg mask for this
    // block.
    for (auto &P : MTracker->Masks) {
      BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);
    }
  }

  // Compute a bitvector of all the registers that are tracked in this block.
  // The stack pointer is excluded: it is always tracked.
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
  Register SP = TLI->getStackPointerRegisterToSaveRestore();
  BitVector UsedRegs(TRI->getNumRegs());
  for (auto Location : MTracker->locations()) {
    unsigned ID = MTracker->LocIdxToLocID[Location.Idx];
    if (ID >= TRI->getNumRegs() || ID == SP)
      continue;
    UsedRegs.set(ID);
  }

  // Check that any regmask-clobber of a register that gets tracked, is not
  // live-through in the transfer function. It needs to be clobbered at the
  // very least.
  for (unsigned int I = 0; I < MaxNumBlocks; ++I) {
    BitVector &BV = BlockMasks[I];
    // BlockMasks bits were cleared for preserved registers; flip so that set
    // bits now mean "clobbered by a regmask in block I", then restrict to
    // registers we actually track.
    BV.flip();
    BV &= UsedRegs;
    // This produces all the bits that we clobber, but also use. Check that
    // they're all clobbered or at least set in the designated transfer
    // elem.
    for (unsigned Bit : BV.set_bits()) {
      unsigned ID = MTracker->getLocID(Bit, false);
      LocIdx Idx = MTracker->LocIDToLocIdx[ID];
      auto &TransferMap = MLocTransfer[I];

      // Install a value representing the fact that this location is effectively
      // written to in this block. As there's no reserved value, instead use
      // a value number that is never generated. Pick the value number for the
      // first instruction in the block, def'ing this location, which we know
      // this block never used anyway.
      ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx);
      auto Result =
          TransferMap.insert(std::make_pair(Idx.asU64(), NotGeneratedNum));
      if (!Result.second) {
        ValueIDNum &ValueID = Result.first->second;
        if (ValueID.getBlock() == I && ValueID.isPHI())
          // It was left as live-through. Set it to clobbered.
          ValueID = NotGeneratedNum;
      }
    }
  }
}
2555 | ||||
/// Join the machine-location live-out values of \p MBB's already-visited
/// predecessors into the live-in array \p InLocs (one ValueIDNum per LocIdx).
/// Where predecessors agree, the common value is propagated; where they
/// disagree, a PHI value at this block is produced -- except that
/// disagreements confined to backedges may instead "downgrade" the live-in
/// lattice value, per the file-level comment.
/// \returns {whether InLocs changed, whether a downgrade occurred}.
std::tuple<bool, bool>
InstrRefBasedLDV::mlocJoin(MachineBasicBlock &MBB,
                           SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
                           ValueIDNum **OutLocs, ValueIDNum *InLocs) {
  LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
  bool Changed = false;
  bool DowngradeOccurred = false;

  // Collect predecessors that have been visited. Anything that hasn't been
  // visited yet is a backedge on the first iteration, and the meet of its
  // lattice value for all locations will be unaffected.
  SmallVector<const MachineBasicBlock *, 8> BlockOrders;
  for (auto Pred : MBB.predecessors()) {
    if (Visited.count(Pred)) {
      BlockOrders.push_back(Pred);
    }
  }

  // Visit predecessors in RPOT order.
  auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) {
    return BBToOrder.find(A)->second < BBToOrder.find(B)->second;
  };
  llvm::sort(BlockOrders, Cmp);

  // Skip entry block: no visited predecessors means nothing to join.
  if (BlockOrders.size() == 0)
    return std::tuple<bool, bool>(false, false);

  // Step through all machine locations, then look at each predecessor and
  // detect disagreements.
  unsigned ThisBlockRPO = BBToOrder.find(&MBB)->second;
  for (auto Location : MTracker->locations()) {
    LocIdx Idx = Location.Idx;
    // Pick out the first predecessors live-out value for this location. It's
    // guaranteed to be not a backedge, as we order by RPO.
    ValueIDNum BaseVal = OutLocs[BlockOrders[0]->getNumber()][Idx.asU64()];

    // Some flags for whether there's a disagreement, and whether it's a
    // disagreement with a backedge or not.
    bool Disagree = false;
    bool NonBackEdgeDisagree = false;

    // Loop around everything that wasn't 'base'.
    for (unsigned int I = 1; I < BlockOrders.size(); ++I) {
      // NOTE: this 'MBB' deliberately shadows the function parameter for the
      // remainder of the loop body.
      auto *MBB = BlockOrders[I];
      if (BaseVal != OutLocs[MBB->getNumber()][Idx.asU64()]) {
        // Live-out of a predecessor disagrees with the first predecessor.
        Disagree = true;

        // Test whether it's a disagreement in the backedges or not.
        if (BBToOrder.find(MBB)->second < ThisBlockRPO) // might be self b/e
          NonBackEdgeDisagree = true;
      }
    }

    bool OverRide = false;
    if (Disagree && !NonBackEdgeDisagree) {
      // Only the backedges disagree. Consider demoting the livein
      // lattice value, as per the file level comment. The value we consider
      // demoting to is the value that the non-backedge predecessors agree on.
      // The order of values is that non-PHIs are \top, a PHI at this block
      // \bot, and phis between the two are ordered by their RPO number.
      // If there's no agreement, or we've already demoted to this PHI value
      // before, replace with a PHI value at this block.

      // Calculate order numbers: zero means normal def, nonzero means RPO
      // number.
      unsigned BaseBlockRPONum = BBNumToRPO[BaseVal.getBlock()] + 1;
      if (!BaseVal.isPHI())
        BaseBlockRPONum = 0;

      ValueIDNum &InLocID = InLocs[Idx.asU64()];
      unsigned InLocRPONum = BBNumToRPO[InLocID.getBlock()] + 1;
      if (!InLocID.isPHI())
        InLocRPONum = 0;

      // Should we ignore the disagreeing backedges, and override with the
      // value the other predecessors agree on (in "base")?
      unsigned ThisBlockRPONum = BBNumToRPO[MBB.getNumber()] + 1;
      if (BaseBlockRPONum > InLocRPONum && BaseBlockRPONum < ThisBlockRPONum) {
        // Override.
        OverRide = true;
        DowngradeOccurred = true;
      }
    }
    // else: if we disagree in the non-backedges, then this is definitely
    // a control flow merge where different values merge. Make it a PHI.

    // Generate a phi...
    ValueIDNum PHI = {(uint64_t)MBB.getNumber(), 0, Idx};
    ValueIDNum NewVal = (Disagree && !OverRide) ? PHI : BaseVal;
    if (InLocs[Idx.asU64()] != NewVal) {
      Changed |= true;
      InLocs[Idx.asU64()] = NewVal;
    }
  }

  // TODO: Reimplement NumInserted and NumRemoved.
  return std::tuple<bool, bool>(Changed, DowngradeOccurred);
}
2656 | ||||
/// Iterative machine-location dataflow. Computes, for every block, the
/// live-in (\p MInLocs) and live-out (\p MOutLocs) value numbers of every
/// machine location, by repeatedly joining predecessor live-outs (mlocJoin)
/// and applying each block's transfer function (\p MLocTransfer) until a
/// fixedpoint is reached. Blocks are visited in RPO; successors reached via
/// backedges are deferred to the next outer iteration.
void InstrRefBasedLDV::mlocDataflow(
    ValueIDNum **MInLocs, ValueIDNum **MOutLocs,
    SmallVectorImpl<MLocTransferMap> &MLocTransfer) {
  // Min-heaps of RPO numbers: blocks to visit this dataflow iteration, and
  // blocks deferred to the next one.
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist, Pending;

  // We track what is on the current and pending worklist to avoid inserting
  // the same thing twice. We could avoid this with a custom priority queue,
  // but this is probably not worth it.
  SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist;

  // Initialize worklist with every block to be visited.
  for (unsigned int I = 0; I < BBToOrder.size(); ++I) {
    Worklist.push(I);
    OnWorklist.insert(OrderToBB[I]);
  }

  MTracker->reset();

  // Set inlocs for entry block -- each as a PHI at the entry block. Represents
  // the incoming value to the function.
  MTracker->setMPhis(0);
  for (auto Location : MTracker->locations())
    MInLocs[0][Location.Idx.asU64()] = Location.Value;

  SmallPtrSet<const MachineBasicBlock *, 16> Visited;
  while (!Worklist.empty() || !Pending.empty()) {
    // Vector for storing the evaluated block transfer function.
    SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap;

    while (!Worklist.empty()) {
      MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
      CurBB = MBB->getNumber();
      Worklist.pop();

      // Join the values in all predecessor blocks.
      bool InLocsChanged, DowngradeOccurred;
      std::tie(InLocsChanged, DowngradeOccurred) =
          mlocJoin(*MBB, Visited, MOutLocs, MInLocs[CurBB]);
      // First-time visits always count as a change, so the transfer function
      // below is evaluated at least once per block.
      InLocsChanged |= Visited.insert(MBB).second;

      // If a downgrade occurred, book us in for re-examination on the next
      // iteration.
      if (DowngradeOccurred && OnPending.insert(MBB).second)
        Pending.push(BBToOrder[MBB]);

      // Don't examine transfer function if we've visited this loc at least
      // once, and inlocs haven't changed.
      if (!InLocsChanged)
        continue;

      // Load the current set of live-ins into MLocTracker.
      MTracker->loadFromArray(MInLocs[CurBB], CurBB);

      // Each element of the transfer function can be a new def, or a read of
      // a live-in value. Evaluate each element, and store to "ToRemap".
      ToRemap.clear();
      for (auto &P : MLocTransfer[CurBB]) {
        if (P.second.getBlock() == CurBB && P.second.isPHI()) {
          // This is a movement of whatever was live in. Read it.
          ValueIDNum NewID = MTracker->getNumAtPos(P.second.getLoc());
          ToRemap.push_back(std::make_pair(P.first, NewID));
        } else {
          // It's a def. Just set it.
          assert(P.second.getBlock() == CurBB);
          ToRemap.push_back(std::make_pair(P.first, P.second));
        }
      }

      // Commit the transfer function changes into mloc tracker, which
      // transforms the contents of the MLocTracker into the live-outs.
      for (auto &P : ToRemap)
        MTracker->setMLoc(P.first, P.second);

      // Now copy out-locs from mloc tracker into out-loc vector, checking
      // whether changes have occurred. These changes can have come from both
      // the transfer function, and mlocJoin.
      bool OLChanged = false;
      for (auto Location : MTracker->locations()) {
        OLChanged |= MOutLocs[CurBB][Location.Idx.asU64()] != Location.Value;
        MOutLocs[CurBB][Location.Idx.asU64()] = Location.Value;
      }

      MTracker->reset();

      // No need to examine successors again if out-locs didn't change.
      if (!OLChanged)
        continue;

      // All successors should be visited: put any back-edges on the pending
      // list for the next dataflow iteration, and any other successors to be
      // visited this iteration, if they're not going to be already.
      for (auto s : MBB->successors()) {
        // Does branching to this successor represent a back-edge?
        if (BBToOrder[s] > BBToOrder[MBB]) {
          // No: visit it during this dataflow iteration.
          if (OnWorklist.insert(s).second)
            Worklist.push(BBToOrder[s]);
        } else {
          // Yes: visit it on the next iteration.
          if (OnPending.insert(s).second)
            Pending.push(BBToOrder[s]);
        }
      }
    }

    Worklist.swap(Pending);
    std::swap(OnPending, OnWorklist);
    OnPending.clear();
    // At this point, pending must be empty, since it was just the empty
    // worklist
    assert(Pending.empty() && "Pending should be empty");
  }

  // Once all the live-ins don't change on mlocJoin(), we've reached a
  // fixedpoint.
}
2775 | ||||
2776 | bool InstrRefBasedLDV::vlocDowngradeLattice( | |||
2777 | const MachineBasicBlock &MBB, const DbgValue &OldLiveInLocation, | |||
2778 | const SmallVectorImpl<InValueT> &Values, unsigned CurBlockRPONum) { | |||
2779 | // Ranking value preference: see file level comment, the highest rank is | |||
2780 | // a plain def, followed by PHI values in reverse post-order. Numerically, | |||
2781 | // we assign all defs the rank '0', all PHIs their blocks RPO number plus | |||
2782 | // one, and consider the lowest value the highest ranked. | |||
2783 | int OldLiveInRank = BBNumToRPO[OldLiveInLocation.ID.getBlock()] + 1; | |||
2784 | if (!OldLiveInLocation.ID.isPHI()) | |||
2785 | OldLiveInRank = 0; | |||
2786 | ||||
2787 | // Allow any unresolvable conflict to be over-ridden. | |||
2788 | if (OldLiveInLocation.Kind == DbgValue::NoVal) { | |||
2789 | // Although if it was an unresolvable conflict from _this_ block, then | |||
2790 | // all other seeking of downgrades and PHIs must have failed before hand. | |||
2791 | if (OldLiveInLocation.BlockNo == (unsigned)MBB.getNumber()) | |||
2792 | return false; | |||
2793 | OldLiveInRank = INT_MIN(-2147483647 -1); | |||
2794 | } | |||
2795 | ||||
2796 | auto &InValue = *Values[0].second; | |||
2797 | ||||
2798 | if (InValue.Kind == DbgValue::Const || InValue.Kind == DbgValue::NoVal) | |||
2799 | return false; | |||
2800 | ||||
2801 | unsigned ThisRPO = BBNumToRPO[InValue.ID.getBlock()]; | |||
2802 | int ThisRank = ThisRPO + 1; | |||
2803 | if (!InValue.ID.isPHI()) | |||
2804 | ThisRank = 0; | |||
2805 | ||||
2806 | // Too far down the lattice? | |||
2807 | if (ThisRPO >= CurBlockRPONum) | |||
2808 | return false; | |||
2809 | ||||
2810 | // Higher in the lattice than what we've already explored? | |||
2811 | if (ThisRank <= OldLiveInRank) | |||
2812 | return false; | |||
2813 | ||||
2814 | return true; | |||
2815 | } | |||
2816 | ||||
2817 | std::tuple<Optional<ValueIDNum>, bool> InstrRefBasedLDV::pickVPHILoc( | |||
2818 | MachineBasicBlock &MBB, const DebugVariable &Var, const LiveIdxT &LiveOuts, | |||
2819 | ValueIDNum **MOutLocs, ValueIDNum **MInLocs, | |||
2820 | const SmallVectorImpl<MachineBasicBlock *> &BlockOrders) { | |||
2821 | // Collect a set of locations from predecessor where its live-out value can | |||
2822 | // be found. | |||
2823 | SmallVector<SmallVector<LocIdx, 4>, 8> Locs; | |||
2824 | unsigned NumLocs = MTracker->getNumLocs(); | |||
2825 | unsigned BackEdgesStart = 0; | |||
2826 | ||||
2827 | for (auto p : BlockOrders) { | |||
2828 | // Pick out where backedges start in the list of predecessors. Relies on | |||
2829 | // BlockOrders being sorted by RPO. | |||
2830 | if (BBToOrder[p] < BBToOrder[&MBB]) | |||
2831 | ++BackEdgesStart; | |||
2832 | ||||
2833 | // For each predecessor, create a new set of locations. | |||
2834 | Locs.resize(Locs.size() + 1); | |||
2835 | unsigned ThisBBNum = p->getNumber(); | |||
2836 | auto LiveOutMap = LiveOuts.find(p); | |||
2837 | if (LiveOutMap == LiveOuts.end()) | |||
2838 | // This predecessor isn't in scope, it must have no live-in/live-out | |||
2839 | // locations. | |||
2840 | continue; | |||
2841 | ||||
2842 | auto It = LiveOutMap->second->find(Var); | |||
2843 | if (It == LiveOutMap->second->end()) | |||
2844 | // There's no value recorded for this variable in this predecessor, | |||
2845 | // leave an empty set of locations. | |||
2846 | continue; | |||
2847 | ||||
2848 | const DbgValue &OutVal = It->second; | |||
2849 | ||||
2850 | if (OutVal.Kind == DbgValue::Const || OutVal.Kind == DbgValue::NoVal) | |||
2851 | // Consts and no-values cannot have locations we can join on. | |||
2852 | continue; | |||
2853 | ||||
2854 | assert(OutVal.Kind == DbgValue::Proposed || OutVal.Kind == DbgValue::Def)(static_cast <bool> (OutVal.Kind == DbgValue::Proposed || OutVal.Kind == DbgValue::Def) ? void (0) : __assert_fail ("OutVal.Kind == DbgValue::Proposed || OutVal.Kind == DbgValue::Def" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2854, __extension__ __PRETTY_FUNCTION__)); | |||
2855 | ValueIDNum ValToLookFor = OutVal.ID; | |||
2856 | ||||
2857 | // Search the live-outs of the predecessor for the specified value. | |||
2858 | for (unsigned int I = 0; I < NumLocs; ++I) { | |||
2859 | if (MOutLocs[ThisBBNum][I] == ValToLookFor) | |||
2860 | Locs.back().push_back(LocIdx(I)); | |||
2861 | } | |||
2862 | } | |||
2863 | ||||
2864 | // If there were no locations at all, return an empty result. | |||
2865 | if (Locs.empty()) | |||
2866 | return std::tuple<Optional<ValueIDNum>, bool>(None, false); | |||
2867 | ||||
2868 | // Lambda for seeking a common location within a range of location-sets. | |||
2869 | using LocsIt = SmallVector<SmallVector<LocIdx, 4>, 8>::iterator; | |||
2870 | auto SeekLocation = | |||
2871 | [&Locs](llvm::iterator_range<LocsIt> SearchRange) -> Optional<LocIdx> { | |||
2872 | // Starting with the first set of locations, take the intersection with | |||
2873 | // subsequent sets. | |||
2874 | SmallVector<LocIdx, 4> base = Locs[0]; | |||
2875 | for (auto &S : SearchRange) { | |||
2876 | SmallVector<LocIdx, 4> new_base; | |||
2877 | std::set_intersection(base.begin(), base.end(), S.begin(), S.end(), | |||
2878 | std::inserter(new_base, new_base.begin())); | |||
2879 | base = new_base; | |||
2880 | } | |||
2881 | if (base.empty()) | |||
2882 | return None; | |||
2883 | ||||
2884 | // We now have a set of LocIdxes that contain the right output value in | |||
2885 | // each of the predecessors. Pick the lowest; if there's a register loc, | |||
2886 | // that'll be it. | |||
2887 | return *base.begin(); | |||
2888 | }; | |||
2889 | ||||
2890 | // Search for a common location for all predecessors. If we can't, then fall | |||
2891 | // back to only finding a common location between non-backedge predecessors. | |||
2892 | bool ValidForAllLocs = true; | |||
2893 | auto TheLoc = SeekLocation(Locs); | |||
2894 | if (!TheLoc) { | |||
2895 | ValidForAllLocs = false; | |||
2896 | TheLoc = | |||
2897 | SeekLocation(make_range(Locs.begin(), Locs.begin() + BackEdgesStart)); | |||
2898 | } | |||
2899 | ||||
2900 | if (!TheLoc) | |||
2901 | return std::tuple<Optional<ValueIDNum>, bool>(None, false); | |||
2902 | ||||
2903 | // Return a PHI-value-number for the found location. | |||
2904 | LocIdx L = *TheLoc; | |||
2905 | ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L}; | |||
2906 | return std::tuple<Optional<ValueIDNum>, bool>(PHIVal, ValidForAllLocs); | |||
2907 | } | |||
2908 | ||||
2909 | std::tuple<bool, bool> InstrRefBasedLDV::vlocJoin( | |||
2910 | MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, LiveIdxT &VLOCInLocs, | |||
2911 | SmallPtrSet<const MachineBasicBlock *, 16> *VLOCVisited, unsigned BBNum, | |||
2912 | const SmallSet<DebugVariable, 4> &AllVars, ValueIDNum **MOutLocs, | |||
2913 | ValueIDNum **MInLocs, | |||
2914 | SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks, | |||
2915 | SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore, | |||
2916 | DenseMap<DebugVariable, DbgValue> &InLocsT) { | |||
2917 | bool DowngradeOccurred = false; | |||
2918 | ||||
2919 | // To emulate VarLocBasedImpl, process this block if it's not in scope but | |||
2920 | // _does_ assign a variable value. No live-ins for this scope are transferred | |||
2921 | // in though, so we can return immediately. | |||
2922 | if (InScopeBlocks.count(&MBB) == 0 && !ArtificialBlocks.count(&MBB)) { | |||
2923 | if (VLOCVisited) | |||
2924 | return std::tuple<bool, bool>(true, false); | |||
2925 | return std::tuple<bool, bool>(false, false); | |||
2926 | } | |||
2927 | ||||
2928 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("livedebugvalues")) { dbgs() << "join MBB: " << MBB .getNumber() << "\n"; } } while (false); | |||
2929 | bool Changed = false; | |||
2930 | ||||
2931 | // Find any live-ins computed in a prior iteration. | |||
2932 | auto ILSIt = VLOCInLocs.find(&MBB); | |||
2933 | assert(ILSIt != VLOCInLocs.end())(static_cast <bool> (ILSIt != VLOCInLocs.end()) ? void ( 0) : __assert_fail ("ILSIt != VLOCInLocs.end()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2933, __extension__ __PRETTY_FUNCTION__)); | |||
2934 | auto &ILS = *ILSIt->second; | |||
2935 | ||||
2936 | // Order predecessors by RPOT order, for exploring them in that order. | |||
2937 | SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors()); | |||
2938 | ||||
2939 | auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) { | |||
2940 | return BBToOrder[A] < BBToOrder[B]; | |||
2941 | }; | |||
2942 | ||||
2943 | llvm::sort(BlockOrders, Cmp); | |||
2944 | ||||
2945 | unsigned CurBlockRPONum = BBToOrder[&MBB]; | |||
2946 | ||||
2947 | // Force a re-visit to loop heads in the first dataflow iteration. | |||
2948 | // FIXME: if we could "propose" Const values this wouldn't be needed, | |||
2949 | // because they'd need to be confirmed before being emitted. | |||
2950 | if (!BlockOrders.empty() && | |||
2951 | BBToOrder[BlockOrders[BlockOrders.size() - 1]] >= CurBlockRPONum && | |||
2952 | VLOCVisited) | |||
2953 | DowngradeOccurred = true; | |||
2954 | ||||
2955 | auto ConfirmValue = [&InLocsT](const DebugVariable &DV, DbgValue VR) { | |||
2956 | auto Result = InLocsT.insert(std::make_pair(DV, VR)); | |||
2957 | (void)Result; | |||
2958 | assert(Result.second)(static_cast <bool> (Result.second) ? void (0) : __assert_fail ("Result.second", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 2958, __extension__ __PRETTY_FUNCTION__)); | |||
2959 | }; | |||
2960 | ||||
2961 | auto ConfirmNoVal = [&ConfirmValue, &MBB](const DebugVariable &Var, const DbgValueProperties &Properties) { | |||
2962 | DbgValue NoLocPHIVal(MBB.getNumber(), Properties, DbgValue::NoVal); | |||
2963 | ||||
2964 | ConfirmValue(Var, NoLocPHIVal); | |||
2965 | }; | |||
2966 | ||||
2967 | // Attempt to join the values for each variable. | |||
2968 | for (auto &Var : AllVars) { | |||
2969 | // Collect all the DbgValues for this variable. | |||
2970 | SmallVector<InValueT, 8> Values; | |||
2971 | bool Bail = false; | |||
2972 | unsigned BackEdgesStart = 0; | |||
2973 | for (auto p : BlockOrders) { | |||
2974 | // If the predecessor isn't in scope / to be explored, we'll never be | |||
2975 | // able to join any locations. | |||
2976 | if (!BlocksToExplore.contains(p)) { | |||
2977 | Bail = true; | |||
2978 | break; | |||
2979 | } | |||
2980 | ||||
2981 | // Don't attempt to handle unvisited predecessors: they're implicitly | |||
2982 | // "unknown"s in the lattice. | |||
2983 | if (VLOCVisited && !VLOCVisited->count(p)) | |||
2984 | continue; | |||
2985 | ||||
2986 | // If the predecessors OutLocs is absent, there's not much we can do. | |||
2987 | auto OL = VLOCOutLocs.find(p); | |||
2988 | if (OL == VLOCOutLocs.end()) { | |||
2989 | Bail = true; | |||
2990 | break; | |||
2991 | } | |||
2992 | ||||
2993 | // No live-out value for this predecessor also means we can't produce | |||
2994 | // a joined value. | |||
2995 | auto VIt = OL->second->find(Var); | |||
2996 | if (VIt == OL->second->end()) { | |||
2997 | Bail = true; | |||
2998 | break; | |||
2999 | } | |||
3000 | ||||
3001 | // Keep track of where back-edges begin in the Values vector. Relies on | |||
3002 | // BlockOrders being sorted by RPO. | |||
3003 | unsigned ThisBBRPONum = BBToOrder[p]; | |||
3004 | if (ThisBBRPONum < CurBlockRPONum) | |||
3005 | ++BackEdgesStart; | |||
3006 | ||||
3007 | Values.push_back(std::make_pair(p, &VIt->second)); | |||
3008 | } | |||
3009 | ||||
3010 | // If there were no values, or one of the predecessors couldn't have a | |||
3011 | // value, then give up immediately. It's not safe to produce a live-in | |||
3012 | // value. | |||
3013 | if (Bail || Values.size() == 0) | |||
3014 | continue; | |||
3015 | ||||
3016 | // Enumeration identifying the current state of the predecessors values. | |||
3017 | enum { | |||
3018 | Unset = 0, | |||
3019 | Agreed, // All preds agree on the variable value. | |||
3020 | PropDisagree, // All preds agree, but the value kind is Proposed in some. | |||
3021 | BEDisagree, // Only back-edges disagree on variable value. | |||
3022 | PHINeeded, // Non-back-edge predecessors have conflicing values. | |||
3023 | NoSolution // Conflicting Value metadata makes solution impossible. | |||
3024 | } OurState = Unset; | |||
3025 | ||||
3026 | // All (non-entry) blocks have at least one non-backedge predecessor. | |||
3027 | // Pick the variable value from the first of these, to compare against | |||
3028 | // all others. | |||
3029 | const DbgValue &FirstVal = *Values[0].second; | |||
3030 | const ValueIDNum &FirstID = FirstVal.ID; | |||
3031 | ||||
3032 | // Scan for variable values that can't be resolved: if they have different | |||
3033 | // DIExpressions, different indirectness, or are mixed constants / | |||
3034 | // non-constants. | |||
3035 | for (auto &V : Values) { | |||
3036 | if (V.second->Properties != FirstVal.Properties) | |||
3037 | OurState = NoSolution; | |||
3038 | if (V.second->Kind == DbgValue::Const && FirstVal.Kind != DbgValue::Const) | |||
3039 | OurState = NoSolution; | |||
3040 | } | |||
3041 | ||||
3042 | // Flags diagnosing _how_ the values disagree. | |||
3043 | bool NonBackEdgeDisagree = false; | |||
3044 | bool DisagreeOnPHINess = false; | |||
3045 | bool IDDisagree = false; | |||
3046 | bool Disagree = false; | |||
3047 | if (OurState == Unset) { | |||
3048 | for (auto &V : Values) { | |||
3049 | if (*V.second == FirstVal) | |||
3050 | continue; // No disagreement. | |||
3051 | ||||
3052 | Disagree = true; | |||
3053 | ||||
3054 | // Flag whether the value number actually diagrees. | |||
3055 | if (V.second->ID != FirstID) | |||
3056 | IDDisagree = true; | |||
3057 | ||||
3058 | // Distinguish whether disagreement happens in backedges or not. | |||
3059 | // Relies on Values (and BlockOrders) being sorted by RPO. | |||
3060 | unsigned ThisBBRPONum = BBToOrder[V.first]; | |||
3061 | if (ThisBBRPONum < CurBlockRPONum) | |||
3062 | NonBackEdgeDisagree = true; | |||
3063 | ||||
3064 | // Is there a difference in whether the value is definite or only | |||
3065 | // proposed? | |||
3066 | if (V.second->Kind != FirstVal.Kind && | |||
3067 | (V.second->Kind == DbgValue::Proposed || | |||
3068 | V.second->Kind == DbgValue::Def) && | |||
3069 | (FirstVal.Kind == DbgValue::Proposed || | |||
3070 | FirstVal.Kind == DbgValue::Def)) | |||
3071 | DisagreeOnPHINess = true; | |||
3072 | } | |||
3073 | ||||
3074 | // Collect those flags together and determine an overall state for | |||
3075 | // what extend the predecessors agree on a live-in value. | |||
3076 | if (!Disagree) | |||
3077 | OurState = Agreed; | |||
3078 | else if (!IDDisagree && DisagreeOnPHINess) | |||
3079 | OurState = PropDisagree; | |||
3080 | else if (!NonBackEdgeDisagree) | |||
3081 | OurState = BEDisagree; | |||
3082 | else | |||
3083 | OurState = PHINeeded; | |||
3084 | } | |||
3085 | ||||
3086 | // An extra indicator: if we only disagree on whether the value is a | |||
3087 | // Def, or proposed, then also flag whether that disagreement happens | |||
3088 | // in backedges only. | |||
3089 | bool PropOnlyInBEs = Disagree && !IDDisagree && DisagreeOnPHINess && | |||
3090 | !NonBackEdgeDisagree && FirstVal.Kind == DbgValue::Def; | |||
3091 | ||||
3092 | const auto &Properties = FirstVal.Properties; | |||
3093 | ||||
3094 | auto OldLiveInIt = ILS.find(Var); | |||
3095 | const DbgValue *OldLiveInLocation = | |||
3096 | (OldLiveInIt != ILS.end()) ? &OldLiveInIt->second : nullptr; | |||
3097 | ||||
3098 | bool OverRide = false; | |||
3099 | if (OurState == BEDisagree && OldLiveInLocation) { | |||
3100 | // Only backedges disagree: we can consider downgrading. If there was a | |||
3101 | // previous live-in value, use it to work out whether the current | |||
3102 | // incoming value represents a lattice downgrade or not. | |||
3103 | OverRide = | |||
3104 | vlocDowngradeLattice(MBB, *OldLiveInLocation, Values, CurBlockRPONum); | |||
3105 | } | |||
3106 | ||||
3107 | // Use the current state of predecessor agreement and other flags to work | |||
3108 | // out what to do next. Possibilities include: | |||
3109 | // * Accept a value all predecessors agree on, or accept one that | |||
3110 | // represents a step down the exploration lattice, | |||
3111 | // * Use a PHI value number, if one can be found, | |||
3112 | // * Propose a PHI value number, and see if it gets confirmed later, | |||
3113 | // * Emit a 'NoVal' value, indicating we couldn't resolve anything. | |||
3114 | if (OurState == Agreed) { | |||
3115 | // Easiest solution: all predecessors agree on the variable value. | |||
3116 | ConfirmValue(Var, FirstVal); | |||
3117 | } else if (OurState == BEDisagree && OverRide) { | |||
3118 | // Only backedges disagree, and the other predecessors have produced | |||
3119 | // a new live-in value further down the exploration lattice. | |||
3120 | DowngradeOccurred = true; | |||
3121 | ConfirmValue(Var, FirstVal); | |||
3122 | } else if (OurState == PropDisagree) { | |||
3123 | // Predecessors agree on value, but some say it's only a proposed value. | |||
3124 | // Propagate it as proposed: unless it was proposed in this block, in | |||
3125 | // which case we're able to confirm the value. | |||
3126 | if (FirstID.getBlock() == (uint64_t)MBB.getNumber() && FirstID.isPHI()) { | |||
3127 | ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Def)); | |||
3128 | } else if (PropOnlyInBEs) { | |||
3129 | // If only backedges disagree, a higher (in RPO) block confirmed this | |||
3130 | // location, and we need to propagate it into this loop. | |||
3131 | ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Def)); | |||
3132 | } else { | |||
3133 | // Otherwise; a Def meeting a Proposed is still a Proposed. | |||
3134 | ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Proposed)); | |||
3135 | } | |||
3136 | } else if ((OurState == PHINeeded || OurState == BEDisagree)) { | |||
3137 | // Predecessors disagree and can't be downgraded: this can only be | |||
3138 | // solved with a PHI. Use pickVPHILoc to go look for one. | |||
3139 | Optional<ValueIDNum> VPHI; | |||
3140 | bool AllEdgesVPHI = false; | |||
3141 | std::tie(VPHI, AllEdgesVPHI) = | |||
3142 | pickVPHILoc(MBB, Var, VLOCOutLocs, MOutLocs, MInLocs, BlockOrders); | |||
3143 | ||||
3144 | if (VPHI && AllEdgesVPHI) { | |||
3145 | // There's a PHI value that's valid for all predecessors -- we can use | |||
3146 | // it. If any of the non-backedge predecessors have proposed values | |||
3147 | // though, this PHI is also only proposed, until the predecessors are | |||
3148 | // confirmed. | |||
3149 | DbgValue::KindT K = DbgValue::Def; | |||
3150 | for (unsigned int I = 0; I < BackEdgesStart; ++I) | |||
3151 | if (Values[I].second->Kind == DbgValue::Proposed) | |||
3152 | K = DbgValue::Proposed; | |||
3153 | ||||
3154 | ConfirmValue(Var, DbgValue(*VPHI, Properties, K)); | |||
3155 | } else if (VPHI) { | |||
3156 | // There's a PHI value, but it's only legal for backedges. Leave this | |||
3157 | // as a proposed PHI value: it might come back on the backedges, | |||
3158 | // and allow us to confirm it in the future. | |||
3159 | DbgValue NoBEValue = DbgValue(*VPHI, Properties, DbgValue::Proposed); | |||
3160 | ConfirmValue(Var, NoBEValue); | |||
3161 | } else { | |||
3162 | ConfirmNoVal(Var, Properties); | |||
3163 | } | |||
3164 | } else { | |||
3165 | // Otherwise: we don't know. Emit a "phi but no real loc" phi. | |||
3166 | ConfirmNoVal(Var, Properties); | |||
3167 | } | |||
3168 | } | |||
3169 | ||||
3170 | // Store newly calculated in-locs into VLOCInLocs, if they've changed. | |||
3171 | Changed = ILS != InLocsT; | |||
3172 | if (Changed) | |||
3173 | ILS = InLocsT; | |||
3174 | ||||
3175 | return std::tuple<bool, bool>(Changed, DowngradeOccurred); | |||
3176 | } | |||
3177 | ||||
/// Solve the variable-value dataflow problem for a single lexical scope.
///
/// Much like mlocDataflow, but restricted to the blocks and variables of one
/// scope: pick the relevant blocks, then iterate join (vlocJoin) plus the
/// per-block transfer function until a fixedpoint is reached. Live-in
/// variable values that survive (i.e. are not left as merely Proposed or
/// NoVal) are appended to \p Output, indexed by block number.
///
/// \param Scope           The lexical scope being solved.
/// \param DILoc           A DILocation describing \p Scope, used to find its
///                        blocks via LexicalScopes.
/// \param VarsWeCareAbout Variables assigned within this scope.
/// \param AssignBlocks    Blocks containing DBG_VALUEs for those variables.
/// \param Output          Per-block-number collection of live-in values,
///                        appended to by this method.
/// \param MOutLocs        Machine-value live-outs, indexed [block][loc].
/// \param MInLocs         Machine-value live-ins, indexed [block][loc].
/// \param AllTheVLocs     Per-block variable transfer functions.
void InstrRefBasedLDV::vlocDataflow(
    const LexicalScope *Scope, const DILocation *DILoc,
    const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
    SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output,
    ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
    SmallVectorImpl<VLocTracker> &AllTheVLocs) {
  // This method is much like mlocDataflow: but focuses on a single
  // LexicalScope at a time. Pick out a set of blocks and variables that are
  // to have their value assignments solved, then run our dataflow algorithm
  // until a fixedpoint is reached.

  // Two worklists of blocks, keyed by RPO number so that std::greater pops
  // them in RPO order: Worklist drives the current dataflow iteration,
  // Pending books blocks for the next one.
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist, Pending;
  SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending;

  // The set of blocks we'll be examining.
  SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;

  // The order in which to examine them (RPO).
  SmallVector<MachineBasicBlock *, 8> BlockOrders;

  // RPO ordering function.
  auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
    return BBToOrder[A] < BBToOrder[B];
  };

  LS.getMachineBasicBlocks(DILoc, BlocksToExplore);

  // A separate container to distinguish "blocks we're exploring" versus
  // "blocks that are potentially in scope". See comment at start of vlocJoin.
  SmallPtrSet<const MachineBasicBlock *, 8> InScopeBlocks = BlocksToExplore;

  // Old LiveDebugValues tracks variable locations that come out of blocks
  // not in scope, where DBG_VALUEs occur. This is something we could
  // legitimately ignore, but let's allow it for now.
  if (EmulateOldLDV)
    BlocksToExplore.insert(AssignBlocks.begin(), AssignBlocks.end());

  // We also need to propagate variable values through any artificial blocks
  // that immediately follow blocks in scope.
  DenseSet<const MachineBasicBlock *> ToAdd;

  // Helper lambda: For a given block in scope, perform a depth first search
  // of all the artificial successors, adding them to the ToAdd collection.
  auto AccumulateArtificialBlocks =
      [this, &ToAdd, &BlocksToExplore,
       &InScopeBlocks](const MachineBasicBlock *MBB) {
        // Depth-first-search state: each node is a block and which successor
        // we're currently exploring.
        SmallVector<std::pair<const MachineBasicBlock *,
                              MachineBasicBlock::const_succ_iterator>,
                    8>
            DFS;

        // Find any artificial successors not already tracked.
        for (auto *succ : MBB->successors()) {
          if (BlocksToExplore.count(succ) || InScopeBlocks.count(succ))
            continue;
          if (!ArtificialBlocks.count(succ))
            continue;
          DFS.push_back(std::make_pair(succ, succ->succ_begin()));
          ToAdd.insert(succ);
        }

        // Search all those blocks, depth first.
        while (!DFS.empty()) {
          const MachineBasicBlock *CurBB = DFS.back().first;
          MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second;
          // Walk back if we've explored this block's successors to the end.
          if (CurSucc == CurBB->succ_end()) {
            DFS.pop_back();
            continue;
          }

          // If the current successor is artificial and unexplored, descend into
          // it.
          if (!ToAdd.count(*CurSucc) && ArtificialBlocks.count(*CurSucc)) {
            DFS.push_back(std::make_pair(*CurSucc, (*CurSucc)->succ_begin()));
            ToAdd.insert(*CurSucc);
            continue;
          }

          ++CurSucc;
        }
      };

  // Search in-scope blocks and those containing a DBG_VALUE from this scope
  // for artificial successors.
  for (auto *MBB : BlocksToExplore)
    AccumulateArtificialBlocks(MBB);
  for (auto *MBB : InScopeBlocks)
    AccumulateArtificialBlocks(MBB);

  BlocksToExplore.insert(ToAdd.begin(), ToAdd.end());
  InScopeBlocks.insert(ToAdd.begin(), ToAdd.end());

  // Single block scope: not interesting! No propagation at all. Note that
  // this could probably go above ArtificialBlocks without damage, but
  // that then produces output differences from original-live-debug-values,
  // which propagates from a single block into many artificial ones.
  if (BlocksToExplore.size() == 1)
    return;

  // Pick out the relevant blocks and sort them into RPO order.
  for (auto *MBB : BlocksToExplore)
    BlockOrders.push_back(const_cast<MachineBasicBlock *>(MBB));

  llvm::sort(BlockOrders, Cmp);
  unsigned NumBlocks = BlockOrders.size();

  // Allocate some vectors for storing the live ins and live outs. Large.
  SmallVector<DenseMap<DebugVariable, DbgValue>, 32> LiveIns, LiveOuts;
  LiveIns.resize(NumBlocks);
  LiveOuts.resize(NumBlocks);

  // Produce by-MBB indexes of live-in/live-outs, to ease lookup within
  // vlocJoin.
  LiveIdxT LiveOutIdx, LiveInIdx;
  LiveOutIdx.reserve(NumBlocks);
  LiveInIdx.reserve(NumBlocks);
  for (unsigned I = 0; I < NumBlocks; ++I) {
    LiveOutIdx[BlockOrders[I]] = &LiveOuts[I];
    LiveInIdx[BlockOrders[I]] = &LiveIns[I];
  }

  // Seed the first iteration with every block under consideration.
  for (auto *MBB : BlockOrders) {
    Worklist.push(BBToOrder[MBB]);
    OnWorklist.insert(MBB);
  }

  // Iterate over all the blocks we selected, propagating variable values.
  bool FirstTrip = true;
  SmallPtrSet<const MachineBasicBlock *, 16> VLOCVisited;
  while (!Worklist.empty() || !Pending.empty()) {
    while (!Worklist.empty()) {
      auto *MBB = OrderToBB[Worklist.top()];
      CurBB = MBB->getNumber();
      Worklist.pop();

      DenseMap<DebugVariable, DbgValue> JoinedInLocs;

      // Join values from predecessors. Updates LiveInIdx, and writes output
      // into JoinedInLocs.
      bool InLocsChanged, DowngradeOccurred;
      std::tie(InLocsChanged, DowngradeOccurred) = vlocJoin(
          *MBB, LiveOutIdx, LiveInIdx, (FirstTrip) ? &VLOCVisited : nullptr,
          CurBB, VarsWeCareAbout, MOutLocs, MInLocs, InScopeBlocks,
          BlocksToExplore, JoinedInLocs);

      bool FirstVisit = VLOCVisited.insert(MBB).second;

      // Always explore transfer function if inlocs changed, or if we've not
      // visited this block before.
      InLocsChanged |= FirstVisit;

      // If a downgrade occurred, book us in for re-examination on the next
      // iteration.
      if (DowngradeOccurred && OnPending.insert(MBB).second)
        Pending.push(BBToOrder[MBB]);

      if (!InLocsChanged)
        continue;

      // Do transfer function.
      auto &VTracker = AllTheVLocs[MBB->getNumber()];
      for (auto &Transfer : VTracker.Vars) {
        // Is this var we're mangling in this scope?
        if (VarsWeCareAbout.count(Transfer.first)) {
          // Erase on empty transfer (DBG_VALUE $noreg).
          if (Transfer.second.Kind == DbgValue::Undef) {
            JoinedInLocs.erase(Transfer.first);
          } else {
            // Insert new variable value; or overwrite.
            auto NewValuePair = std::make_pair(Transfer.first, Transfer.second);
            auto Result = JoinedInLocs.insert(NewValuePair);
            if (!Result.second)
              Result.first->second = Transfer.second;
          }
        }
      }

      // Did the live-out locations change?
      bool OLChanged = JoinedInLocs != *LiveOutIdx[MBB];

      // If they haven't changed, there's no need to explore further.
      if (!OLChanged)
        continue;

      // Commit to the live-out record.
      *LiveOutIdx[MBB] = JoinedInLocs;

      // We should visit all successors. Ensure we'll visit any non-backedge
      // successors during this dataflow iteration; book backedge successors
      // to be visited next time around.
      for (auto s : MBB->successors()) {
        // Ignore out of scope / not-to-be-explored successors.
        if (LiveInIdx.find(s) == LiveInIdx.end())
          continue;

        if (BBToOrder[s] > BBToOrder[MBB]) {
          if (OnWorklist.insert(s).second)
            Worklist.push(BBToOrder[s]);
        } else if (OnPending.insert(s).second && (FirstTrip || OLChanged)) {
          Pending.push(BBToOrder[s]);
        }
      }
    }
    // This iteration is done: swap in the blocks booked for the next pass.
    Worklist.swap(Pending);
    std::swap(OnWorklist, OnPending);
    OnPending.clear();
    assert(Pending.empty())(static_cast <bool> (Pending.empty()) ? void (0) : __assert_fail ("Pending.empty()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3388, __extension__ __PRETTY_FUNCTION__));
    FirstTrip = false;
  }

  // Dataflow done. Now what? Save live-ins. Ignore any that are still marked
  // as being variable-PHIs, because those did not have their machine-PHI
  // value confirmed. Such variable values are places that could have been
  // PHIs, but are not.
  for (auto *MBB : BlockOrders) {
    auto &VarMap = *LiveInIdx[MBB];
    for (auto &P : VarMap) {
      if (P.second.Kind == DbgValue::Proposed ||
          P.second.Kind == DbgValue::NoVal)
        continue;
      Output[MBB->getNumber()].push_back(P);
    }
  }

  BlockOrders.clear();
  BlocksToExplore.clear();
}
3409 | ||||
3410 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
3411 | void InstrRefBasedLDV::dump_mloc_transfer( | |||
3412 | const MLocTransferMap &mloc_transfer) const { | |||
3413 | for (auto &P : mloc_transfer) { | |||
3414 | std::string foo = MTracker->LocIdxToName(P.first); | |||
3415 | std::string bar = MTracker->IDAsString(P.second); | |||
3416 | dbgs() << "Loc " << foo << " --> " << bar << "\n"; | |||
3417 | } | |||
3418 | } | |||
3419 | #endif | |||
3420 | ||||
/// Create DBG_VALUE instructions for the computed variable locations.
///
/// Walks every block with the solved machine-value live-ins (\p MInLocs) and
/// variable live-ins (\p SavedLiveIns) loaded, letting TTracker record the
/// DBG_VALUEs ("transfers") that need inserting; then inserts them in a
/// deterministic order given by \p AllVarsNumbering.
///
/// Note: allocates this->TTracker with new; it is deleted later by the
/// caller (ExtendRanges), not by this method.
void InstrRefBasedLDV::emitLocations(
    MachineFunction &MF, LiveInsT SavedLiveIns, ValueIDNum **MOutLocs,
    ValueIDNum **MInLocs, DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
    const TargetPassConfig &TPC) {
  TTracker = new TransferTracker(TII, MTracker, MF, *TRI, CalleeSavedRegs, TPC);
  unsigned NumLocs = MTracker->getNumLocs();

  // For each block, load in the machine value locations and variable value
  // live-ins, then step through each instruction in the block. New DBG_VALUEs
  // to be inserted will be created along the way.
  for (MachineBasicBlock &MBB : MF) {
    unsigned bbnum = MBB.getNumber();
    MTracker->reset();
    MTracker->loadFromArray(MInLocs[bbnum], bbnum);
    TTracker->loadInlocs(MBB, MInLocs[bbnum], SavedLiveIns[MBB.getNumber()],
                         NumLocs);

    CurBB = bbnum;
    CurInst = 1;
    for (auto &MI : MBB) {
      process(MI, MOutLocs, MInLocs);
      TTracker->checkInstForNewValues(CurInst, MI.getIterator());
      ++CurInst;
    }
  }

  // We have to insert DBG_VALUEs in a consistent order, otherwise they appear
  // in DWARF in different orders. Use the order that they appear when walking
  // through each block / each instruction, stored in AllVarsNumbering.
  // NOTE(review): the lookups below assume every DBG_VALUE's variable was
  // entered into AllVarsNumbering earlier -- find() results are dereferenced
  // unchecked.
  auto OrderDbgValues = [&](const MachineInstr *A,
                            const MachineInstr *B) -> bool {
    DebugVariable VarA(A->getDebugVariable(), A->getDebugExpression(),
                       A->getDebugLoc()->getInlinedAt());
    DebugVariable VarB(B->getDebugVariable(), B->getDebugExpression(),
                       B->getDebugLoc()->getInlinedAt());
    return AllVarsNumbering.find(VarA)->second <
           AllVarsNumbering.find(VarB)->second;
  };

  // Go through all the transfers recorded in the TransferTracker -- this is
  // both the live-ins to a block, and any movements of values that happen
  // in the middle.
  for (auto &P : TTracker->Transfers) {
    // Sort them according to appearance order.
    llvm::sort(P.Insts, OrderDbgValues);
    // Insert either before or after the designated point...
    if (P.MBB) {
      // A block was recorded: these are block live-ins, inserted before Pos.
      MachineBasicBlock &MBB = *P.MBB;
      for (auto *MI : P.Insts) {
        MBB.insert(P.Pos, MI);
      }
    } else {
      // Terminators, like tail calls, can clobber things. Don't try and place
      // transfers after them.
      if (P.Pos->isTerminator())
        continue;

      MachineBasicBlock &MBB = *P.Pos->getParent();
      for (auto *MI : P.Insts) {
        MBB.insertAfterBundle(P.Pos, MI);
      }
    }
  }
}
3485 | ||||
3486 | void InstrRefBasedLDV::initialSetup(MachineFunction &MF) { | |||
3487 | // Build some useful data structures. | |||
3488 | auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool { | |||
3489 | if (const DebugLoc &DL = MI.getDebugLoc()) | |||
3490 | return DL.getLine() != 0; | |||
3491 | return false; | |||
3492 | }; | |||
3493 | // Collect a set of all the artificial blocks. | |||
3494 | for (auto &MBB : MF) | |||
3495 | if (none_of(MBB.instrs(), hasNonArtificialLocation)) | |||
3496 | ArtificialBlocks.insert(&MBB); | |||
3497 | ||||
3498 | // Compute mappings of block <=> RPO order. | |||
3499 | ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); | |||
3500 | unsigned int RPONumber = 0; | |||
3501 | for (MachineBasicBlock *MBB : RPOT) { | |||
3502 | OrderToBB[RPONumber] = MBB; | |||
3503 | BBToOrder[MBB] = RPONumber; | |||
3504 | BBNumToRPO[MBB->getNumber()] = RPONumber; | |||
3505 | ++RPONumber; | |||
3506 | } | |||
3507 | ||||
3508 | // Order value substitutions by their "source" operand pair, for quick lookup. | |||
3509 | llvm::sort(MF.DebugValueSubstitutions); | |||
3510 | ||||
3511 | #ifdef EXPENSIVE_CHECKS | |||
3512 | // As an expensive check, test whether there are any duplicate substitution | |||
3513 | // sources in the collection. | |||
3514 | if (MF.DebugValueSubstitutions.size() > 2) { | |||
3515 | for (auto It = MF.DebugValueSubstitutions.begin(); | |||
3516 | It != std::prev(MF.DebugValueSubstitutions.end()); ++It) { | |||
3517 | assert(It->Src != std::next(It)->Src && "Duplicate variable location "(static_cast <bool> (It->Src != std::next(It)->Src && "Duplicate variable location " "substitution seen" ) ? void (0) : __assert_fail ("It->Src != std::next(It)->Src && \"Duplicate variable location \" \"substitution seen\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3518, __extension__ __PRETTY_FUNCTION__)) | |||
3518 | "substitution seen")(static_cast <bool> (It->Src != std::next(It)->Src && "Duplicate variable location " "substitution seen" ) ? void (0) : __assert_fail ("It->Src != std::next(It)->Src && \"Duplicate variable location \" \"substitution seen\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3518, __extension__ __PRETTY_FUNCTION__)); | |||
3519 | } | |||
3520 | } | |||
3521 | #endif | |||
3522 | } | |||
3523 | ||||
/// Calculate the liveness information for the given machine function and
/// extend ranges across basic blocks.
///
/// Top-level driver: solves the machine-value dataflow problem first, then
/// the per-scope variable-value problems, and finally emits DBG_VALUEs for
/// the results. Returns true iff any DBG_VALUEs were created.
bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
                                    TargetPassConfig *TPC) {
  // No subprogram means this function contains no debuginfo.
  if (!MF.getFunction().getSubprogram())

    return false;

  LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("livedebugvalues")) { dbgs() << "\nDebug Range Extension\n" ; } } while (false);
  this->TPC = TPC;

  // Cache target and frame information for the duration of this function.
  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget().getInstrInfo();
  TFI = MF.getSubtarget().getFrameLowering();
  TFI->getCalleeSaves(MF, CalleeSavedRegs);
  MFI = &MF.getFrameInfo();
  LS.initialize(MF);

  MTracker =
      new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering());
  VTracker = nullptr;
  TTracker = nullptr;

  SmallVector<MLocTransferMap, 32> MLocTransfer;
  SmallVector<VLocTracker, 8> vlocs;
  LiveInsT SavedLiveIns;

  // Size all per-block containers by the highest block number plus one, so
  // that MBB->getNumber() can be used as a direct index everywhere below.
  int MaxNumBlocks = -1;
  for (auto &MBB : MF)
    MaxNumBlocks = std::max(MBB.getNumber(), MaxNumBlocks);
  assert(MaxNumBlocks >= 0)(static_cast <bool> (MaxNumBlocks >= 0) ? void (0) : __assert_fail ("MaxNumBlocks >= 0", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3554, __extension__ __PRETTY_FUNCTION__));
  ++MaxNumBlocks;

  MLocTransfer.resize(MaxNumBlocks);
  vlocs.resize(MaxNumBlocks);
  SavedLiveIns.resize(MaxNumBlocks);

  initialSetup(MF);

  produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks);

  // Allocate and initialize two array-of-arrays for the live-in and live-out
  // machine values. The outer dimension is the block number; while the inner
  // dimension is a LocIdx from MLocTracker.
  // NOTE(review): these are manually managed (raw new[]/delete[]) and freed
  // near the end of this function; any early exit added between here and the
  // deletes would leak them.
  ValueIDNum **MOutLocs = new ValueIDNum *[MaxNumBlocks];
  ValueIDNum **MInLocs = new ValueIDNum *[MaxNumBlocks];
  unsigned NumLocs = MTracker->getNumLocs();
  for (int i = 0; i < MaxNumBlocks; ++i) {
    MOutLocs[i] = new ValueIDNum[NumLocs];
    MInLocs[i] = new ValueIDNum[NumLocs];
  }

  // Solve the machine value dataflow problem using the MLocTransfer function,
  // storing the computed live-ins / live-outs into the array-of-arrays. We use
  // both live-ins and live-outs for decision making in the variable value
  // dataflow problem.
  mlocDataflow(MInLocs, MOutLocs, MLocTransfer);

  // Patch up debug phi numbers, turning unknown block-live-in values into
  // either live-through machine values, or PHIs.
  for (auto &DBG_PHI : DebugPHINumToValue) {
    // Identify unresolved block-live-ins.
    ValueIDNum &Num = DBG_PHI.ValueRead;
    if (!Num.isPHI())
      continue;

    unsigned BlockNo = Num.getBlock();
    LocIdx LocNo = Num.getLoc();
    // NOTE(review): static analysis reports a possible null / out-of-bounds
    // dereference through the live-in table on this access. By construction
    // a DBG_PHI's block number should be < MaxNumBlocks (the arrays' outer
    // bound) -- confirm that DebugPHINumToValue can never reference a block
    // outside this function's numbering.
    Num = MInLocs[BlockNo][LocNo.asU64()];
  }
  // Later, we'll be looking up ranges of instruction numbers.
  llvm::sort(DebugPHINumToValue);

  // Walk back through each block / instruction, collecting DBG_VALUE
  // instructions and recording what machine value their operands refer to.
  for (auto &OrderPair : OrderToBB) {
    MachineBasicBlock &MBB = *OrderPair.second;
    CurBB = MBB.getNumber();
    VTracker = &vlocs[CurBB];
    VTracker->MBB = &MBB;
    MTracker->loadFromArray(MInLocs[CurBB], CurBB);
    CurInst = 1;
    for (auto &MI : MBB) {
      process(MI, MOutLocs, MInLocs);
      ++CurInst;
    }
    MTracker->reset();
  }

  // Number all variables in the order that they appear, to be used as a stable
  // insertion order later.
  DenseMap<DebugVariable, unsigned> AllVarsNumbering;

  // Map from one LexicalScope to all the variables in that scope.
  DenseMap<const LexicalScope *, SmallSet<DebugVariable, 4>> ScopeToVars;

  // Map from One lexical scope to all blocks in that scope.
  DenseMap<const LexicalScope *, SmallPtrSet<MachineBasicBlock *, 4>>
      ScopeToBlocks;

  // Store a DILocation that describes a scope.
  DenseMap<const LexicalScope *, const DILocation *> ScopeToDILocation;

  // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
  // the order is unimportant, it just has to be stable.
  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
    auto *MBB = OrderToBB[I];
    auto *VTracker = &vlocs[MBB->getNumber()];
    // Collect each variable with a DBG_VALUE in this block.
    for (auto &idx : VTracker->Vars) {
      const auto &Var = idx.first;
      const DILocation *ScopeLoc = VTracker->Scopes[Var];
      assert(ScopeLoc != nullptr)(static_cast <bool> (ScopeLoc != nullptr) ? void (0) : __assert_fail ("ScopeLoc != nullptr", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3636, __extension__ __PRETTY_FUNCTION__));
      auto *Scope = LS.findLexicalScope(ScopeLoc);

      // No insts in scope -> shouldn't have been recorded.
      assert(Scope != nullptr)(static_cast <bool> (Scope != nullptr) ? void (0) : __assert_fail ("Scope != nullptr", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp" , 3640, __extension__ __PRETTY_FUNCTION__));

      AllVarsNumbering.insert(std::make_pair(Var, AllVarsNumbering.size()));
      ScopeToVars[Scope].insert(Var);
      ScopeToBlocks[Scope].insert(VTracker->MBB);
      ScopeToDILocation[Scope] = ScopeLoc;
    }
  }

  // OK. Iterate over scopes: there might be something to be said for
  // ordering them by size/locality, but that's for the future. For each scope,
  // solve the variable value problem, producing a map of variables to values
  // in SavedLiveIns.
  for (auto &P : ScopeToVars) {
    vlocDataflow(P.first, ScopeToDILocation[P.first], P.second,
                 ScopeToBlocks[P.first], SavedLiveIns, MOutLocs, MInLocs,
                 vlocs);
  }

  // Using the computed value locations and variable values for each block,
  // create the DBG_VALUE instructions representing the extended variable
  // locations.
  emitLocations(MF, SavedLiveIns, MOutLocs, MInLocs, AllVarsNumbering, *TPC);

  // Free the manually-managed live-in / live-out tables.
  for (int Idx = 0; Idx < MaxNumBlocks; ++Idx) {
    delete[] MOutLocs[Idx];
    delete[] MInLocs[Idx];
  }
  delete[] MOutLocs;
  delete[] MInLocs;

  // Did we actually make any changes? If we created any DBG_VALUEs, then yes.
  bool Changed = TTracker->Transfers.size() != 0;

  // Tear down the trackers; TTracker was allocated by emitLocations above.
  delete MTracker;
  delete TTracker;
  MTracker = nullptr;
  VTracker = nullptr;
  TTracker = nullptr;

  // Clear per-function state so the pass object can be reused.
  ArtificialBlocks.clear();
  OrderToBB.clear();
  BBToOrder.clear();
  BBNumToRPO.clear();
  DebugInstrNumToInstr.clear();
  DebugPHINumToValue.clear();

  return Changed;
}
3689 | ||||
3690 | LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() { | |||
3691 | return new InstrRefBasedLDV(); | |||
3692 | } | |||
3693 | ||||
3694 | namespace { | |||
3695 | class LDVSSABlock; | |||
3696 | class LDVSSAUpdater; | |||
3697 | ||||
3698 | // Pick a type to identify incoming block values as we construct SSA. We | |||
3699 | // can't use anything more robust than an integer unfortunately, as SSAUpdater | |||
3700 | // expects to zero-initialize the type. | |||
3701 | typedef uint64_t BlockValueNum; | |||
3702 | ||||
/// Represents an SSA PHI node for the SSA updater class. Contains the block
/// this PHI is in, the value number it would have, and the expected incoming
/// values from parent blocks.
class LDVSSAPhi {
public:
  /// Incoming (predecessor block, value number) operands. Populated by
  /// SSAUpdaterTraits<LDVSSAUpdater>::AddPHIOperand as SSAUpdater explores
  /// the CFG.
  SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues;
  /// Block in which this PHI notionally resides.
  LDVSSABlock *ParentBlock;
  /// The value number this PHI defines.
  BlockValueNum PHIValNum;
  LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock)
      : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {}

  LDVSSABlock *getParent() { return ParentBlock; }
};
3716 | ||||
/// Thin wrapper around a block predecessor iterator. Only difference from a
/// normal block iterator is that it dereferences to an LDVSSABlock.
class LDVSSABlockIterator {
public:
  // NOTE(review): although declared as a pred_iterator, LDVSSABlock's
  // succ_begin()/succ_end() construct this from succ iterators too -- this
  // relies on the two MachineBasicBlock iterator types being identical.
  MachineBasicBlock::pred_iterator PredIt;
  /// Updater used to map each underlying MachineBasicBlock to its
  /// LDVSSABlock wrapper on dereference.
  LDVSSAUpdater &Updater;

  LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt,
                      LDVSSAUpdater &Updater)
      : PredIt(PredIt), Updater(Updater) {}

  bool operator!=(const LDVSSABlockIterator &OtherIt) const {
    return OtherIt.PredIt != PredIt;
  }

  LDVSSABlockIterator &operator++() {
    ++PredIt;
    return *this;
  }

  // Defined out-of-line below, once LDVSSAUpdater is a complete type.
  LDVSSABlock *operator*();
};
3739 | ||||
/// Thin wrapper around a block for SSA Updater interface. Necessary because
/// we need to track the PHI value(s) that we may have observed as necessary
/// in this block.
class LDVSSABlock {
public:
  /// The machine block being wrapped.
  MachineBasicBlock &BB;
  /// Updater that owns this wrapper; used to wrap successor blocks too.
  LDVSSAUpdater &Updater;
  using PHIListT = SmallVector<LDVSSAPhi, 1>;
  /// List of PHIs in this block. There should only ever be one.
  PHIListT PHIList;

  LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater)
      : BB(BB), Updater(Updater) {}

  // Successor iteration, dereferencing to LDVSSABlock wrappers. Note these
  // pass succ iterators into LDVSSABlockIterator's pred_iterator member;
  // the two iterator types must be (and are) the same type.
  LDVSSABlockIterator succ_begin() {
    return LDVSSABlockIterator(BB.succ_begin(), Updater);
  }

  LDVSSABlockIterator succ_end() {
    return LDVSSABlockIterator(BB.succ_end(), Updater);
  }

  /// SSAUpdater has requested a PHI: create that within this block record.
  /// Returns a pointer into PHIList -- growing PHIList past its inline
  /// capacity would invalidate previously returned pointers, so this relies
  /// on there only ever being one PHI per block (see PHIList comment).
  LDVSSAPhi *newPHI(BlockValueNum Value) {
    PHIList.emplace_back(Value, this);
    return &PHIList.back();
  }

  /// SSAUpdater wishes to know what PHIs already exist in this block.
  PHIListT &phis() { return PHIList; }
};
3771 | ||||
3772 | /// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values | |||
3773 | /// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to | |||
3774 | // SSAUpdaterTraits<LDVSSAUpdater>. | |||
3775 | class LDVSSAUpdater { | |||
3776 | public: | |||
3777 | /// Map of value numbers to PHI records. | |||
3778 | DenseMap<BlockValueNum, LDVSSAPhi *> PHIs; | |||
3779 | /// Map of which blocks generate Undef values -- blocks that are not | |||
3780 | /// dominated by any Def. | |||
3781 | DenseMap<MachineBasicBlock *, BlockValueNum> UndefMap; | |||
3782 | /// Map of machine blocks to our own records of them. | |||
3783 | DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap; | |||
3784 | /// Machine location where any PHI must occur. | |||
3785 | LocIdx Loc; | |||
3786 | /// Table of live-in machine value numbers for blocks / locations. | |||
3787 | ValueIDNum **MLiveIns; | |||
3788 | ||||
3789 | LDVSSAUpdater(LocIdx L, ValueIDNum **MLiveIns) : Loc(L), MLiveIns(MLiveIns) {} | |||
3790 | ||||
3791 | void reset() { | |||
3792 | for (auto &Block : BlockMap) | |||
3793 | delete Block.second; | |||
3794 | ||||
3795 | PHIs.clear(); | |||
3796 | UndefMap.clear(); | |||
3797 | BlockMap.clear(); | |||
3798 | } | |||
3799 | ||||
3800 | ~LDVSSAUpdater() { reset(); } | |||
3801 | ||||
3802 | /// For a given MBB, create a wrapper block for it. Stores it in the | |||
3803 | /// LDVSSAUpdater block map. | |||
3804 | LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) { | |||
3805 | auto it = BlockMap.find(BB); | |||
3806 | if (it == BlockMap.end()) { | |||
3807 | BlockMap[BB] = new LDVSSABlock(*BB, *this); | |||
3808 | it = BlockMap.find(BB); | |||
3809 | } | |||
3810 | return it->second; | |||
3811 | } | |||
3812 | ||||
3813 | /// Find the live-in value number for the given block. Looks up the value at | |||
3814 | /// the PHI location on entry. | |||
3815 | BlockValueNum getValue(LDVSSABlock *LDVBB) { | |||
3816 | return MLiveIns[LDVBB->BB.getNumber()][Loc.asU64()].asU64(); | |||
3817 | } | |||
3818 | }; | |||
3819 | ||||
// Out-of-line (LDVSSAUpdater must be complete here): dereference to the
// wrapper record for the pointed-to block, creating it on demand.
LDVSSABlock *LDVSSABlockIterator::operator*() {
  return Updater.getSSALDVBlock(*PredIt);
}
3823 | ||||
3824 | #ifndef NDEBUG | |||
3825 | ||||
3826 | raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) { | |||
3827 | out << "SSALDVPHI " << PHI.PHIValNum; | |||
3828 | return out; | |||
3829 | } | |||
3830 | ||||
3831 | #endif | |||
3832 | ||||
3833 | } // namespace | |||
3834 | ||||
3835 | namespace llvm { | |||
3836 | ||||
/// Template specialization to give SSAUpdater access to CFG and value
/// information. SSAUpdater calls methods in these traits, passing in the
/// LDVSSAUpdater object, to learn about blocks and the values they define.
/// It also provides methods to create PHI nodes and track them.
template <> class SSAUpdaterTraits<LDVSSAUpdater> {
public:
  using BlkT = LDVSSABlock;
  using ValT = BlockValueNum;
  using PhiT = LDVSSAPhi;
  using BlkSucc_iterator = LDVSSABlockIterator;

  // Methods to access block successors -- dereferencing to our wrapper class.
  static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
  static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }

  /// Iterator for PHI operands. Walks the (block, value) pairs recorded in
  /// an LDVSSAPhi's IncomingValues vector by index.
  class PHI_iterator {
  private:
    LDVSSAPhi *PHI;
    // Current position in PHI->IncomingValues.
    unsigned Idx;

  public:
    explicit PHI_iterator(LDVSSAPhi *P) // begin iterator
        : PHI(P), Idx(0) {}
    PHI_iterator(LDVSSAPhi *P, bool) // end iterator
        : PHI(P), Idx(PHI->IncomingValues.size()) {}

    PHI_iterator &operator++() {
      Idx++;
      return *this;
    }
    bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; }
    bool operator!=(const PHI_iterator &X) const { return !operator==(X); }

    BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; }

    LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; }
  };

  static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }

  static inline PHI_iterator PHI_end(PhiT *PHI) {
    return PHI_iterator(PHI, true);
  }

  /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
  /// vector.
  static void FindPredecessorBlocks(LDVSSABlock *BB,
                                    SmallVectorImpl<LDVSSABlock *> *Preds) {
    for (MachineBasicBlock::pred_iterator PI = BB->BB.pred_begin(),
                                          E = BB->BB.pred_end();
         PI != E; ++PI)
      Preds->push_back(BB->Updater.getSSALDVBlock(*PI));
  }

  /// GetUndefVal - Normally creates an IMPLICIT_DEF instruction with a new
  /// register. For LiveDebugValues, represents a block identified as not having
  /// any DBG_PHI predecessors.
  static BlockValueNum GetUndefVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) {
    // Create a value number for this block -- it needs to be unique and in the
    // "undef" collection, so that we know it's not real. Use a number
    // representing a PHI into this block. Recording it in UndefMap lets
    // resolveDbgPHIs later detect that a PHI input was undefined.
    BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64();
    Updater->UndefMap[&BB->BB] = Num;
    return Num;
  }

  /// CreateEmptyPHI - Create a (representation of a) PHI in the given block.
  /// SSAUpdater will populate it with information about incoming values. The
  /// value number of this PHI is whatever the machine value number problem
  /// solution determined it to be. This includes non-phi values if SSAUpdater
  /// tries to create a PHI where the incoming values are identical.
  static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds,
                                      LDVSSAUpdater *Updater) {
    BlockValueNum PHIValNum = Updater->getValue(BB);
    LDVSSAPhi *PHI = BB->newPHI(PHIValNum);
    Updater->PHIs[PHIValNum] = PHI;
    return PHIValNum;
  }

  /// AddPHIOperand - Add the specified value as an operand of the PHI for
  /// the specified predecessor block.
  static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val, LDVSSABlock *Pred) {
    PHI->IncomingValues.push_back(std::make_pair(Pred, Val));
  }

  /// ValueIsPHI - Check if the instruction that defines the specified value
  /// is a PHI instruction.
  static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
    auto PHIIt = Updater->PHIs.find(Val);
    if (PHIIt == Updater->PHIs.end())
      return nullptr;
    return PHIIt->second;
  }

  /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
  /// operands, i.e., it was just added.
  static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
    LDVSSAPhi *PHI = ValueIsPHI(Val, Updater);
    if (PHI && PHI->IncomingValues.size() == 0)
      return PHI;
    return nullptr;
  }

  /// GetPHIValue - For the specified PHI instruction, return the value
  /// that it defines.
  static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; }
};
3945 | ||||
3946 | } // end namespace llvm | |||
3947 | ||||
3948 | Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(MachineFunction &MF, | |||
3949 | ValueIDNum **MLiveOuts, | |||
3950 | ValueIDNum **MLiveIns, | |||
3951 | MachineInstr &Here, | |||
3952 | uint64_t InstrNum) { | |||
3953 | // Pick out records of DBG_PHI instructions that have been observed. If there | |||
3954 | // are none, then we cannot compute a value number. | |||
3955 | auto RangePair = std::equal_range(DebugPHINumToValue.begin(), | |||
3956 | DebugPHINumToValue.end(), InstrNum); | |||
3957 | auto LowerIt = RangePair.first; | |||
3958 | auto UpperIt = RangePair.second; | |||
3959 | ||||
3960 | // No DBG_PHI means there can be no location. | |||
3961 | if (LowerIt == UpperIt) | |||
3962 | return None; | |||
3963 | ||||
3964 | // If there's only one DBG_PHI, then that is our value number. | |||
3965 | if (std::distance(LowerIt, UpperIt) == 1) | |||
3966 | return LowerIt->ValueRead; | |||
3967 | ||||
3968 | auto DBGPHIRange = make_range(LowerIt, UpperIt); | |||
3969 | ||||
3970 | // Pick out the location (physreg, slot) where any PHIs must occur. It's | |||
3971 | // technically possible for us to merge values in different registers in each | |||
3972 | // block, but highly unlikely that LLVM will generate such code after register | |||
3973 | // allocation. | |||
3974 | LocIdx Loc = LowerIt->ReadLoc; | |||
3975 | ||||
3976 | // We have several DBG_PHIs, and a use position (the Here inst). All each | |||
3977 | // DBG_PHI does is identify a value at a program position. We can treat each | |||
3978 | // DBG_PHI like it's a Def of a value, and the use position is a Use of a | |||
3979 | // value, just like SSA. We use the bulk-standard LLVM SSA updater class to | |||
3980 | // determine which Def is used at the Use, and any PHIs that happen along | |||
3981 | // the way. | |||
3982 | // Adapted LLVM SSA Updater: | |||
3983 | LDVSSAUpdater Updater(Loc, MLiveIns); | |||
3984 | // Map of which Def or PHI is the current value in each block. | |||
3985 | DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues; | |||
3986 | // Set of PHIs that we have created along the way. | |||
3987 | SmallVector<LDVSSAPhi *, 8> CreatedPHIs; | |||
3988 | ||||
3989 | // Each existing DBG_PHI is a Def'd value under this model. Record these Defs | |||
3990 | // for the SSAUpdater. | |||
3991 | for (const auto &DBG_PHI : DBGPHIRange) { | |||
3992 | LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB); | |||
3993 | const ValueIDNum &Num = DBG_PHI.ValueRead; | |||
3994 | AvailableValues.insert(std::make_pair(Block, Num.asU64())); | |||
3995 | } | |||
3996 | ||||
3997 | LDVSSABlock *HereBlock = Updater.getSSALDVBlock(Here.getParent()); | |||
3998 | const auto &AvailIt = AvailableValues.find(HereBlock); | |||
3999 | if (AvailIt != AvailableValues.end()) { | |||
4000 | // Actually, we already know what the value is -- the Use is in the same | |||
4001 | // block as the Def. | |||
4002 | return ValueIDNum::fromU64(AvailIt->second); | |||
4003 | } | |||
4004 | ||||
4005 | // Otherwise, we must use the SSA Updater. It will identify the value number | |||
4006 | // that we are to use, and the PHIs that must happen along the way. | |||
4007 | SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs); | |||
4008 | BlockValueNum ResultInt = Impl.GetValue(Updater.getSSALDVBlock(Here.getParent())); | |||
4009 | ValueIDNum Result = ValueIDNum::fromU64(ResultInt); | |||
4010 | ||||
4011 | // We have the number for a PHI, or possibly live-through value, to be used | |||
4012 | // at this Use. There are a number of things we have to check about it though: | |||
4013 | // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? If so, this | |||
4014 | // Use was not completely dominated by DBG_PHIs and we should abort. | |||
4015 | // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that | |||
4016 | // we've left SSA form. Validate that the inputs to each PHI are the | |||
4017 | // expected values. | |||
4018 | // * Is a PHI we've created actually a merging of values, or are all the | |||
4019 | // predecessor values the same, leading to a non-PHI machine value number? | |||
4020 | // (SSAUpdater doesn't know that either). Remap validated PHIs into the | |||
4021 | // the ValidatedValues collection below to sort this out. | |||
4022 | DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues; | |||
4023 | ||||
4024 | // Define all the input DBG_PHI values in ValidatedValues. | |||
4025 | for (const auto &DBG_PHI : DBGPHIRange) { | |||
4026 | LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB); | |||
4027 | const ValueIDNum &Num = DBG_PHI.ValueRead; | |||
4028 | ValidatedValues.insert(std::make_pair(Block, Num)); | |||
4029 | } | |||
4030 | ||||
4031 | // Sort PHIs to validate into RPO-order. | |||
4032 | SmallVector<LDVSSAPhi *, 8> SortedPHIs; | |||
4033 | for (auto &PHI : CreatedPHIs) | |||
4034 | SortedPHIs.push_back(PHI); | |||
4035 | ||||
4036 | std::sort( | |||
4037 | SortedPHIs.begin(), SortedPHIs.end(), [&](LDVSSAPhi *A, LDVSSAPhi *B) { | |||
4038 | return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB]; | |||
4039 | }); | |||
4040 | ||||
4041 | for (auto &PHI : SortedPHIs) { | |||
4042 | ValueIDNum ThisBlockValueNum = | |||
4043 | MLiveIns[PHI->ParentBlock->BB.getNumber()][Loc.asU64()]; | |||
| ||||
4044 | ||||
4045 | // Are all these things actually defined? | |||
4046 | for (auto &PHIIt : PHI->IncomingValues) { | |||
4047 | // Any undef input means DBG_PHIs didn't dominate the use point. | |||
4048 | if (Updater.UndefMap.find(&PHIIt.first->BB) != Updater.UndefMap.end()) | |||
4049 | return None; | |||
4050 | ||||
4051 | ValueIDNum ValueToCheck; | |||
4052 | ValueIDNum *BlockLiveOuts = MLiveOuts[PHIIt.first->BB.getNumber()]; | |||
4053 | ||||
4054 | auto VVal = ValidatedValues.find(PHIIt.first); | |||
4055 | if (VVal == ValidatedValues.end()) { | |||
4056 | // We cross a loop, and this is a backedge. LLVMs tail duplication | |||
4057 | // happens so late that DBG_PHI instructions should not be able to | |||
4058 | // migrate into loops -- meaning we can only be live-through this | |||
4059 | // loop. | |||
4060 | ValueToCheck = ThisBlockValueNum; | |||
4061 | } else { | |||
4062 | // Does the block have as a live-out, in the location we're examining, | |||
4063 | // the value that we expect? If not, it's been moved or clobbered. | |||
4064 | ValueToCheck = VVal->second; | |||
4065 | } | |||
4066 | ||||
4067 | if (BlockLiveOuts[Loc.asU64()] != ValueToCheck) | |||
4068 | return None; | |||
4069 | } | |||
4070 | ||||
4071 | // Record this value as validated. | |||
4072 | ValidatedValues.insert({PHI->ParentBlock, ThisBlockValueNum}); | |||
4073 | } | |||
4074 | ||||
4075 | // All the PHIs are valid: we can return what the SSAUpdater said our value | |||
4076 | // number was. | |||
4077 | return Result; | |||
4078 | } |
1 | //===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the declaration of the MachineInstr class, which is the |
10 | // basic representation for all target dependent machine instructions used by |
11 | // the back end. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_CODEGEN_MACHINEINSTR_H |
16 | #define LLVM_CODEGEN_MACHINEINSTR_H |
17 | |
18 | #include "llvm/ADT/DenseMapInfo.h" |
19 | #include "llvm/ADT/PointerSumType.h" |
20 | #include "llvm/ADT/SmallSet.h" |
21 | #include "llvm/ADT/ilist.h" |
22 | #include "llvm/ADT/ilist_node.h" |
23 | #include "llvm/ADT/iterator_range.h" |
24 | #include "llvm/CodeGen/MachineMemOperand.h" |
25 | #include "llvm/CodeGen/MachineOperand.h" |
26 | #include "llvm/CodeGen/TargetOpcodes.h" |
27 | #include "llvm/IR/DebugLoc.h" |
28 | #include "llvm/IR/InlineAsm.h" |
29 | #include "llvm/IR/PseudoProbe.h" |
30 | #include "llvm/MC/MCInstrDesc.h" |
31 | #include "llvm/MC/MCSymbol.h" |
32 | #include "llvm/Support/ArrayRecycler.h" |
33 | #include "llvm/Support/TrailingObjects.h" |
34 | #include <algorithm> |
35 | #include <cassert> |
36 | #include <cstdint> |
37 | #include <utility> |
38 | |
39 | namespace llvm { |
40 | |
41 | class AAResults; |
42 | template <typename T> class ArrayRef; |
43 | class DIExpression; |
44 | class DILocalVariable; |
45 | class MachineBasicBlock; |
46 | class MachineFunction; |
47 | class MachineRegisterInfo; |
48 | class ModuleSlotTracker; |
49 | class raw_ostream; |
50 | template <typename T> class SmallVectorImpl; |
51 | class SmallBitVector; |
52 | class StringRef; |
53 | class TargetInstrInfo; |
54 | class TargetRegisterClass; |
55 | class TargetRegisterInfo; |
56 | |
57 | //===----------------------------------------------------------------------===// |
58 | /// Representation of each machine instruction. |
59 | /// |
60 | /// This class isn't a POD type, but it must have a trivial destructor. When a |
61 | /// MachineFunction is deleted, all the contained MachineInstrs are deallocated |
62 | /// without having their destructor called. |
63 | /// |
64 | class MachineInstr |
65 | : public ilist_node_with_parent<MachineInstr, MachineBasicBlock, |
66 | ilist_sentinel_tracking<true>> { |
67 | public: |
68 | using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator; |
69 | |
70 | /// Flags to specify different kinds of comments to output in |
71 | /// assembly code. These flags carry semantic information not |
72 | /// otherwise easily derivable from the IR text. |
73 | /// |
74 | enum CommentFlag { |
75 | ReloadReuse = 0x1, // higher bits are reserved for target dep comments. |
76 | NoSchedComment = 0x2, |
77 | TAsmComments = 0x4 // Target Asm comments should start from this value. |
78 | }; |
79 | |
80 | enum MIFlag { |
81 | NoFlags = 0, |
82 | FrameSetup = 1 << 0, // Instruction is used as a part of |
83 | // function frame setup code. |
84 | FrameDestroy = 1 << 1, // Instruction is used as a part of |
85 | // function frame destruction code. |
86 | BundledPred = 1 << 2, // Instruction has bundled predecessors. |
87 | BundledSucc = 1 << 3, // Instruction has bundled successors. |
88 | FmNoNans = 1 << 4, // Instruction does not support Fast |
89 | // math nan values. |
90 | FmNoInfs = 1 << 5, // Instruction does not support Fast |
91 | // math infinity values. |
92 | FmNsz = 1 << 6, // Instruction is not required to retain |
93 | // signed zero values. |
94 | FmArcp = 1 << 7, // Instruction supports Fast math |
95 | // reciprocal approximations. |
96 | FmContract = 1 << 8, // Instruction supports Fast math |
97 | // contraction operations like fma. |
98 | FmAfn = 1 << 9, // Instruction may map to Fast math |
99 | // instrinsic approximation. |
100 | FmReassoc = 1 << 10, // Instruction supports Fast math |
101 | // reassociation of operand order. |
102 | NoUWrap = 1 << 11, // Instruction supports binary operator |
103 | // no unsigned wrap. |
104 | NoSWrap = 1 << 12, // Instruction supports binary operator |
105 | // no signed wrap. |
106 | IsExact = 1 << 13, // Instruction supports division is |
107 | // known to be exact. |
108 | NoFPExcept = 1 << 14, // Instruction does not raise |
109 | // floatint-point exceptions. |
110 | NoMerge = 1 << 15, // Passes that drop source location info |
111 | // (e.g. branch folding) should skip |
112 | // this instruction. |
113 | }; |
114 | |
115 | private: |
116 | const MCInstrDesc *MCID; // Instruction descriptor. |
117 | MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block. |
118 | |
119 | // Operands are allocated by an ArrayRecycler. |
120 | MachineOperand *Operands = nullptr; // Pointer to the first operand. |
121 | unsigned NumOperands = 0; // Number of operands on instruction. |
122 | |
123 | uint16_t Flags = 0; // Various bits of additional |
124 | // information about machine |
125 | // instruction. |
126 | |
127 | uint8_t AsmPrinterFlags = 0; // Various bits of information used by |
128 | // the AsmPrinter to emit helpful |
129 | // comments. This is *not* semantic |
130 | // information. Do not use this for |
131 | // anything other than to convey comment |
132 | // information to AsmPrinter. |
133 | |
134 | // OperandCapacity has uint8_t size, so it should be next to AsmPrinterFlags |
135 | // to properly pack. |
136 | using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity; |
137 | OperandCapacity CapOperands; // Capacity of the Operands array. |
138 | |
139 | /// Internal implementation detail class that provides out-of-line storage for |
140 | /// extra info used by the machine instruction when this info cannot be stored |
141 | /// in-line within the instruction itself. |
142 | /// |
143 | /// This has to be defined eagerly due to the implementation constraints of |
144 | /// `PointerSumType` where it is used. |
145 | class ExtraInfo final |
146 | : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *, MDNode *> { |
147 | public: |
148 | static ExtraInfo *create(BumpPtrAllocator &Allocator, |
149 | ArrayRef<MachineMemOperand *> MMOs, |
150 | MCSymbol *PreInstrSymbol = nullptr, |
151 | MCSymbol *PostInstrSymbol = nullptr, |
152 | MDNode *HeapAllocMarker = nullptr) { |
153 | bool HasPreInstrSymbol = PreInstrSymbol != nullptr; |
154 | bool HasPostInstrSymbol = PostInstrSymbol != nullptr; |
155 | bool HasHeapAllocMarker = HeapAllocMarker != nullptr; |
156 | auto *Result = new (Allocator.Allocate( |
157 | totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *>( |
158 | MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol, |
159 | HasHeapAllocMarker), |
160 | alignof(ExtraInfo))) |
161 | ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol, |
162 | HasHeapAllocMarker); |
163 | |
164 | // Copy the actual data into the trailing objects. |
165 | std::copy(MMOs.begin(), MMOs.end(), |
166 | Result->getTrailingObjects<MachineMemOperand *>()); |
167 | |
168 | if (HasPreInstrSymbol) |
169 | Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol; |
170 | if (HasPostInstrSymbol) |
171 | Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] = |
172 | PostInstrSymbol; |
173 | if (HasHeapAllocMarker) |
174 | Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker; |
175 | |
176 | return Result; |
177 | } |
178 | |
179 | ArrayRef<MachineMemOperand *> getMMOs() const { |
180 | return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs); |
181 | } |
182 | |
183 | MCSymbol *getPreInstrSymbol() const { |
184 | return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr; |
185 | } |
186 | |
187 | MCSymbol *getPostInstrSymbol() const { |
188 | return HasPostInstrSymbol |
189 | ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] |
190 | : nullptr; |
191 | } |
192 | |
193 | MDNode *getHeapAllocMarker() const { |
194 | return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr; |
195 | } |
196 | |
197 | private: |
198 | friend TrailingObjects; |
199 | |
200 | // Description of the extra info, used to interpret the actual optional |
201 | // data appended. |
202 | // |
203 | // Note that this is not terribly space optimized. This leaves a great deal |
204 | // of flexibility to fit more in here later. |
205 | const int NumMMOs; |
206 | const bool HasPreInstrSymbol; |
207 | const bool HasPostInstrSymbol; |
208 | const bool HasHeapAllocMarker; |
209 | |
210 | // Implement the `TrailingObjects` internal API. |
211 | size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const { |
212 | return NumMMOs; |
213 | } |
214 | size_t numTrailingObjects(OverloadToken<MCSymbol *>) const { |
215 | return HasPreInstrSymbol + HasPostInstrSymbol; |
216 | } |
217 | size_t numTrailingObjects(OverloadToken<MDNode *>) const { |
218 | return HasHeapAllocMarker; |
219 | } |
220 | |
221 | // Just a boring constructor to allow us to initialize the sizes. Always use |
222 | // the `create` routine above. |
223 | ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol, |
224 | bool HasHeapAllocMarker) |
225 | : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol), |
226 | HasPostInstrSymbol(HasPostInstrSymbol), |
227 | HasHeapAllocMarker(HasHeapAllocMarker) {} |
228 | }; |
229 | |
230 | /// Enumeration of the kinds of inline extra info available. It is important |
231 | /// that the `MachineMemOperand` inline kind has a tag value of zero to make |
232 | /// it accessible as an `ArrayRef`. |
233 | enum ExtraInfoInlineKinds { |
234 | EIIK_MMO = 0, |
235 | EIIK_PreInstrSymbol, |
236 | EIIK_PostInstrSymbol, |
237 | EIIK_OutOfLine |
238 | }; |
239 | |
240 | // We store extra information about the instruction here. The common case is |
241 | // expected to be nothing or a single pointer (typically a MMO or a symbol). |
242 | // We work to optimize this common case by storing it inline here rather than |
243 | // requiring a separate allocation, but we fall back to an allocation when |
244 | // multiple pointers are needed. |
245 | PointerSumType<ExtraInfoInlineKinds, |
246 | PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>, |
247 | PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>, |
248 | PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>, |
249 | PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>> |
250 | Info; |
251 | |
252 | DebugLoc debugLoc; // Source line information. |
253 | |
254 | /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values |
255 | /// defined by this instruction. |
256 | unsigned DebugInstrNum; |
257 | |
258 | // Intrusive list support |
259 | friend struct ilist_traits<MachineInstr>; |
260 | friend struct ilist_callback_traits<MachineBasicBlock>; |
261 | void setParent(MachineBasicBlock *P) { Parent = P; } |
262 | |
263 | /// This constructor creates a copy of the given |
264 | /// MachineInstr in the given MachineFunction. |
265 | MachineInstr(MachineFunction &, const MachineInstr &); |
266 | |
267 | /// This constructor create a MachineInstr and add the implicit operands. |
268 | /// It reserves space for number of operands specified by |
269 | /// MCInstrDesc. An explicit DebugLoc is supplied. |
270 | MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl, |
271 | bool NoImp = false); |
272 | |
273 | // MachineInstrs are pool-allocated and owned by MachineFunction. |
274 | friend class MachineFunction; |
275 | |
276 | void |
277 | dumprImpl(const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth, |
278 | SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const; |
279 | |
280 | public: |
281 | MachineInstr(const MachineInstr &) = delete; |
282 | MachineInstr &operator=(const MachineInstr &) = delete; |
283 | // Use MachineFunction::DeleteMachineInstr() instead. |
284 | ~MachineInstr() = delete; |
285 | |
286 | const MachineBasicBlock* getParent() const { return Parent; } |
287 | MachineBasicBlock* getParent() { return Parent; } |
288 | |
289 | /// Move the instruction before \p MovePos. |
290 | void moveBefore(MachineInstr *MovePos); |
291 | |
292 | /// Return the function that contains the basic block that this instruction |
293 | /// belongs to. |
294 | /// |
295 | /// Note: this is undefined behaviour if the instruction does not have a |
296 | /// parent. |
297 | const MachineFunction *getMF() const; |
298 | MachineFunction *getMF() { |
299 | return const_cast<MachineFunction *>( |
300 | static_cast<const MachineInstr *>(this)->getMF()); |
301 | } |
302 | |
303 | /// Return the asm printer flags bitvector. |
304 | uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; } |
305 | |
306 | /// Clear the AsmPrinter bitvector. |
307 | void clearAsmPrinterFlags() { AsmPrinterFlags = 0; } |
308 | |
309 | /// Return whether an AsmPrinter flag is set. |
310 | bool getAsmPrinterFlag(CommentFlag Flag) const { |
311 | return AsmPrinterFlags & Flag; |
312 | } |
313 | |
314 | /// Set a flag for the AsmPrinter. |
315 | void setAsmPrinterFlag(uint8_t Flag) { |
316 | AsmPrinterFlags |= Flag; |
317 | } |
318 | |
319 | /// Clear specific AsmPrinter flags. |
320 | void clearAsmPrinterFlag(CommentFlag Flag) { |
321 | AsmPrinterFlags &= ~Flag; |
322 | } |
323 | |
324 | /// Return the MI flags bitvector. |
325 | uint16_t getFlags() const { |
326 | return Flags; |
327 | } |
328 | |
329 | /// Return whether an MI flag is set. |
330 | bool getFlag(MIFlag Flag) const { |
331 | return Flags & Flag; |
332 | } |
333 | |
334 | /// Set a MI flag. |
335 | void setFlag(MIFlag Flag) { |
336 | Flags |= (uint16_t)Flag; |
337 | } |
338 | |
339 | void setFlags(unsigned flags) { |
340 | // Filter out the automatically maintained flags. |
341 | unsigned Mask = BundledPred | BundledSucc; |
342 | Flags = (Flags & Mask) | (flags & ~Mask); |
343 | } |
344 | |
345 | /// clearFlag - Clear a MI flag. |
346 | void clearFlag(MIFlag Flag) { |
347 | Flags &= ~((uint16_t)Flag); |
348 | } |
349 | |
350 | /// Return true if MI is in a bundle (but not the first MI in a bundle). |
351 | /// |
352 | /// A bundle looks like this before it's finalized: |
353 | /// ---------------- |
354 | /// | MI | |
355 | /// ---------------- |
356 | /// | |
357 | /// ---------------- |
358 | /// | MI * | |
359 | /// ---------------- |
360 | /// | |
361 | /// ---------------- |
362 | /// | MI * | |
363 | /// ---------------- |
364 | /// In this case, the first MI starts a bundle but is not inside a bundle, the |
365 | /// next 2 MIs are considered "inside" the bundle. |
366 | /// |
367 | /// After a bundle is finalized, it looks like this: |
368 | /// ---------------- |
369 | /// | Bundle | |
370 | /// ---------------- |
371 | /// | |
372 | /// ---------------- |
373 | /// | MI * | |
374 | /// ---------------- |
375 | /// | |
376 | /// ---------------- |
377 | /// | MI * | |
378 | /// ---------------- |
379 | /// | |
380 | /// ---------------- |
381 | /// | MI * | |
382 | /// ---------------- |
383 | /// The first instruction has the special opcode "BUNDLE". It's not "inside" |
384 | /// a bundle, but the next three MIs are. |
385 | bool isInsideBundle() const { |
386 | return getFlag(BundledPred); |
387 | } |
388 | |
389 | /// Return true if this instruction part of a bundle. This is true |
390 | /// if either itself or its following instruction is marked "InsideBundle". |
391 | bool isBundled() const { |
392 | return isBundledWithPred() || isBundledWithSucc(); |
393 | } |
394 | |
395 | /// Return true if this instruction is part of a bundle, and it is not the |
396 | /// first instruction in the bundle. |
397 | bool isBundledWithPred() const { return getFlag(BundledPred); } |
398 | |
399 | /// Return true if this instruction is part of a bundle, and it is not the |
400 | /// last instruction in the bundle. |
401 | bool isBundledWithSucc() const { return getFlag(BundledSucc); } |
402 | |
403 | /// Bundle this instruction with its predecessor. This can be an unbundled |
404 | /// instruction, or it can be the first instruction in a bundle. |
405 | void bundleWithPred(); |
406 | |
407 | /// Bundle this instruction with its successor. This can be an unbundled |
408 | /// instruction, or it can be the last instruction in a bundle. |
409 | void bundleWithSucc(); |
410 | |
411 | /// Break bundle above this instruction. |
412 | void unbundleFromPred(); |
413 | |
414 | /// Break bundle below this instruction. |
415 | void unbundleFromSucc(); |
416 | |
417 | /// Returns the debug location id of this MachineInstr. |
418 | const DebugLoc &getDebugLoc() const { return debugLoc; } |
419 | |
420 | /// Return the operand containing the offset to be used if this DBG_VALUE |
421 | /// instruction is indirect; will be an invalid register if this value is |
422 | /// not indirect, and an immediate with value 0 otherwise. |
423 | const MachineOperand &getDebugOffset() const { |
424 | assert(isNonListDebugValue() && "not a DBG_VALUE")(static_cast <bool> (isNonListDebugValue() && "not a DBG_VALUE" ) ? void (0) : __assert_fail ("isNonListDebugValue() && \"not a DBG_VALUE\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 424, __extension__ __PRETTY_FUNCTION__)); |
425 | return getOperand(1); |
426 | } |
427 | MachineOperand &getDebugOffset() { |
428 | assert(isNonListDebugValue() && "not a DBG_VALUE")(static_cast <bool> (isNonListDebugValue() && "not a DBG_VALUE" ) ? void (0) : __assert_fail ("isNonListDebugValue() && \"not a DBG_VALUE\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 428, __extension__ __PRETTY_FUNCTION__)); |
429 | return getOperand(1); |
430 | } |
431 | |
432 | /// Return the operand for the debug variable referenced by |
433 | /// this DBG_VALUE instruction. |
434 | const MachineOperand &getDebugVariableOp() const; |
435 | MachineOperand &getDebugVariableOp(); |
436 | |
437 | /// Return the debug variable referenced by |
438 | /// this DBG_VALUE instruction. |
439 | const DILocalVariable *getDebugVariable() const; |
440 | |
441 | /// Return the operand for the complex address expression referenced by |
442 | /// this DBG_VALUE instruction. |
443 | const MachineOperand &getDebugExpressionOp() const; |
444 | MachineOperand &getDebugExpressionOp(); |
445 | |
446 | /// Return the complex address expression referenced by |
447 | /// this DBG_VALUE instruction. |
448 | const DIExpression *getDebugExpression() const; |
449 | |
450 | /// Return the debug label referenced by |
451 | /// this DBG_LABEL instruction. |
452 | const DILabel *getDebugLabel() const; |
453 | |
454 | /// Fetch the instruction number of this MachineInstr. If it does not have |
455 | /// one already, a new and unique number will be assigned. |
456 | unsigned getDebugInstrNum(); |
457 | |
458 | /// Fetch instruction number of this MachineInstr -- but before it's inserted |
459 | /// into \p MF. Needed for transformations that create an instruction but |
460 | /// don't immediately insert them. |
461 | unsigned getDebugInstrNum(MachineFunction &MF); |
462 | |
463 | /// Examine the instruction number of this MachineInstr. May be zero if |
464 | /// it hasn't been assigned a number yet. |
465 | unsigned peekDebugInstrNum() const { return DebugInstrNum; } |
466 | |
467 | /// Set instruction number of this MachineInstr. Avoid using unless you're |
468 | /// deserializing this information. |
469 | void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; } |
470 | |
471 | /// Drop any variable location debugging information associated with this |
472 | /// instruction. Use when an instruction is modified in such a way that it no |
473 | /// longer defines the value it used to. Variable locations using that value |
474 | /// will be dropped. |
475 | void dropDebugNumber() { DebugInstrNum = 0; } |
476 | |
477 | /// Emit an error referring to the source location of this instruction. |
478 | /// This should only be used for inline assembly that is somehow |
479 | /// impossible to compile. Other errors should have been handled much |
480 | /// earlier. |
481 | /// |
482 | /// If this method returns, the caller should try to recover from the error. |
483 | void emitError(StringRef Msg) const; |
484 | |
485 | /// Returns the target instruction descriptor of this MachineInstr. |
486 | const MCInstrDesc &getDesc() const { return *MCID; } |
487 | |
488 | /// Returns the opcode of this MachineInstr. |
489 | unsigned getOpcode() const { return MCID->Opcode; } |
490 | |
491 | /// Retuns the total number of operands. |
492 | unsigned getNumOperands() const { return NumOperands; } |
493 | |
494 | /// Returns the total number of operands which are debug locations. |
495 | unsigned getNumDebugOperands() const { |
496 | return std::distance(debug_operands().begin(), debug_operands().end()); |
497 | } |
498 | |
499 | const MachineOperand& getOperand(unsigned i) const { |
500 | assert(i < getNumOperands() && "getOperand() out of range!")(static_cast <bool> (i < getNumOperands() && "getOperand() out of range!") ? void (0) : __assert_fail ("i < getNumOperands() && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 500, __extension__ __PRETTY_FUNCTION__)); |
501 | return Operands[i]; |
502 | } |
503 | MachineOperand& getOperand(unsigned i) { |
504 | assert(i < getNumOperands() && "getOperand() out of range!")(static_cast <bool> (i < getNumOperands() && "getOperand() out of range!") ? void (0) : __assert_fail ("i < getNumOperands() && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 504, __extension__ __PRETTY_FUNCTION__)); |
505 | return Operands[i]; |
506 | } |
507 | |
508 | MachineOperand &getDebugOperand(unsigned Index) { |
509 | assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!")(static_cast <bool> (Index < getNumDebugOperands() && "getDebugOperand() out of range!") ? void (0) : __assert_fail ("Index < getNumDebugOperands() && \"getDebugOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 509, __extension__ __PRETTY_FUNCTION__)); |
510 | return *(debug_operands().begin() + Index); |
511 | } |
512 | const MachineOperand &getDebugOperand(unsigned Index) const { |
513 | assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!")(static_cast <bool> (Index < getNumDebugOperands() && "getDebugOperand() out of range!") ? void (0) : __assert_fail ("Index < getNumDebugOperands() && \"getDebugOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 513, __extension__ __PRETTY_FUNCTION__)); |
514 | return *(debug_operands().begin() + Index); |
515 | } |
516 | |
517 | SmallSet<Register, 4> getUsedDebugRegs() const { |
518 | assert(isDebugValue() && "not a DBG_VALUE*")(static_cast <bool> (isDebugValue() && "not a DBG_VALUE*" ) ? void (0) : __assert_fail ("isDebugValue() && \"not a DBG_VALUE*\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 518, __extension__ __PRETTY_FUNCTION__)); |
519 | SmallSet<Register, 4> UsedRegs; |
520 | for (auto MO : debug_operands()) |
521 | if (MO.isReg() && MO.getReg()) |
522 | UsedRegs.insert(MO.getReg()); |
523 | return UsedRegs; |
524 | } |
525 | |
526 | /// Returns whether this debug value has at least one debug operand with the |
527 | /// register \p Reg. |
528 | bool hasDebugOperandForReg(Register Reg) const { |
529 | return any_of(debug_operands(), [Reg](const MachineOperand &Op) { |
530 | return Op.isReg() && Op.getReg() == Reg; |
531 | }); |
532 | } |
533 | |
534 | /// Returns a range of all of the operands that correspond to a debug use of |
535 | /// \p Reg. |
536 | template <typename Operand, typename Instruction> |
537 | static iterator_range< |
538 | filter_iterator<Operand *, std::function<bool(Operand &Op)>>> |
539 | getDebugOperandsForReg(Instruction *MI, Register Reg) { |
540 | std::function<bool(Operand & Op)> OpUsesReg( |
541 | [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; }); |
542 | return make_filter_range(MI->debug_operands(), OpUsesReg); |
543 | } |
544 | iterator_range<filter_iterator<const MachineOperand *, |
545 | std::function<bool(const MachineOperand &Op)>>> |
546 | getDebugOperandsForReg(Register Reg) const { |
547 | return MachineInstr::getDebugOperandsForReg<const MachineOperand, |
548 | const MachineInstr>(this, Reg); |
549 | } |
550 | iterator_range<filter_iterator<MachineOperand *, |
551 | std::function<bool(MachineOperand &Op)>>> |
552 | getDebugOperandsForReg(Register Reg) { |
553 | return MachineInstr::getDebugOperandsForReg<MachineOperand, MachineInstr>( |
554 | this, Reg); |
555 | } |
556 | |
557 | bool isDebugOperand(const MachineOperand *Op) const { |
558 | return Op >= adl_begin(debug_operands()) && Op <= adl_end(debug_operands()); |
559 | } |
560 | |
561 | unsigned getDebugOperandIndex(const MachineOperand *Op) const { |
562 | assert(isDebugOperand(Op) && "Expected a debug operand.")(static_cast <bool> (isDebugOperand(Op) && "Expected a debug operand." ) ? void (0) : __assert_fail ("isDebugOperand(Op) && \"Expected a debug operand.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 562, __extension__ __PRETTY_FUNCTION__)); |
563 | return std::distance(adl_begin(debug_operands()), Op); |
564 | } |
565 | |
566 | /// Returns the total number of definitions. |
567 | unsigned getNumDefs() const { |
568 | return getNumExplicitDefs() + MCID->getNumImplicitDefs(); |
569 | } |
570 | |
571 | /// Returns true if the instruction has implicit definition. |
572 | bool hasImplicitDef() const { |
573 | for (unsigned I = getNumExplicitOperands(), E = getNumOperands(); |
574 | I != E; ++I) { |
575 | const MachineOperand &MO = getOperand(I); |
576 | if (MO.isDef() && MO.isImplicit()) |
577 | return true; |
578 | } |
579 | return false; |
580 | } |
581 | |
582 | /// Returns the implicit operands number. |
583 | unsigned getNumImplicitOperands() const { |
584 | return getNumOperands() - getNumExplicitOperands(); |
585 | } |
586 | |
587 | /// Return true if operand \p OpIdx is a subregister index. |
588 | bool isOperandSubregIdx(unsigned OpIdx) const { |
589 | assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&(static_cast <bool> (getOperand(OpIdx).getType() == MachineOperand ::MO_Immediate && "Expected MO_Immediate operand type." ) ? void (0) : __assert_fail ("getOperand(OpIdx).getType() == MachineOperand::MO_Immediate && \"Expected MO_Immediate operand type.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 590, __extension__ __PRETTY_FUNCTION__)) |
590 | "Expected MO_Immediate operand type.")(static_cast <bool> (getOperand(OpIdx).getType() == MachineOperand ::MO_Immediate && "Expected MO_Immediate operand type." ) ? void (0) : __assert_fail ("getOperand(OpIdx).getType() == MachineOperand::MO_Immediate && \"Expected MO_Immediate operand type.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 590, __extension__ __PRETTY_FUNCTION__)); |
591 | if (isExtractSubreg() && OpIdx == 2) |
592 | return true; |
593 | if (isInsertSubreg() && OpIdx == 3) |
594 | return true; |
595 | if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0) |
596 | return true; |
597 | if (isSubregToReg() && OpIdx == 3) |
598 | return true; |
599 | return false; |
600 | } |
601 | |
602 | /// Returns the number of non-implicit operands. |
603 | unsigned getNumExplicitOperands() const; |
604 | |
605 | /// Returns the number of non-implicit definitions. |
606 | unsigned getNumExplicitDefs() const; |
607 | |
608 | /// iterator/begin/end - Iterate over all operands of a machine instruction. |
609 | using mop_iterator = MachineOperand *; |
610 | using const_mop_iterator = const MachineOperand *; |
611 | |
612 | mop_iterator operands_begin() { return Operands; } |
613 | mop_iterator operands_end() { return Operands + NumOperands; } |
614 | |
615 | const_mop_iterator operands_begin() const { return Operands; } |
616 | const_mop_iterator operands_end() const { return Operands + NumOperands; } |
617 | |
618 | iterator_range<mop_iterator> operands() { |
619 | return make_range(operands_begin(), operands_end()); |
620 | } |
621 | iterator_range<const_mop_iterator> operands() const { |
622 | return make_range(operands_begin(), operands_end()); |
623 | } |
624 | iterator_range<mop_iterator> explicit_operands() { |
625 | return make_range(operands_begin(), |
626 | operands_begin() + getNumExplicitOperands()); |
627 | } |
628 | iterator_range<const_mop_iterator> explicit_operands() const { |
629 | return make_range(operands_begin(), |
630 | operands_begin() + getNumExplicitOperands()); |
631 | } |
632 | iterator_range<mop_iterator> implicit_operands() { |
633 | return make_range(explicit_operands().end(), operands_end()); |
634 | } |
635 | iterator_range<const_mop_iterator> implicit_operands() const { |
636 | return make_range(explicit_operands().end(), operands_end()); |
637 | } |
638 | /// Returns a range over all operands that are used to determine the variable |
639 | /// location for this DBG_VALUE instruction. |
640 | iterator_range<mop_iterator> debug_operands() { |
641 | assert(isDebugValue() && "Must be a debug value instruction.")(static_cast <bool> (isDebugValue() && "Must be a debug value instruction." ) ? void (0) : __assert_fail ("isDebugValue() && \"Must be a debug value instruction.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 641, __extension__ __PRETTY_FUNCTION__)); |
642 | return isDebugValueList() |
643 | ? make_range(operands_begin() + 2, operands_end()) |
644 | : make_range(operands_begin(), operands_begin() + 1); |
645 | } |
646 | /// \copydoc debug_operands() |
647 | iterator_range<const_mop_iterator> debug_operands() const { |
648 | assert(isDebugValue() && "Must be a debug value instruction.")(static_cast <bool> (isDebugValue() && "Must be a debug value instruction." ) ? void (0) : __assert_fail ("isDebugValue() && \"Must be a debug value instruction.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 648, __extension__ __PRETTY_FUNCTION__)); |
649 | return isDebugValueList() |
650 | ? make_range(operands_begin() + 2, operands_end()) |
651 | : make_range(operands_begin(), operands_begin() + 1); |
652 | } |
653 | /// Returns a range over all explicit operands that are register definitions. |
654 | /// Implicit definition are not included! |
655 | iterator_range<mop_iterator> defs() { |
656 | return make_range(operands_begin(), |
657 | operands_begin() + getNumExplicitDefs()); |
658 | } |
659 | /// \copydoc defs() |
660 | iterator_range<const_mop_iterator> defs() const { |
661 | return make_range(operands_begin(), |
662 | operands_begin() + getNumExplicitDefs()); |
663 | } |
664 | /// Returns a range that includes all operands that are register uses. |
665 | /// This may include unrelated operands which are not register uses. |
666 | iterator_range<mop_iterator> uses() { |
667 | return make_range(operands_begin() + getNumExplicitDefs(), operands_end()); |
668 | } |
669 | /// \copydoc uses() |
670 | iterator_range<const_mop_iterator> uses() const { |
671 | return make_range(operands_begin() + getNumExplicitDefs(), operands_end()); |
672 | } |
673 | iterator_range<mop_iterator> explicit_uses() { |
674 | return make_range(operands_begin() + getNumExplicitDefs(), |
675 | operands_begin() + getNumExplicitOperands()); |
676 | } |
677 | iterator_range<const_mop_iterator> explicit_uses() const { |
678 | return make_range(operands_begin() + getNumExplicitDefs(), |
679 | operands_begin() + getNumExplicitOperands()); |
680 | } |
681 | |
682 | /// Returns the number of the operand iterator \p I points to. |
683 | unsigned getOperandNo(const_mop_iterator I) const { |
684 | return I - operands_begin(); |
685 | } |
686 | |
687 | /// Access to memory operands of the instruction. If there are none, that does |
688 | /// not imply anything about whether the function accesses memory. Instead, |
689 | /// the caller must behave conservatively. |
690 | ArrayRef<MachineMemOperand *> memoperands() const { |
691 | if (!Info) |
692 | return {}; |
693 | |
694 | if (Info.is<EIIK_MMO>()) |
695 | return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1); |
696 | |
697 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
698 | return EI->getMMOs(); |
699 | |
700 | return {}; |
701 | } |
702 | |
703 | /// Access to memory operands of the instruction. |
704 | /// |
705 | /// If `memoperands_begin() == memoperands_end()`, that does not imply |
706 | /// anything about whether the function accesses memory. Instead, the caller |
707 | /// must behave conservatively. |
708 | mmo_iterator memoperands_begin() const { return memoperands().begin(); } |
709 | |
710 | /// Access to memory operands of the instruction. |
711 | /// |
712 | /// If `memoperands_begin() == memoperands_end()`, that does not imply |
713 | /// anything about whether the function accesses memory. Instead, the caller |
714 | /// must behave conservatively. |
715 | mmo_iterator memoperands_end() const { return memoperands().end(); } |
716 | |
717 | /// Return true if we don't have any memory operands which described the |
718 | /// memory access done by this instruction. If this is true, calling code |
719 | /// must be conservative. |
720 | bool memoperands_empty() const { return memoperands().empty(); } |
721 | |
722 | /// Return true if this instruction has exactly one MachineMemOperand. |
723 | bool hasOneMemOperand() const { return memoperands().size() == 1; } |
724 | |
725 | /// Return the number of memory operands. |
726 | unsigned getNumMemOperands() const { return memoperands().size(); } |
727 | |
728 | /// Helper to extract a pre-instruction symbol if one has been added. |
729 | MCSymbol *getPreInstrSymbol() const { |
730 | if (!Info) |
731 | return nullptr; |
732 | if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>()) |
733 | return S; |
734 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
735 | return EI->getPreInstrSymbol(); |
736 | |
737 | return nullptr; |
738 | } |
739 | |
740 | /// Helper to extract a post-instruction symbol if one has been added. |
741 | MCSymbol *getPostInstrSymbol() const { |
742 | if (!Info) |
743 | return nullptr; |
744 | if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>()) |
745 | return S; |
746 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
747 | return EI->getPostInstrSymbol(); |
748 | |
749 | return nullptr; |
750 | } |
751 | |
752 | /// Helper to extract a heap alloc marker if one has been added. |
753 | MDNode *getHeapAllocMarker() const { |
754 | if (!Info) |
755 | return nullptr; |
756 | if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>()) |
757 | return EI->getHeapAllocMarker(); |
758 | |
759 | return nullptr; |
760 | } |
761 | |
762 | /// API for querying MachineInstr properties. They are the same as MCInstrDesc |
763 | /// queries but they are bundle aware. |
764 | |
765 | enum QueryType { |
766 | IgnoreBundle, // Ignore bundles |
767 | AnyInBundle, // Return true if any instruction in bundle has property |
768 | AllInBundle // Return true if all instructions in bundle have property |
769 | }; |
770 | |
771 | /// Return true if the instruction (or in the case of a bundle, |
772 | /// the instructions inside the bundle) has the specified property. |
773 | /// The first argument is the property being queried. |
774 | /// The second argument indicates whether the query should look inside |
775 | /// instruction bundles. |
776 | bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const { |
777 | assert(MCFlag < 64 &&(static_cast <bool> (MCFlag < 64 && "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle." ) ? void (0) : __assert_fail ("MCFlag < 64 && \"MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 778, __extension__ __PRETTY_FUNCTION__)) |
778 | "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.")(static_cast <bool> (MCFlag < 64 && "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle." ) ? void (0) : __assert_fail ("MCFlag < 64 && \"MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 778, __extension__ __PRETTY_FUNCTION__)); |
779 | // Inline the fast path for unbundled or bundle-internal instructions. |
780 | if (Type == IgnoreBundle || !isBundled() || isBundledWithPred()) |
781 | return getDesc().getFlags() & (1ULL << MCFlag); |
782 | |
783 | // If this is the first instruction in a bundle, take the slow path. |
784 | return hasPropertyInBundle(1ULL << MCFlag, Type); |
785 | } |
786 | |
787 | /// Return true if this is an instruction that should go through the usual |
788 | /// legalization steps. |
789 | bool isPreISelOpcode(QueryType Type = IgnoreBundle) const { |
790 | return hasProperty(MCID::PreISelOpcode, Type); |
791 | } |
792 | |
793 | /// Return true if this instruction can have a variable number of operands. |
794 | /// In this case, the variable operands will be after the normal |
795 | /// operands but before the implicit definitions and uses (if any are |
796 | /// present). |
797 | bool isVariadic(QueryType Type = IgnoreBundle) const { |
798 | return hasProperty(MCID::Variadic, Type); |
799 | } |
800 | |
801 | /// Set if this instruction has an optional definition, e.g. |
802 | /// ARM instructions which can set condition code if 's' bit is set. |
803 | bool hasOptionalDef(QueryType Type = IgnoreBundle) const { |
804 | return hasProperty(MCID::HasOptionalDef, Type); |
805 | } |
806 | |
807 | /// Return true if this is a pseudo instruction that doesn't |
808 | /// correspond to a real machine instruction. |
809 | bool isPseudo(QueryType Type = IgnoreBundle) const { |
810 | return hasProperty(MCID::Pseudo, Type); |
811 | } |
812 | |
813 | bool isReturn(QueryType Type = AnyInBundle) const { |
814 | return hasProperty(MCID::Return, Type); |
815 | } |
816 | |
817 | /// Return true if this is an instruction that marks the end of an EH scope, |
818 | /// i.e., a catchpad or a cleanuppad instruction. |
819 | bool isEHScopeReturn(QueryType Type = AnyInBundle) const { |
820 | return hasProperty(MCID::EHScopeReturn, Type); |
821 | } |
822 | |
823 | bool isCall(QueryType Type = AnyInBundle) const { |
824 | return hasProperty(MCID::Call, Type); |
825 | } |
826 | |
827 | /// Return true if this is a call instruction that may have an associated |
828 | /// call site entry in the debug info. |
829 | bool isCandidateForCallSiteEntry(QueryType Type = IgnoreBundle) const; |
830 | /// Return true if copying, moving, or erasing this instruction requires |
831 | /// updating Call Site Info (see \ref copyCallSiteInfo, \ref moveCallSiteInfo, |
832 | /// \ref eraseCallSiteInfo). |
833 | bool shouldUpdateCallSiteInfo() const; |
834 | |
835 | /// Returns true if the specified instruction stops control flow |
836 | /// from executing the instruction immediately following it. Examples include |
837 | /// unconditional branches and return instructions. |
838 | bool isBarrier(QueryType Type = AnyInBundle) const { |
839 | return hasProperty(MCID::Barrier, Type); |
840 | } |
841 | |
842 | /// Returns true if this instruction part of the terminator for a basic block. |
843 | /// Typically this is things like return and branch instructions. |
844 | /// |
845 | /// Various passes use this to insert code into the bottom of a basic block, |
846 | /// but before control flow occurs. |
847 | bool isTerminator(QueryType Type = AnyInBundle) const { |
848 | return hasProperty(MCID::Terminator, Type); |
849 | } |
850 | |
851 | /// Returns true if this is a conditional, unconditional, or indirect branch. |
852 | /// Predicates below can be used to discriminate between |
853 | /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to |
854 | /// get more information. |
855 | bool isBranch(QueryType Type = AnyInBundle) const { |
856 | return hasProperty(MCID::Branch, Type); |
857 | } |
858 | |
859 | /// Return true if this is an indirect branch, such as a |
860 | /// branch through a register. |
861 | bool isIndirectBranch(QueryType Type = AnyInBundle) const { |
862 | return hasProperty(MCID::IndirectBranch, Type); |
863 | } |
864 | |
865 | /// Return true if this is a branch which may fall |
866 | /// through to the next instruction or may transfer control flow to some other |
867 | /// block. The TargetInstrInfo::analyzeBranch method can be used to get more |
868 | /// information about this branch. |
869 | bool isConditionalBranch(QueryType Type = AnyInBundle) const { |
870 | return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type); |
871 | } |
872 | |
873 | /// Return true if this is a branch which always |
874 | /// transfers control flow to some other block. The |
875 | /// TargetInstrInfo::analyzeBranch method can be used to get more information |
876 | /// about this branch. |
877 | bool isUnconditionalBranch(QueryType Type = AnyInBundle) const { |
878 | return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type); |
879 | } |
880 | |
881 | /// Return true if this instruction has a predicate operand that |
882 | /// controls execution. It may be set to 'always', or may be set to other |
883 | /// values. There are various methods in TargetInstrInfo that can be used to |
884 | /// control and modify the predicate in this instruction. |
885 | bool isPredicable(QueryType Type = AllInBundle) const { |
886 | // If it's a bundle than all bundled instructions must be predicable for this |
887 | // to return true. |
888 | return hasProperty(MCID::Predicable, Type); |
889 | } |
890 | |
891 | /// Return true if this instruction is a comparison. |
892 | bool isCompare(QueryType Type = IgnoreBundle) const { |
893 | return hasProperty(MCID::Compare, Type); |
894 | } |
895 | |
896 | /// Return true if this instruction is a move immediate |
897 | /// (including conditional moves) instruction. |
898 | bool isMoveImmediate(QueryType Type = IgnoreBundle) const { |
899 | return hasProperty(MCID::MoveImm, Type); |
900 | } |
901 | |
902 | /// Return true if this instruction is a register move. |
903 | /// (including moving values from subreg to reg) |
904 | bool isMoveReg(QueryType Type = IgnoreBundle) const { |
905 | return hasProperty(MCID::MoveReg, Type); |
906 | } |
907 | |
908 | /// Return true if this instruction is a bitcast instruction. |
909 | bool isBitcast(QueryType Type = IgnoreBundle) const { |
910 | return hasProperty(MCID::Bitcast, Type); |
911 | } |
912 | |
913 | /// Return true if this instruction is a select instruction. |
914 | bool isSelect(QueryType Type = IgnoreBundle) const { |
915 | return hasProperty(MCID::Select, Type); |
916 | } |
917 | |
918 | /// Return true if this instruction cannot be safely duplicated. |
919 | /// For example, if the instruction has a unique labels attached |
920 | /// to it, duplicating it would cause multiple definition errors. |
921 | bool isNotDuplicable(QueryType Type = AnyInBundle) const { |
922 | return hasProperty(MCID::NotDuplicable, Type); |
923 | } |
924 | |
925 | /// Return true if this instruction is convergent. |
926 | /// Convergent instructions can not be made control-dependent on any |
927 | /// additional values. |
928 | bool isConvergent(QueryType Type = AnyInBundle) const { |
929 | if (isInlineAsm()) { |
930 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
931 | if (ExtraInfo & InlineAsm::Extra_IsConvergent) |
932 | return true; |
933 | } |
934 | return hasProperty(MCID::Convergent, Type); |
935 | } |
936 | |
  /// Returns true if the specified instruction has a delay slot
  /// which must be filled by the code generator.
  bool hasDelaySlot(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::DelaySlot, Type);
  }

  /// Return true for instructions that can be folded as
  /// memory operands in other instructions. The most common use for this
  /// is instructions that are simple loads from memory that don't modify
  /// the loaded value in any way, but it can also be used for instructions
  /// that can be expressed as constant-pool loads, such as V_SETALLONES
  /// on x86, to allow them to be folded when it is beneficial.
  /// This should only be set on instructions that return a value in their
  /// only virtual register definition.
  bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::FoldableAsLoad, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic REG_SEQUENCE instructions.
  /// E.g., on ARM,
  /// dX VMOVDRR rY, rZ
  /// is equivalent to
  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
  /// overridden accordingly.
  bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::RegSequence, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic EXTRACT_SUBREG instructions.
  /// E.g., on ARM,
  /// rX, rY VMOVRRD dZ
  /// is equivalent to two EXTRACT_SUBREG:
  /// rX = EXTRACT_SUBREG dZ, ssub_0
  /// rY = EXTRACT_SUBREG dZ, ssub_1
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
  /// overridden accordingly.
  bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::ExtractSubreg, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic INSERT_SUBREG instructions.
  /// E.g., on ARM,
  /// dX = VSETLNi32 dY, rZ, Imm
  /// is equivalent to a INSERT_SUBREG:
  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
  /// overridden accordingly.
  bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::InsertSubreg, Type);
  }
997 | |
998 | //===--------------------------------------------------------------------===// |
999 | // Side Effect Analysis |
1000 | //===--------------------------------------------------------------------===// |
1001 | |
1002 | /// Return true if this instruction could possibly read memory. |
1003 | /// Instructions with this flag set are not necessarily simple load |
1004 | /// instructions, they may load a value and modify it, for example. |
1005 | bool mayLoad(QueryType Type = AnyInBundle) const { |
1006 | if (isInlineAsm()) { |
1007 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
1008 | if (ExtraInfo & InlineAsm::Extra_MayLoad) |
1009 | return true; |
1010 | } |
1011 | return hasProperty(MCID::MayLoad, Type); |
1012 | } |
1013 | |
1014 | /// Return true if this instruction could possibly modify memory. |
1015 | /// Instructions with this flag set are not necessarily simple store |
1016 | /// instructions, they may store a modified value based on their operands, or |
1017 | /// may not actually modify anything, for example. |
1018 | bool mayStore(QueryType Type = AnyInBundle) const { |
1019 | if (isInlineAsm()) { |
1020 | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
1021 | if (ExtraInfo & InlineAsm::Extra_MayStore) |
1022 | return true; |
1023 | } |
1024 | return hasProperty(MCID::MayStore, Type); |
1025 | } |
1026 | |
1027 | /// Return true if this instruction could possibly read or modify memory. |
1028 | bool mayLoadOrStore(QueryType Type = AnyInBundle) const { |
1029 | return mayLoad(Type) || mayStore(Type); |
1030 | } |
1031 | |
1032 | /// Return true if this instruction could possibly raise a floating-point |
1033 | /// exception. This is the case if the instruction is a floating-point |
1034 | /// instruction that can in principle raise an exception, as indicated |
1035 | /// by the MCID::MayRaiseFPException property, *and* at the same time, |
1036 | /// the instruction is used in a context where we expect floating-point |
1037 | /// exceptions are not disabled, as indicated by the NoFPExcept MI flag. |
1038 | bool mayRaiseFPException() const { |
1039 | return hasProperty(MCID::MayRaiseFPException) && |
1040 | !getFlag(MachineInstr::MIFlag::NoFPExcept); |
1041 | } |
1042 | |
1043 | //===--------------------------------------------------------------------===// |
1044 | // Flags that indicate whether an instruction can be modified by a method. |
1045 | //===--------------------------------------------------------------------===// |
1046 | |
  /// Return true if this may be a 2- or 3-address
  /// instruction (of the form "X = op Y, Z, ..."), which produces the same
  /// result if Y and Z are exchanged. If this flag is set, then the
  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
  /// instruction.
  ///
  /// Note that this flag may be set on instructions that are only commutable
  /// sometimes. In these cases, the call to commuteInstruction will fail.
  /// Also note that some instructions require non-trivial modification to
  /// commute them.
  bool isCommutable(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Commutable, Type);
  }

  /// Return true if this is a 2-address instruction
  /// which can be changed into a 3-address instruction if needed. Doing this
  /// transformation can be profitable in the register allocator, because it
  /// means that the instruction can use a 2-address form if possible, but
  /// degrade into a less efficient form if the source and dest register cannot
  /// be assigned to the same register. For example, this allows the x86
  /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
  /// is the same speed as the shift but has bigger code size.
  ///
  /// If this returns true, then the target must implement the
  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
  /// is allowed to fail if the transformation isn't valid for this specific
  /// instruction (e.g. shl reg, 4 on x86).
  ///
  bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::ConvertibleTo3Addr, Type);
  }

  /// Return true if this instruction requires
  /// custom insertion support when the DAG scheduler is inserting it into a
  /// machine basic block. If this is true for the instruction, it basically
  /// means that it is a pseudo instruction used at SelectionDAG time that is
  /// expanded out into magic code by the target when MachineInstrs are formed.
  ///
  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
  /// is used to insert this into the MachineBasicBlock.
  bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::UsesCustomInserter, Type);
  }

  /// Return true if this instruction requires *adjustment*
  /// after instruction selection by calling a target hook. For example, this
  /// can be used to fill in ARM 's' optional operand depending on whether
  /// the conditional flag register is used.
  bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::HasPostISelHook, Type);
  }

  /// Returns true if this instruction is a candidate for remat.
  /// This flag is deprecated, please don't use it anymore. If this
  /// flag is set, the isReallyTriviallyReMaterializable() method is called to
  /// verify the instruction is really rematable.
  bool isRematerializable(QueryType Type = AllInBundle) const {
    // It's only possible to re-mat a bundle if all bundled instructions are
    // re-materializable.
    return hasProperty(MCID::Rematerializable, Type);
  }

  /// Returns true if this instruction has the same cost (or less) than a move
  /// instruction. This is useful during certain types of optimizations
  /// (e.g., remat during two-address conversion or machine licm)
  /// where we would like to remat or hoist the instruction, but not if it costs
  /// more than moving the instruction into the appropriate register. Note, we
  /// are not marking copies from and to the same register class with this flag.
  bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
    // Only returns true for a bundle if all bundled instructions are cheap.
    return hasProperty(MCID::CheapAsAMove, Type);
  }

  /// Returns true if this instruction's source operands
  /// have special register allocation requirements that are not captured by the
  /// operand register classes. e.g. ARM::STRD's two source registers must be an
  /// even / odd pair, ARM::STM registers have to be in ascending order.
  /// Post-register allocation passes should not attempt to change allocations
  /// for sources of instructions with this flag.
  bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
  }

  /// Returns true if this instruction's def operands
  /// have special register allocation requirements that are not captured by the
  /// operand register classes. e.g. ARM::LDRD's two def registers must be an
  /// even / odd pair, ARM::LDM registers have to be in ascending order.
  /// Post-register allocation passes should not attempt to change allocations
  /// for definitions of instructions with this flag.
  bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::ExtraDefRegAllocReq, Type);
  }
1139 | |
  /// Selects how strictly isIdenticalTo() compares the operands of two
  /// instructions.
  enum MICheckType {
    CheckDefs,      // Check all operands for equality
    CheckKillDead,  // Check all operands including kill / dead markers
    IgnoreDefs,     // Ignore all definitions
    IgnoreVRegDefs  // Ignore virtual register definitions
  };

  /// Return true if this instruction is identical to \p Other.
  /// Two instructions are identical if they have the same opcode and all their
  /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
  /// Note that this means liveness related flags (dead, undef, kill) do not
  /// affect the notion of identical.
  bool isIdenticalTo(const MachineInstr &Other,
                     MICheckType Check = CheckDefs) const;
1154 | |
  /// Unlink 'this' from the containing basic block, and return it without
  /// deleting it.
  ///
  /// This function can not be used on bundled instructions, use
  /// removeFromBundle() to remove individual instructions from a bundle.
  MachineInstr *removeFromParent();

  /// Unlink this instruction from its basic block and return it without
  /// deleting it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle remain bundled.
  MachineInstr *removeFromBundle();

  /// Unlink 'this' from the containing basic block and delete it.
  ///
  /// If this instruction is the header of a bundle, the whole bundle is erased.
  /// This function can not be used for instructions inside a bundle, use
  /// eraseFromBundle() to erase individual bundled instructions.
  void eraseFromParent();

  /// Unlink 'this' from the containing basic block and delete it.
  ///
  /// For all definitions mark their uses in DBG_VALUE nodes
  /// as undefined. Otherwise like eraseFromParent().
  void eraseFromParentAndMarkDBGValuesForRemoval();

  /// Unlink 'this' from its basic block and delete it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle remain bundled.
  void eraseFromBundle();
1187 | |
  /// Returns true if this is an exception-handling label.
  bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
  /// Returns true if this is a garbage-collection label.
  bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
  /// Returns true if this is an annotation label.
  bool isAnnotationLabel() const {
    return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
  }

  /// Returns true if the MachineInstr represents a label.
  bool isLabel() const {
    return isEHLabel() || isGCLabel() || isAnnotationLabel();
  }

  /// Returns true if this is a call-frame-information pseudo instruction.
  bool isCFIInstruction() const {
    return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
  }

  /// Returns true if this is a pseudo-probe instruction.
  bool isPseudoProbe() const {
    return getOpcode() == TargetOpcode::PSEUDO_PROBE;
  }

  // True if the instruction represents a position in the function.
  bool isPosition() const { return isLabel() || isCFIInstruction(); }
1209 | |
  /// Returns true if this is a single-location DBG_VALUE.
  bool isNonListDebugValue() const {
    return getOpcode() == TargetOpcode::DBG_VALUE;
  }
  /// Returns true if this is a variadic DBG_VALUE_LIST.
  bool isDebugValueList() const {
    return getOpcode() == TargetOpcode::DBG_VALUE_LIST;
  }
  /// Returns true if this is any kind of debug-value instruction.
  bool isDebugValue() const {
    return isNonListDebugValue() || isDebugValueList();
  }
  bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
  bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
  bool isDebugPHI() const { return getOpcode() == TargetOpcode::DBG_PHI; }
  /// Returns true if this is any debug instruction (value, label, instr-ref
  /// or debug PHI).
  bool isDebugInstr() const {
    return isDebugValue() || isDebugLabel() || isDebugRef() || isDebugPHI();
  }
  bool isDebugOrPseudoInstr() const {
    return isDebugInstr() || isPseudoProbe();
  }

  /// Returns true if this is a DBG_VALUE whose offset operand is an immediate.
  bool isDebugOffsetImm() const {
    return isNonListDebugValue() && getDebugOffset().isImm();
  }

  /// A DBG_VALUE is indirect iff the location operand is a register and
  /// the offset operand is an immediate.
  bool isIndirectDebugValue() const {
    return isDebugOffsetImm() && getDebugOperand(0).isReg();
  }

  /// A DBG_VALUE is an entry value iff its debug expression contains the
  /// DW_OP_LLVM_entry_value operation.
  bool isDebugEntryValue() const;
1242 | |
1243 | /// Return true if the instruction is a debug value which describes a part of |
1244 | /// a variable as unavailable. |
1245 | bool isUndefDebugValue() const { |
1246 | if (!isDebugValue()) |
1247 | return false; |
1248 | // If any $noreg locations are given, this DV is undef. |
1249 | for (const MachineOperand &Op : debug_operands()) |
1250 | if (Op.isReg() && !Op.getReg().isValid()) |
1251 | return true; |
1252 | return false; |
1253 | } |
1254 | |
  /// Returns true if this is a PHI (target-independent or generic).
  bool isPHI() const {
    return getOpcode() == TargetOpcode::PHI ||
           getOpcode() == TargetOpcode::G_PHI;
  }
  bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
  bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
  /// Returns true for both fallthrough (INLINEASM) and branching
  /// (INLINEASM_BR) inline assembly.
  bool isInlineAsm() const {
    return getOpcode() == TargetOpcode::INLINEASM ||
           getOpcode() == TargetOpcode::INLINEASM_BR;
  }

  /// FIXME: Seems like a layering violation that the AsmDialect, which is X86
  /// specific, be attached to a generic MachineInstr.
  bool isMSInlineAsm() const {
    return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
  }

  bool isStackAligningInlineAsm() const;
  InlineAsm::AsmDialect getInlineAsmDialect() const;
1274 | |
  bool isInsertSubreg() const {
    return getOpcode() == TargetOpcode::INSERT_SUBREG;
  }

  bool isSubregToReg() const {
    return getOpcode() == TargetOpcode::SUBREG_TO_REG;
  }

  bool isRegSequence() const {
    return getOpcode() == TargetOpcode::REG_SEQUENCE;
  }

  bool isBundle() const {
    return getOpcode() == TargetOpcode::BUNDLE;
  }

  bool isCopy() const {
    return getOpcode() == TargetOpcode::COPY;
  }

  /// Returns true for a COPY where neither operand uses a subregister index.
  bool isFullCopy() const {
    return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
  }

  bool isExtractSubreg() const {
    return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
  }

  /// Return true if the instruction behaves like a copy.
  /// This does not include native copy instructions.
  bool isCopyLike() const {
    return isCopy() || isSubregToReg();
  }

  /// Return true if the instruction is an identity copy, i.e. a COPY whose
  /// source and destination register (and subregister index) are the same.
  bool isIdentityCopy() const {
    return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
           getOperand(0).getSubReg() == getOperand(1).getSubReg();
  }
1314 | |
1315 | /// Return true if this instruction doesn't produce any output in the form of |
1316 | /// executable instructions. |
1317 | bool isMetaInstruction() const { |
1318 | switch (getOpcode()) { |
1319 | default: |
1320 | return false; |
1321 | case TargetOpcode::IMPLICIT_DEF: |
1322 | case TargetOpcode::KILL: |
1323 | case TargetOpcode::CFI_INSTRUCTION: |
1324 | case TargetOpcode::EH_LABEL: |
1325 | case TargetOpcode::GC_LABEL: |
1326 | case TargetOpcode::DBG_VALUE: |
1327 | case TargetOpcode::DBG_VALUE_LIST: |
1328 | case TargetOpcode::DBG_INSTR_REF: |
1329 | case TargetOpcode::DBG_PHI: |
1330 | case TargetOpcode::DBG_LABEL: |
1331 | case TargetOpcode::LIFETIME_START: |
1332 | case TargetOpcode::LIFETIME_END: |
1333 | case TargetOpcode::PSEUDO_PROBE: |
1334 | return true; |
1335 | } |
1336 | } |
1337 | |
1338 | /// Return true if this is a transient instruction that is either very likely |
1339 | /// to be eliminated during register allocation (such as copy-like |
1340 | /// instructions), or if this instruction doesn't have an execution-time cost. |
1341 | bool isTransient() const { |
1342 | switch (getOpcode()) { |
1343 | default: |
1344 | return isMetaInstruction(); |
1345 | // Copy-like instructions are usually eliminated during register allocation. |
1346 | case TargetOpcode::PHI: |
1347 | case TargetOpcode::G_PHI: |
1348 | case TargetOpcode::COPY: |
1349 | case TargetOpcode::INSERT_SUBREG: |
1350 | case TargetOpcode::SUBREG_TO_REG: |
1351 | case TargetOpcode::REG_SEQUENCE: |
1352 | return true; |
1353 | } |
1354 | } |
1355 | |
  /// Return the number of instructions inside the MI bundle, excluding the
  /// bundle header.
  ///
  /// This is the number of instructions that MachineBasicBlock::iterator
  /// skips, 0 for unbundled instructions.
  unsigned getBundleSize() const;

  /// Return true if the MachineInstr reads the specified register.
  /// If TargetRegisterInfo is passed, then it also checks if there
  /// is a read of a super-register.
  /// This does not count partial redefines of virtual registers as reads:
  /// %reg1024:6 = OP.
  bool readsRegister(Register Reg,
                     const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
  }

  /// Return true if the MachineInstr reads the specified virtual register.
  /// Take into account that a partial define is a
  /// read-modify-write operation.
  bool readsVirtualRegister(Register Reg) const {
    return readsWritesVirtualRegister(Reg).first;
  }

  /// Return a pair of bools (reads, writes) indicating if this instruction
  /// reads or writes Reg. This also considers partial defines.
  /// If Ops is not null, all operand indices for Reg are added.
  std::pair<bool,bool> readsWritesVirtualRegister(Register Reg,
                                SmallVectorImpl<unsigned> *Ops = nullptr) const;

  /// Return true if the MachineInstr kills the specified register.
  /// If TargetRegisterInfo is passed, then it also checks if there is
  /// a kill of a super-register.
  bool killsRegister(Register Reg,
                     const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
  }

  /// Return true if the MachineInstr fully defines the specified register.
  /// If TargetRegisterInfo is passed, then it also checks
  /// if there is a def of a super-register.
  /// NOTE: It's ignoring subreg indices on virtual registers.
  bool definesRegister(Register Reg,
                       const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
  }

  /// Return true if the MachineInstr modifies (fully define or partially
  /// define) the specified register.
  /// NOTE: It's ignoring subreg indices on virtual registers.
  bool modifiesRegister(Register Reg,
                        const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
  }

  /// Returns true if the register is dead in this machine instruction.
  /// If TargetRegisterInfo is passed, then it also checks
  /// if there is a dead def of a super-register.
  bool registerDefIsDead(Register Reg,
                         const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
  }

  /// Returns true if the MachineInstr has an implicit-use operand of exactly
  /// the given register (not considering sub/super-registers).
  bool hasRegisterImplicitUseOperand(Register Reg) const;

  /// Returns the operand index that is a use of the specific register or -1
  /// if it is not found. It further tightens the search criteria to a use
  /// that kills the register if isKill is true.
  int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
                                const TargetRegisterInfo *TRI = nullptr) const;
1428 | |
1429 | /// Wrapper for findRegisterUseOperandIdx, it returns |
1430 | /// a pointer to the MachineOperand rather than an index. |
1431 | MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false, |
1432 | const TargetRegisterInfo *TRI = nullptr) { |
1433 | int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI); |
1434 | return (Idx == -1) ? nullptr : &getOperand(Idx); |
1435 | } |
1436 | |
  /// Const variant of findRegisterUseOperand; forwards to the non-const
  /// overload via const_cast, which is safe because the result is returned
  /// as a pointer-to-const.
  const MachineOperand *findRegisterUseOperand(
    Register Reg, bool isKill = false,
    const TargetRegisterInfo *TRI = nullptr) const {
    return const_cast<MachineInstr *>(this)->
      findRegisterUseOperand(Reg, isKill, TRI);
  }

  /// Returns the operand index that is a def of the specified register or
  /// -1 if it is not found. If isDead is true, defs that are not dead are
  /// skipped. If Overlap is true, then it also looks for defs that merely
  /// overlap the specified register. If TargetRegisterInfo is non-null,
  /// then it also checks if there is a def of a super-register.
  /// This may also return a register mask operand when Overlap is true.
  int findRegisterDefOperandIdx(Register Reg,
                                bool isDead = false, bool Overlap = false,
                                const TargetRegisterInfo *TRI = nullptr) const;
1453 | |
1454 | /// Wrapper for findRegisterDefOperandIdx, it returns |
1455 | /// a pointer to the MachineOperand rather than an index. |
1456 | MachineOperand * |
1457 | findRegisterDefOperand(Register Reg, bool isDead = false, |
1458 | bool Overlap = false, |
1459 | const TargetRegisterInfo *TRI = nullptr) { |
1460 | int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI); |
1461 | return (Idx == -1) ? nullptr : &getOperand(Idx); |
1462 | } |
1463 | |
  /// Const variant of findRegisterDefOperand; forwards to the non-const
  /// overload via const_cast, which is safe because the result is returned
  /// as a pointer-to-const.
  const MachineOperand *
  findRegisterDefOperand(Register Reg, bool isDead = false,
                         bool Overlap = false,
                         const TargetRegisterInfo *TRI = nullptr) const {
    return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
        Reg, isDead, Overlap, TRI);
  }

  /// Find the index of the first operand in the
  /// operand list that is used to represent the predicate. It returns -1 if
  /// none is found.
  int findFirstPredOperandIdx() const;

  /// Find the index of the flag word operand that
  /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
  /// getOperand(OpIdx) does not belong to an inline asm operand group.
  ///
  /// If GroupNo is not NULL, it will receive the number of the operand group
  /// containing OpIdx.
  int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;

  /// Compute the static register class constraint for operand OpIdx.
  /// For normal instructions, this is derived from the MCInstrDesc.
  /// For inline assembly it is derived from the flag words.
  ///
  /// Returns NULL if the static register class constraint cannot be
  /// determined.
  const TargetRegisterClass*
  getRegClassConstraint(unsigned OpIdx,
                        const TargetInstrInfo *TII,
                        const TargetRegisterInfo *TRI) const;

  /// Applies the constraints (def/use) implied by this MI on \p Reg to
  /// the given \p CurRC.
  /// If \p ExploreBundle is set and MI is part of a bundle, all the
  /// instructions inside the bundle will be taken into account. In other words,
  /// this method accumulates all the constraints of the operand of this MI and
  /// the related bundle if MI is a bundle or inside a bundle.
  ///
  /// Returns the register class that satisfies both \p CurRC and the
  /// constraints set by MI. Returns NULL if such a register class does not
  /// exist.
  ///
  /// \pre CurRC must not be NULL.
  const TargetRegisterClass *getRegClassConstraintEffectForVReg(
      Register Reg, const TargetRegisterClass *CurRC,
      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
      bool ExploreBundle = false) const;

  /// Applies the constraints (def/use) implied by the \p OpIdx operand
  /// to the given \p CurRC.
  ///
  /// Returns the register class that satisfies both \p CurRC and the
  /// constraints set by \p OpIdx MI. Returns NULL if such a register class
  /// does not exist.
  ///
  /// \pre CurRC must not be NULL.
  /// \pre The operand at \p OpIdx must be a register.
  const TargetRegisterClass *
  getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
                              const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) const;

  /// Add a tie between the register operands at DefIdx and UseIdx.
  /// The tie will cause the register allocator to ensure that the two
  /// operands are assigned the same physical register.
  ///
  /// Tied operands are managed automatically for explicit operands in the
  /// MCInstrDesc. This method is for exceptional cases like inline asm.
  void tieOperands(unsigned DefIdx, unsigned UseIdx);

  /// Given the index of a tied register operand, find the
  /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
  /// index of the tied operand which must exist.
  unsigned findTiedOperandIdx(unsigned OpIdx) const;
1539 | |
1540 | /// Given the index of a register def operand, |
1541 | /// check if the register def is tied to a source operand, due to either |
1542 | /// two-address elimination or inline assembly constraints. Returns the |
1543 | /// first tied use operand index by reference if UseOpIdx is not null. |
1544 | bool isRegTiedToUseOperand(unsigned DefOpIdx, |
1545 | unsigned *UseOpIdx = nullptr) const { |
1546 | const MachineOperand &MO = getOperand(DefOpIdx); |
1547 | if (!MO.isReg() || !MO.isDef() || !MO.isTied()) |
1548 | return false; |
1549 | if (UseOpIdx) |
1550 | *UseOpIdx = findTiedOperandIdx(DefOpIdx); |
1551 | return true; |
1552 | } |
1553 | |
1554 | /// Return true if the use operand of the specified index is tied to a def |
1555 | /// operand. It also returns the def operand index by reference if DefOpIdx |
1556 | /// is not null. |
1557 | bool isRegTiedToDefOperand(unsigned UseOpIdx, |
1558 | unsigned *DefOpIdx = nullptr) const { |
1559 | const MachineOperand &MO = getOperand(UseOpIdx); |
1560 | if (!MO.isReg() || !MO.isUse() || !MO.isTied()) |
1561 | return false; |
1562 | if (DefOpIdx) |
1563 | *DefOpIdx = findTiedOperandIdx(UseOpIdx); |
1564 | return true; |
1565 | } |
1566 | |
  /// Clears kill flags on all operands.
  void clearKillInfo();

  /// Replace all occurrences of FromReg with ToReg:SubIdx,
  /// properly composing subreg indices where necessary.
  void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx,
                          const TargetRegisterInfo &RegInfo);

  /// We have determined MI kills a register. Look for the
  /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
  /// add an implicit operand if it's not found. Returns true if the operand
  /// exists / is added.
  bool addRegisterKilled(Register IncomingReg,
                         const TargetRegisterInfo *RegInfo,
                         bool AddIfNotFound = false);

  /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
  /// all aliasing registers.
  void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo);

  /// We have determined MI defined a register without a use.
  /// Look for the operand that defines it and mark it as IsDead. If
  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
  /// true if the operand exists / is added.
  bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo,
                       bool AddIfNotFound = false);

  /// Clear all dead flags on operands defining register @p Reg.
  void clearRegisterDeads(Register Reg);

  /// Mark all subregister defs of register @p Reg with the undef flag.
  /// This function is used when we determined to have a subregister def in an
  /// otherwise undefined super register.
  void setRegisterDefReadUndef(Register Reg, bool IsUndef = true);

  /// We have determined MI defines a register. Make sure there is an operand
  /// defining Reg.
  void addRegisterDefined(Register Reg,
                          const TargetRegisterInfo *RegInfo = nullptr);

  /// Mark every physreg used by this instruction as
  /// dead except those in the UsedRegs list.
  ///
  /// On instructions with register mask operands, also add implicit-def
  /// operands for all registers in UsedRegs.
  void setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
                             const TargetRegisterInfo &TRI);

  /// Return true if it is safe to move this instruction. If
  /// SawStore is set to true, it means that there is a store (or call) between
  /// the instruction's location and its intended destination.
  bool isSafeToMove(AAResults *AA, bool &SawStore) const;

  /// Returns true if this instruction's memory access aliases the memory
  /// access of Other.
  ///
  /// Assumes any physical registers used to compute addresses
  /// have the same value for both instructions. Returns false if neither
  /// instruction writes to memory.
  ///
  /// @param AA Optional alias analysis, used to compare memory operands.
  /// @param Other MachineInstr to check aliasing against.
  /// @param UseTBAA Whether to pass TBAA information to alias analysis.
  bool mayAlias(AAResults *AA, const MachineInstr &Other, bool UseTBAA) const;

  /// Return true if this instruction may have an ordered
  /// or volatile memory reference, or if the information describing the memory
  /// reference is not available. Return false if it is known to have no
  /// ordered or volatile memory references.
  bool hasOrderedMemoryRef() const;
1637 | |
1638 | /// Return true if this load instruction never traps and points to a memory |
1639 | /// location whose value doesn't change during the execution of this function. |
1640 | /// |
1641 | /// Examples include loading a value from the constant pool or from the |
1642 | /// argument area of a function (if it does not change). If the instruction |
1643 | /// does multiple loads, this returns true only if all of the loads are |
1644 | /// dereferenceable and invariant. |
1645 | bool isDereferenceableInvariantLoad(AAResults *AA) const; |
1646 | |
1647 | /// If the specified instruction is a PHI that always merges together the |
1648 | /// same virtual register, return the register, otherwise return 0. |
1649 | unsigned isConstantValuePHI() const; |
1650 | |
1651 | /// Return true if this instruction has side effects that are not modeled |
1652 | /// by mayLoad / mayStore, etc. |
1653 | /// For all instructions, the property is encoded in MCInstrDesc::Flags |
1654 | /// (see MCInstrDesc::hasUnmodeledSideEffects(). The only exception is |
1655 | /// INLINEASM instruction, in which case the side effect property is encoded |
1656 | /// in one of its operands (see InlineAsm::Extra_HasSideEffect). |
1657 | /// |
1658 | bool hasUnmodeledSideEffects() const; |
1659 | |
1660 | /// Returns true if it is illegal to fold a load across this instruction. |
1661 | bool isLoadFoldBarrier() const; |
1662 | |
1663 | /// Return true if all the defs of this instruction are dead. |
1664 | bool allDefsAreDead() const; |
1665 | |
1666 | /// Return a valid size if the instruction is a spill instruction. |
1667 | Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const; |
1668 | |
1669 | /// Return a valid size if the instruction is a folded spill instruction. |
1670 | Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const; |
1671 | |
1672 | /// Return a valid size if the instruction is a restore instruction. |
1673 | Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const; |
1674 | |
1675 | /// Return a valid size if the instruction is a folded restore instruction. |
1676 | Optional<unsigned> |
1677 | getFoldedRestoreSize(const TargetInstrInfo *TII) const; |
1678 | |
1679 | /// Copy implicit register operands from specified |
1680 | /// instruction to this instruction. |
1681 | void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI); |
1682 | |
1683 | /// Debugging support |
1684 | /// @{ |
1685 | /// Determine the generic type to be printed (if needed) on uses and defs. |
1686 | LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, |
1687 | const MachineRegisterInfo &MRI) const; |
1688 | |
1689 | /// Return true when an instruction has tied register that can't be determined |
1690 | /// by the instruction's descriptor. This is useful for MIR printing, to |
1691 | /// determine whether we need to print the ties or not. |
1692 | bool hasComplexRegisterTies() const; |
1693 | |
1694 | /// Print this MI to \p OS. |
1695 | /// Don't print information that can be inferred from other instructions if |
1696 | /// \p IsStandalone is false. It is usually true when only a fragment of the |
1697 | /// function is printed. |
1698 | /// Only print the defs and the opcode if \p SkipOpers is true. |
1699 | /// Otherwise, also print operands if \p SkipDebugLoc is true. |
1700 | /// Otherwise, also print the debug loc, with a terminating newline. |
1701 | /// \p TII is used to print the opcode name. If it's not present, but the |
1702 | /// MI is in a function, the opcode will be printed using the function's TII. |
1703 | void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false, |
1704 | bool SkipDebugLoc = false, bool AddNewLine = true, |
1705 | const TargetInstrInfo *TII = nullptr) const; |
1706 | void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true, |
1707 | bool SkipOpers = false, bool SkipDebugLoc = false, |
1708 | bool AddNewLine = true, |
1709 | const TargetInstrInfo *TII = nullptr) const; |
1710 | void dump() const; |
1711 | /// Print on dbgs() the current instruction and the instructions defining its |
1712 | /// operands and so on until we reach \p MaxDepth. |
1713 | void dumpr(const MachineRegisterInfo &MRI, |
1714 | unsigned MaxDepth = UINT_MAX(2147483647 *2U +1U)) const; |
1715 | /// @} |
1716 | |
1717 | //===--------------------------------------------------------------------===// |
1718 | // Accessors used to build up machine instructions. |
1719 | |
1720 | /// Add the specified operand to the instruction. If it is an implicit |
1721 | /// operand, it is added to the end of the operand list. If it is an |
1722 | /// explicit operand it is added at the end of the explicit operand list |
1723 | /// (before the first implicit operand). |
1724 | /// |
1725 | /// MF must be the machine function that was used to allocate this |
1726 | /// instruction. |
1727 | /// |
1728 | /// MachineInstrBuilder provides a more convenient interface for creating |
1729 | /// instructions and adding operands. |
1730 | void addOperand(MachineFunction &MF, const MachineOperand &Op); |
1731 | |
1732 | /// Add an operand without providing an MF reference. This only works for |
1733 | /// instructions that are inserted in a basic block. |
1734 | /// |
1735 | /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be |
1736 | /// preferred. |
1737 | void addOperand(const MachineOperand &Op); |
1738 | |
1739 | /// Replace the instruction descriptor (thus opcode) of |
1740 | /// the current instruction with a new one. |
1741 | void setDesc(const MCInstrDesc &tid) { MCID = &tid; } |
1742 | |
1743 | /// Replace current source information with new such. |
1744 | /// Avoid using this, the constructor argument is preferable. |
1745 | void setDebugLoc(DebugLoc dl) { |
1746 | debugLoc = std::move(dl); |
1747 | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() && "Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 1747, __extension__ __PRETTY_FUNCTION__)); |
1748 | } |
1749 | |
1750 | /// Erase an operand from an instruction, leaving it with one |
1751 | /// fewer operand than it started with. |
1752 | void RemoveOperand(unsigned OpNo); |
1753 | |
1754 | /// Clear this MachineInstr's memory reference descriptor list. This resets |
1755 | /// the memrefs to their most conservative state. This should be used only |
1756 | /// as a last resort since it greatly pessimizes our knowledge of the memory |
1757 | /// access performed by the instruction. |
1758 | void dropMemRefs(MachineFunction &MF); |
1759 | |
1760 | /// Assign this MachineInstr's memory reference descriptor list. |
1761 | /// |
1762 | /// Unlike other methods, this *will* allocate them into a new array |
1763 | /// associated with the provided `MachineFunction`. |
1764 | void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs); |
1765 | |
1766 | /// Add a MachineMemOperand to the machine instruction. |
1767 | /// This function should be used only occasionally. The setMemRefs function |
1768 | /// is the primary method for setting up a MachineInstr's MemRefs list. |
1769 | void addMemOperand(MachineFunction &MF, MachineMemOperand *MO); |
1770 | |
1771 | /// Clone another MachineInstr's memory reference descriptor list and replace |
1772 | /// ours with it. |
1773 | /// |
1774 | /// Note that `*this` may be the incoming MI! |
1775 | /// |
1776 | /// Prefer this API whenever possible as it can avoid allocations in common |
1777 | /// cases. |
1778 | void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI); |
1779 | |
1780 | /// Clone the merge of multiple MachineInstrs' memory reference descriptors |
1781 | /// list and replace ours with it. |
1782 | /// |
1783 | /// Note that `*this` may be one of the incoming MIs! |
1784 | /// |
1785 | /// Prefer this API whenever possible as it can avoid allocations in common |
1786 | /// cases. |
1787 | void cloneMergedMemRefs(MachineFunction &MF, |
1788 | ArrayRef<const MachineInstr *> MIs); |
1789 | |
1790 | /// Set a symbol that will be emitted just prior to the instruction itself. |
1791 | /// |
1792 | /// Setting this to a null pointer will remove any such symbol. |
1793 | /// |
1794 | /// FIXME: This is not fully implemented yet. |
1795 | void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol); |
1796 | |
1797 | /// Set a symbol that will be emitted just after the instruction itself. |
1798 | /// |
1799 | /// Setting this to a null pointer will remove any such symbol. |
1800 | /// |
1801 | /// FIXME: This is not fully implemented yet. |
1802 | void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol); |
1803 | |
1804 | /// Clone another MachineInstr's pre- and post- instruction symbols and |
1805 | /// replace ours with it. |
1806 | void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI); |
1807 | |
1808 | /// Set a marker on instructions that denotes where we should create and emit |
1809 | /// heap alloc site labels. This waits until after instruction selection and |
1810 | /// optimizations to create the label, so it should still work if the |
1811 | /// instruction is removed or duplicated. |
1812 | void setHeapAllocMarker(MachineFunction &MF, MDNode *MD); |
1813 | |
1814 | /// Return the MIFlags which represent both MachineInstrs. This |
1815 | /// should be used when merging two MachineInstrs into one. This routine does |
1816 | /// not modify the MIFlags of this MachineInstr. |
1817 | uint16_t mergeFlagsWith(const MachineInstr& Other) const; |
1818 | |
1819 | static uint16_t copyFlagsFromInstruction(const Instruction &I); |
1820 | |
1821 | /// Copy all flags to MachineInst MIFlags |
1822 | void copyIRFlags(const Instruction &I); |
1823 | |
1824 | /// Break any tie involving OpIdx. |
1825 | void untieRegOperand(unsigned OpIdx) { |
1826 | MachineOperand &MO = getOperand(OpIdx); |
1827 | if (MO.isReg() && MO.isTied()) { |
1828 | getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0; |
1829 | MO.TiedTo = 0; |
1830 | } |
1831 | } |
1832 | |
1833 | /// Add all implicit def and use operands to this instruction. |
1834 | void addImplicitDefUseOperands(MachineFunction &MF); |
1835 | |
1836 | /// Scan instructions immediately following MI and collect any matching |
1837 | /// DBG_VALUEs. |
1838 | void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues); |
1839 | |
1840 | /// Find all DBG_VALUEs that point to the register def in this instruction |
1841 | /// and point them to \p Reg instead. |
1842 | void changeDebugValuesDefReg(Register Reg); |
1843 | |
1844 | /// Returns the Intrinsic::ID for this instruction. |
1845 | /// \pre Must have an intrinsic ID operand. |
1846 | unsigned getIntrinsicID() const { |
1847 | return getOperand(getNumExplicitDefs()).getIntrinsicID(); |
1848 | } |
1849 | |
1850 | /// Sets all register debug operands in this debug value instruction to be |
1851 | /// undef. |
1852 | void setDebugValueUndef() { |
1853 | assert(isDebugValue() && "Must be a debug value instruction.")(static_cast <bool> (isDebugValue() && "Must be a debug value instruction." ) ? void (0) : __assert_fail ("isDebugValue() && \"Must be a debug value instruction.\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 1853, __extension__ __PRETTY_FUNCTION__)); |
1854 | for (MachineOperand &MO : debug_operands()) { |
1855 | if (MO.isReg()) { |
1856 | MO.setReg(0); |
1857 | MO.setSubReg(0); |
1858 | } |
1859 | } |
1860 | } |
1861 | |
1862 | PseudoProbeAttributes getPseudoProbeAttribute() const { |
1863 | assert(isPseudoProbe() && "Must be a pseudo probe instruction")(static_cast <bool> (isPseudoProbe() && "Must be a pseudo probe instruction" ) ? void (0) : __assert_fail ("isPseudoProbe() && \"Must be a pseudo probe instruction\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 1863, __extension__ __PRETTY_FUNCTION__)); |
1864 | return (PseudoProbeAttributes)getOperand(3).getImm(); |
1865 | } |
1866 | |
1867 | void addPseudoProbeAttribute(PseudoProbeAttributes Attr) { |
1868 | assert(isPseudoProbe() && "Must be a pseudo probe instruction")(static_cast <bool> (isPseudoProbe() && "Must be a pseudo probe instruction" ) ? void (0) : __assert_fail ("isPseudoProbe() && \"Must be a pseudo probe instruction\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/MachineInstr.h" , 1868, __extension__ __PRETTY_FUNCTION__)); |
1869 | MachineOperand &AttrOperand = getOperand(3); |
1870 | AttrOperand.setImm(AttrOperand.getImm() | (uint32_t)Attr); |
1871 | } |
1872 | |
1873 | private: |
1874 | /// If this instruction is embedded into a MachineFunction, return the |
1875 | /// MachineRegisterInfo object for the current function, otherwise |
1876 | /// return null. |
1877 | MachineRegisterInfo *getRegInfo(); |
1878 | |
1879 | /// Unlink all of the register operands in this instruction from their |
1880 | /// respective use lists. This requires that the operands already be on their |
1881 | /// use lists. |
1882 | void RemoveRegOperandsFromUseLists(MachineRegisterInfo&); |
1883 | |
1884 | /// Add all of the register operands in this instruction from their |
1885 | /// respective use lists. This requires that the operands not be on their |
1886 | /// use lists yet. |
1887 | void AddRegOperandsToUseLists(MachineRegisterInfo&); |
1888 | |
1889 | /// Slow path for hasProperty when we're dealing with a bundle. |
1890 | bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const; |
1891 | |
1892 | /// Implements the logic of getRegClassConstraintEffectForVReg for the |
1893 | /// this MI and the given operand index \p OpIdx. |
1894 | /// If the related operand does not constrained Reg, this returns CurRC. |
1895 | const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl( |
1896 | unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC, |
1897 | const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; |
1898 | |
1899 | /// Stores extra instruction information inline or allocates as ExtraInfo |
1900 | /// based on the number of pointers. |
1901 | void setExtraInfo(MachineFunction &MF, ArrayRef<MachineMemOperand *> MMOs, |
1902 | MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, |
1903 | MDNode *HeapAllocMarker); |
1904 | }; |
1905 | |
1906 | /// Special DenseMapInfo traits to compare MachineInstr* by *value* of the |
1907 | /// instruction rather than by pointer value. |
1908 | /// The hashing and equality testing functions ignore definitions so this is |
1909 | /// useful for CSE, etc. |
1910 | struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> { |
1911 | static inline MachineInstr *getEmptyKey() { |
1912 | return nullptr; |
1913 | } |
1914 | |
1915 | static inline MachineInstr *getTombstoneKey() { |
1916 | return reinterpret_cast<MachineInstr*>(-1); |
1917 | } |
1918 | |
1919 | static unsigned getHashValue(const MachineInstr* const &MI); |
1920 | |
1921 | static bool isEqual(const MachineInstr* const &LHS, |
1922 | const MachineInstr* const &RHS) { |
1923 | if (RHS == getEmptyKey() || RHS == getTombstoneKey() || |
1924 | LHS == getEmptyKey() || LHS == getTombstoneKey()) |
1925 | return LHS == RHS; |
1926 | return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs); |
1927 | } |
1928 | }; |
1929 | |
1930 | //===----------------------------------------------------------------------===// |
1931 | // Debugging Support |
1932 | |
1933 | inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) { |
1934 | MI.print(OS); |
1935 | return OS; |
1936 | } |
1937 | |
1938 | } // end namespace llvm |
1939 | |
1940 | #endif // LLVM_CODEGEN_MACHINEINSTR_H |
1 | // RB tree implementation -*- C++ -*- |
2 | |
3 | // Copyright (C) 2001-2020 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /* |
26 | * |
27 | * Copyright (c) 1996,1997 |
28 | * Silicon Graphics Computer Systems, Inc. |
29 | * |
30 | * Permission to use, copy, modify, distribute and sell this software |
31 | * and its documentation for any purpose is hereby granted without fee, |
32 | * provided that the above copyright notice appear in all copies and |
33 | * that both that copyright notice and this permission notice appear |
34 | * in supporting documentation. Silicon Graphics makes no |
35 | * representations about the suitability of this software for any |
36 | * purpose. It is provided "as is" without express or implied warranty. |
37 | * |
38 | * |
39 | * Copyright (c) 1994 |
40 | * Hewlett-Packard Company |
41 | * |
42 | * Permission to use, copy, modify, distribute and sell this software |
43 | * and its documentation for any purpose is hereby granted without fee, |
44 | * provided that the above copyright notice appear in all copies and |
45 | * that both that copyright notice and this permission notice appear |
46 | * in supporting documentation. Hewlett-Packard Company makes no |
47 | * representations about the suitability of this software for any |
48 | * purpose. It is provided "as is" without express or implied warranty. |
49 | * |
50 | * |
51 | */ |
52 | |
53 | /** @file bits/stl_tree.h |
54 | * This is an internal header file, included by other library headers. |
55 | * Do not attempt to use it directly. @headername{map,set} |
56 | */ |
57 | |
58 | #ifndef _STL_TREE_H1 |
59 | #define _STL_TREE_H1 1 |
60 | |
61 | #pragma GCC system_header |
62 | |
63 | #include <bits/stl_algobase.h> |
64 | #include <bits/allocator.h> |
65 | #include <bits/stl_function.h> |
66 | #include <bits/cpp_type_traits.h> |
67 | #include <ext/alloc_traits.h> |
68 | #if __cplusplus201402L >= 201103L |
69 | # include <ext/aligned_buffer.h> |
70 | #endif |
71 | #if __cplusplus201402L > 201402L |
72 | # include <bits/node_handle.h> |
73 | #endif |
74 | |
75 | namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default"))) |
76 | { |
77 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
78 | |
79 | #if __cplusplus201402L > 201103L |
80 | # define __cpp_lib_generic_associative_lookup201304 201304 |
81 | #endif |
82 | |
83 | // Red-black tree class, designed for use in implementing STL |
84 | // associative containers (set, multiset, map, and multimap). The |
85 | // insertion and deletion algorithms are based on those in Cormen, |
86 | // Leiserson, and Rivest, Introduction to Algorithms (MIT Press, |
87 | // 1990), except that |
88 | // |
89 | // (1) the header cell is maintained with links not only to the root |
90 | // but also to the leftmost node of the tree, to enable constant |
91 | // time begin(), and to the rightmost node of the tree, to enable |
92 | // linear time performance when used with the generic set algorithms |
93 | // (set_union, etc.) |
94 | // |
95 | // (2) when a node being deleted has two children its successor node |
96 | // is relinked into its place, rather than copied, so that the only |
97 | // iterators invalidated are those referring to the deleted node. |
98 | |
99 | enum _Rb_tree_color { _S_red = false, _S_black = true }; |
100 | |
101 | struct _Rb_tree_node_base |
102 | { |
103 | typedef _Rb_tree_node_base* _Base_ptr; |
104 | typedef const _Rb_tree_node_base* _Const_Base_ptr; |
105 | |
106 | _Rb_tree_color _M_color; |
107 | _Base_ptr _M_parent; |
108 | _Base_ptr _M_left; |
109 | _Base_ptr _M_right; |
110 | |
111 | static _Base_ptr |
112 | _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
113 | { |
114 | while (__x->_M_left != 0) __x = __x->_M_left; |
115 | return __x; |
116 | } |
117 | |
118 | static _Const_Base_ptr |
119 | _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
120 | { |
121 | while (__x->_M_left != 0) __x = __x->_M_left; |
122 | return __x; |
123 | } |
124 | |
125 | static _Base_ptr |
126 | _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
127 | { |
128 | while (__x->_M_right != 0) __x = __x->_M_right; |
129 | return __x; |
130 | } |
131 | |
132 | static _Const_Base_ptr |
133 | _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
134 | { |
135 | while (__x->_M_right != 0) __x = __x->_M_right; |
136 | return __x; |
137 | } |
138 | }; |
139 | |
140 | // Helper type offering value initialization guarantee on the compare functor. |
141 | template<typename _Key_compare> |
142 | struct _Rb_tree_key_compare |
143 | { |
144 | _Key_compare _M_key_compare; |
145 | |
146 | _Rb_tree_key_compare() |
147 | _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Key_compare> ::value) |
148 | is_nothrow_default_constructible<_Key_compare>::value)noexcept(is_nothrow_default_constructible<_Key_compare> ::value) |
149 | : _M_key_compare() |
150 | { } |
151 | |
152 | _Rb_tree_key_compare(const _Key_compare& __comp) |
153 | : _M_key_compare(__comp) |
154 | { } |
155 | |
156 | #if __cplusplus201402L >= 201103L |
157 | // Copy constructor added for consistency with C++98 mode. |
158 | _Rb_tree_key_compare(const _Rb_tree_key_compare&) = default; |
159 | |
160 | _Rb_tree_key_compare(_Rb_tree_key_compare&& __x) |
161 | noexcept(is_nothrow_copy_constructible<_Key_compare>::value) |
162 | : _M_key_compare(__x._M_key_compare) |
163 | { } |
164 | #endif |
165 | }; |
166 | |
167 | // Helper type to manage default initialization of node count and header. |
168 | struct _Rb_tree_header |
169 | { |
170 | _Rb_tree_node_base _M_header; |
171 | size_t _M_node_count; // Keeps track of size of tree. |
172 | |
173 | _Rb_tree_header() _GLIBCXX_NOEXCEPTnoexcept |
174 | { |
175 | _M_header._M_color = _S_red; |
176 | _M_reset(); |
177 | } |
178 | |
179 | #if __cplusplus201402L >= 201103L |
180 | _Rb_tree_header(_Rb_tree_header&& __x) noexcept |
181 | { |
182 | if (__x._M_header._M_parent != nullptr) |
183 | _M_move_data(__x); |
184 | else |
185 | { |
186 | _M_header._M_color = _S_red; |
187 | _M_reset(); |
188 | } |
189 | } |
190 | #endif |
191 | |
192 | void |
193 | _M_move_data(_Rb_tree_header& __from) |
194 | { |
195 | _M_header._M_color = __from._M_header._M_color; |
196 | _M_header._M_parent = __from._M_header._M_parent; |
197 | _M_header._M_left = __from._M_header._M_left; |
198 | _M_header._M_right = __from._M_header._M_right; |
199 | _M_header._M_parent->_M_parent = &_M_header; |
200 | _M_node_count = __from._M_node_count; |
201 | |
202 | __from._M_reset(); |
203 | } |
204 | |
205 | void |
206 | _M_reset() |
207 | { |
208 | _M_header._M_parent = 0; |
209 | _M_header._M_left = &_M_header; |
210 | _M_header._M_right = &_M_header; |
211 | _M_node_count = 0; |
212 | } |
213 | }; |
214 | |
215 | template<typename _Val> |
216 | struct _Rb_tree_node : public _Rb_tree_node_base |
217 | { |
218 | typedef _Rb_tree_node<_Val>* _Link_type; |
219 | |
220 | #if __cplusplus201402L < 201103L |
221 | _Val _M_value_field; |
222 | |
223 | _Val* |
224 | _M_valptr() |
225 | { return std::__addressof(_M_value_field); } |
226 | |
227 | const _Val* |
228 | _M_valptr() const |
229 | { return std::__addressof(_M_value_field); } |
230 | #else |
231 | __gnu_cxx::__aligned_membuf<_Val> _M_storage; |
232 | |
233 | _Val* |
234 | _M_valptr() |
235 | { return _M_storage._M_ptr(); } |
236 | |
237 | const _Val* |
238 | _M_valptr() const |
239 | { return _M_storage._M_ptr(); } |
240 | #endif |
241 | }; |
242 | |
243 | _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base* |
244 | _Rb_tree_increment(_Rb_tree_node_base* __x) throw (); |
245 | |
246 | _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base* |
247 | _Rb_tree_increment(const _Rb_tree_node_base* __x) throw (); |
248 | |
249 | _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base* |
250 | _Rb_tree_decrement(_Rb_tree_node_base* __x) throw (); |
251 | |
252 | _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base* |
253 | _Rb_tree_decrement(const _Rb_tree_node_base* __x) throw (); |
254 | |
255 | template<typename _Tp> |
256 | struct _Rb_tree_iterator |
257 | { |
258 | typedef _Tp value_type; |
259 | typedef _Tp& reference; |
260 | typedef _Tp* pointer; |
261 | |
262 | typedef bidirectional_iterator_tag iterator_category; |
263 | typedef ptrdiff_t difference_type; |
264 | |
265 | typedef _Rb_tree_iterator<_Tp> _Self; |
266 | typedef _Rb_tree_node_base::_Base_ptr _Base_ptr; |
267 | typedef _Rb_tree_node<_Tp>* _Link_type; |
268 | |
269 | _Rb_tree_iterator() _GLIBCXX_NOEXCEPTnoexcept |
270 | : _M_node() { } |
271 | |
272 | explicit |
273 | _Rb_tree_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
274 | : _M_node(__x) { } |
275 | |
276 | reference |
277 | operator*() const _GLIBCXX_NOEXCEPTnoexcept |
278 | { return *static_cast<_Link_type>(_M_node)->_M_valptr(); } |
279 | |
280 | pointer |
281 | operator->() const _GLIBCXX_NOEXCEPTnoexcept |
282 | { return static_cast<_Link_type> (_M_node)->_M_valptr(); } |
283 | |
284 | _Self& |
285 | operator++() _GLIBCXX_NOEXCEPTnoexcept |
286 | { |
287 | _M_node = _Rb_tree_increment(_M_node); |
288 | return *this; |
289 | } |
290 | |
291 | _Self |
292 | operator++(int) _GLIBCXX_NOEXCEPTnoexcept |
293 | { |
294 | _Self __tmp = *this; |
295 | _M_node = _Rb_tree_increment(_M_node); |
296 | return __tmp; |
297 | } |
298 | |
299 | _Self& |
300 | operator--() _GLIBCXX_NOEXCEPTnoexcept |
301 | { |
302 | _M_node = _Rb_tree_decrement(_M_node); |
303 | return *this; |
304 | } |
305 | |
306 | _Self |
307 | operator--(int) _GLIBCXX_NOEXCEPTnoexcept |
308 | { |
309 | _Self __tmp = *this; |
310 | _M_node = _Rb_tree_decrement(_M_node); |
311 | return __tmp; |
312 | } |
313 | |
314 | friend bool |
315 | operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept |
316 | { return __x._M_node == __y._M_node; } |
317 | |
318 | #if ! __cpp_lib_three_way_comparison |
319 | friend bool |
320 | operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept |
321 | { return __x._M_node != __y._M_node; } |
322 | #endif |
323 | |
324 | _Base_ptr _M_node; |
325 | }; |
326 | |
327 | template<typename _Tp> |
328 | struct _Rb_tree_const_iterator |
329 | { |
330 | typedef _Tp value_type; |
331 | typedef const _Tp& reference; |
332 | typedef const _Tp* pointer; |
333 | |
334 | typedef _Rb_tree_iterator<_Tp> iterator; |
335 | |
336 | typedef bidirectional_iterator_tag iterator_category; |
337 | typedef ptrdiff_t difference_type; |
338 | |
339 | typedef _Rb_tree_const_iterator<_Tp> _Self; |
340 | typedef _Rb_tree_node_base::_Const_Base_ptr _Base_ptr; |
341 | typedef const _Rb_tree_node<_Tp>* _Link_type; |
342 | |
343 | _Rb_tree_const_iterator() _GLIBCXX_NOEXCEPTnoexcept |
344 | : _M_node() { } |
345 | |
346 | explicit |
347 | _Rb_tree_const_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
348 | : _M_node(__x) { } |
349 | |
350 | _Rb_tree_const_iterator(const iterator& __it) _GLIBCXX_NOEXCEPTnoexcept |
351 | : _M_node(__it._M_node) { } |
352 | |
353 | iterator |
354 | _M_const_cast() const _GLIBCXX_NOEXCEPTnoexcept |
355 | { return iterator(const_cast<typename iterator::_Base_ptr>(_M_node)); } |
356 | |
357 | reference |
358 | operator*() const _GLIBCXX_NOEXCEPTnoexcept |
359 | { return *static_cast<_Link_type>(_M_node)->_M_valptr(); } |
360 | |
361 | pointer |
362 | operator->() const _GLIBCXX_NOEXCEPTnoexcept |
363 | { return static_cast<_Link_type>(_M_node)->_M_valptr(); } |
364 | |
365 | _Self& |
366 | operator++() _GLIBCXX_NOEXCEPTnoexcept |
367 | { |
368 | _M_node = _Rb_tree_increment(_M_node); |
369 | return *this; |
370 | } |
371 | |
372 | _Self |
373 | operator++(int) _GLIBCXX_NOEXCEPTnoexcept |
374 | { |
375 | _Self __tmp = *this; |
376 | _M_node = _Rb_tree_increment(_M_node); |
377 | return __tmp; |
378 | } |
379 | |
380 | _Self& |
381 | operator--() _GLIBCXX_NOEXCEPTnoexcept |
382 | { |
383 | _M_node = _Rb_tree_decrement(_M_node); |
384 | return *this; |
385 | } |
386 | |
387 | _Self |
388 | operator--(int) _GLIBCXX_NOEXCEPTnoexcept |
389 | { |
390 | _Self __tmp = *this; |
391 | _M_node = _Rb_tree_decrement(_M_node); |
392 | return __tmp; |
393 | } |
394 | |
395 | friend bool |
396 | operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept |
397 | { return __x._M_node == __y._M_node; } |
398 | |
399 | #if ! __cpp_lib_three_way_comparison |
400 | friend bool |
401 | operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept |
402 | { return __x._M_node != __y._M_node; } |
403 | #endif |
404 | |
405 | _Base_ptr _M_node; |
406 | }; |
407 | |
408 | void |
409 | _Rb_tree_insert_and_rebalance(const bool __insert_left, |
410 | _Rb_tree_node_base* __x, |
411 | _Rb_tree_node_base* __p, |
412 | _Rb_tree_node_base& __header) throw (); |
413 | |
414 | _Rb_tree_node_base* |
415 | _Rb_tree_rebalance_for_erase(_Rb_tree_node_base* const __z, |
416 | _Rb_tree_node_base& __header) throw (); |
417 | |
418 | #if __cplusplus201402L >= 201402L |
419 | template<typename _Cmp, typename _SfinaeType, typename = __void_t<>> |
420 | struct __has_is_transparent |
421 | { }; |
422 | |
423 | template<typename _Cmp, typename _SfinaeType> |
424 | struct __has_is_transparent<_Cmp, _SfinaeType, |
425 | __void_t<typename _Cmp::is_transparent>> |
426 | { typedef void type; }; |
427 | |
428 | template<typename _Cmp, typename _SfinaeType> |
429 | using __has_is_transparent_t |
430 | = typename __has_is_transparent<_Cmp, _SfinaeType>::type; |
431 | #endif |
432 | |
433 | #if __cplusplus201402L > 201402L |
434 | template<typename _Tree1, typename _Cmp2> |
435 | struct _Rb_tree_merge_helper { }; |
436 | #endif |
437 | |
438 | template<typename _Key, typename _Val, typename _KeyOfValue, |
439 | typename _Compare, typename _Alloc = allocator<_Val> > |
440 | class _Rb_tree |
441 | { |
442 | typedef typename __gnu_cxx::__alloc_traits<_Alloc>::template |
443 | rebind<_Rb_tree_node<_Val> >::other _Node_allocator; |
444 | |
445 | typedef __gnu_cxx::__alloc_traits<_Node_allocator> _Alloc_traits; |
446 | |
447 | protected: |
448 | typedef _Rb_tree_node_base* _Base_ptr; |
449 | typedef const _Rb_tree_node_base* _Const_Base_ptr; |
450 | typedef _Rb_tree_node<_Val>* _Link_type; |
451 | typedef const _Rb_tree_node<_Val>* _Const_Link_type; |
452 | |
453 | private: |
454 | // Functor recycling a pool of nodes and using allocation once the pool |
455 | // is empty. |
456 | struct _Reuse_or_alloc_node |
457 | { |
458 | _Reuse_or_alloc_node(_Rb_tree& __t) |
459 | : _M_root(__t._M_root()), _M_nodes(__t._M_rightmost()), _M_t(__t) |
460 | { |
461 | if (_M_root) |
462 | { |
463 | _M_root->_M_parent = 0; |
464 | |
465 | if (_M_nodes->_M_left) |
466 | _M_nodes = _M_nodes->_M_left; |
467 | } |
468 | else |
469 | _M_nodes = 0; |
470 | } |
471 | |
472 | #if __cplusplus201402L >= 201103L |
473 | _Reuse_or_alloc_node(const _Reuse_or_alloc_node&) = delete; |
474 | #endif |
475 | |
476 | ~_Reuse_or_alloc_node() |
477 | { _M_t._M_erase(static_cast<_Link_type>(_M_root)); } |
478 | |
479 | template<typename _Arg> |
480 | _Link_type |
481 | #if __cplusplus201402L < 201103L |
482 | operator()(const _Arg& __arg) |
483 | #else |
484 | operator()(_Arg&& __arg) |
485 | #endif |
486 | { |
487 | _Link_type __node = static_cast<_Link_type>(_M_extract()); |
488 | if (__node) |
489 | { |
490 | _M_t._M_destroy_node(__node); |
491 | _M_t._M_construct_node(__node, _GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg)); |
492 | return __node; |
493 | } |
494 | |
495 | return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg)); |
496 | } |
497 | |
498 | private: |
499 | _Base_ptr |
500 | _M_extract() |
501 | { |
502 | if (!_M_nodes) |
503 | return _M_nodes; |
504 | |
505 | _Base_ptr __node = _M_nodes; |
506 | _M_nodes = _M_nodes->_M_parent; |
507 | if (_M_nodes) |
508 | { |
509 | if (_M_nodes->_M_right == __node) |
510 | { |
511 | _M_nodes->_M_right = 0; |
512 | |
513 | if (_M_nodes->_M_left) |
514 | { |
515 | _M_nodes = _M_nodes->_M_left; |
516 | |
517 | while (_M_nodes->_M_right) |
518 | _M_nodes = _M_nodes->_M_right; |
519 | |
520 | if (_M_nodes->_M_left) |
521 | _M_nodes = _M_nodes->_M_left; |
522 | } |
523 | } |
524 | else // __node is on the left. |
525 | _M_nodes->_M_left = 0; |
526 | } |
527 | else |
528 | _M_root = 0; |
529 | |
530 | return __node; |
531 | } |
532 | |
533 | _Base_ptr _M_root; |
534 | _Base_ptr _M_nodes; |
535 | _Rb_tree& _M_t; |
536 | }; |
537 | |
538 | // Functor similar to the previous one but without any pool of nodes to |
539 | // recycle. |
540 | struct _Alloc_node |
541 | { |
542 | _Alloc_node(_Rb_tree& __t) |
543 | : _M_t(__t) { } |
544 | |
545 | template<typename _Arg> |
546 | _Link_type |
547 | #if __cplusplus201402L < 201103L |
548 | operator()(const _Arg& __arg) const |
549 | #else |
550 | operator()(_Arg&& __arg) const |
551 | #endif |
552 | { return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg)); } |
553 | |
554 | private: |
555 | _Rb_tree& _M_t; |
556 | }; |
557 | |
558 | public: |
559 | typedef _Key key_type; |
560 | typedef _Val value_type; |
561 | typedef value_type* pointer; |
562 | typedef const value_type* const_pointer; |
563 | typedef value_type& reference; |
564 | typedef const value_type& const_reference; |
565 | typedef size_t size_type; |
566 | typedef ptrdiff_t difference_type; |
567 | typedef _Alloc allocator_type; |
568 | |
569 | _Node_allocator& |
570 | _M_get_Node_allocator() _GLIBCXX_NOEXCEPTnoexcept |
571 | { return this->_M_impl; } |
572 | |
573 | const _Node_allocator& |
574 | _M_get_Node_allocator() const _GLIBCXX_NOEXCEPTnoexcept |
575 | { return this->_M_impl; } |
576 | |
577 | allocator_type |
578 | get_allocator() const _GLIBCXX_NOEXCEPTnoexcept |
579 | { return allocator_type(_M_get_Node_allocator()); } |
580 | |
581 | protected: |
582 | _Link_type |
583 | _M_get_node() |
584 | { return _Alloc_traits::allocate(_M_get_Node_allocator(), 1); } |
585 | |
586 | void |
587 | _M_put_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept |
588 | { _Alloc_traits::deallocate(_M_get_Node_allocator(), __p, 1); } |
589 | |
590 | #if __cplusplus201402L < 201103L |
591 | void |
592 | _M_construct_node(_Link_type __node, const value_type& __x) |
593 | { |
594 | __tryif (true) |
595 | { get_allocator().construct(__node->_M_valptr(), __x); } |
596 | __catch(...)if (false) |
597 | { |
598 | _M_put_node(__node); |
599 | __throw_exception_again; |
600 | } |
601 | } |
602 | |
603 | _Link_type |
604 | _M_create_node(const value_type& __x) |
605 | { |
606 | _Link_type __tmp = _M_get_node(); |
607 | _M_construct_node(__tmp, __x); |
608 | return __tmp; |
609 | } |
610 | #else |
611 | template<typename... _Args> |
612 | void |
613 | _M_construct_node(_Link_type __node, _Args&&... __args) |
614 | { |
615 | __tryif (true) |
616 | { |
617 | ::new(__node) _Rb_tree_node<_Val>; |
618 | _Alloc_traits::construct(_M_get_Node_allocator(), |
619 | __node->_M_valptr(), |
620 | std::forward<_Args>(__args)...); |
621 | } |
622 | __catch(...)if (false) |
623 | { |
624 | __node->~_Rb_tree_node<_Val>(); |
625 | _M_put_node(__node); |
626 | __throw_exception_again; |
627 | } |
628 | } |
629 | |
630 | template<typename... _Args> |
631 | _Link_type |
632 | _M_create_node(_Args&&... __args) |
633 | { |
634 | _Link_type __tmp = _M_get_node(); |
635 | _M_construct_node(__tmp, std::forward<_Args>(__args)...); |
636 | return __tmp; |
637 | } |
638 | #endif |
639 | |
640 | void |
641 | _M_destroy_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept |
642 | { |
643 | #if __cplusplus201402L < 201103L |
644 | get_allocator().destroy(__p->_M_valptr()); |
645 | #else |
646 | _Alloc_traits::destroy(_M_get_Node_allocator(), __p->_M_valptr()); |
647 | __p->~_Rb_tree_node<_Val>(); |
648 | #endif |
649 | } |
650 | |
651 | void |
652 | _M_drop_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept |
653 | { |
654 | _M_destroy_node(__p); |
655 | _M_put_node(__p); |
656 | } |
657 | |
658 | template<typename _NodeGen> |
659 | _Link_type |
660 | _M_clone_node(_Const_Link_type __x, _NodeGen& __node_gen) |
661 | { |
662 | _Link_type __tmp = __node_gen(*__x->_M_valptr()); |
663 | __tmp->_M_color = __x->_M_color; |
664 | __tmp->_M_left = 0; |
665 | __tmp->_M_right = 0; |
666 | return __tmp; |
667 | } |
668 | |
669 | protected: |
670 | #if _GLIBCXX_INLINE_VERSION0 |
671 | template<typename _Key_compare> |
672 | #else |
673 | // Unused _Is_pod_comparator is kept as it is part of mangled name. |
674 | template<typename _Key_compare, |
675 | bool /* _Is_pod_comparator */ = __is_pod(_Key_compare)> |
676 | #endif |
677 | struct _Rb_tree_impl |
678 | : public _Node_allocator |
679 | , public _Rb_tree_key_compare<_Key_compare> |
680 | , public _Rb_tree_header |
681 | { |
682 | typedef _Rb_tree_key_compare<_Key_compare> _Base_key_compare; |
683 | |
684 | _Rb_tree_impl() |
685 | _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Node_allocator> ::value && is_nothrow_default_constructible<_Base_key_compare >::value) |
686 | is_nothrow_default_constructible<_Node_allocator>::valuenoexcept(is_nothrow_default_constructible<_Node_allocator> ::value && is_nothrow_default_constructible<_Base_key_compare >::value) |
687 | && is_nothrow_default_constructible<_Base_key_compare>::value )noexcept(is_nothrow_default_constructible<_Node_allocator> ::value && is_nothrow_default_constructible<_Base_key_compare >::value) |
688 | : _Node_allocator() |
689 | { } |
690 | |
691 | _Rb_tree_impl(const _Rb_tree_impl& __x) |
692 | : _Node_allocator(_Alloc_traits::_S_select_on_copy(__x)) |
693 | , _Base_key_compare(__x._M_key_compare) |
694 | { } |
695 | |
696 | #if __cplusplus201402L < 201103L |
697 | _Rb_tree_impl(const _Key_compare& __comp, const _Node_allocator& __a) |
698 | : _Node_allocator(__a), _Base_key_compare(__comp) |
699 | { } |
700 | #else |
701 | _Rb_tree_impl(_Rb_tree_impl&&) = default; |
702 | |
703 | explicit |
704 | _Rb_tree_impl(_Node_allocator&& __a) |
705 | : _Node_allocator(std::move(__a)) |
706 | { } |
707 | |
708 | _Rb_tree_impl(_Rb_tree_impl&& __x, _Node_allocator&& __a) |
709 | : _Node_allocator(std::move(__a)), |
710 | _Base_key_compare(std::move(__x)), |
711 | _Rb_tree_header(std::move(__x)) |
712 | { } |
713 | |
714 | _Rb_tree_impl(const _Key_compare& __comp, _Node_allocator&& __a) |
715 | : _Node_allocator(std::move(__a)), _Base_key_compare(__comp) |
716 | { } |
717 | #endif |
718 | }; |
719 | |
720 | _Rb_tree_impl<_Compare> _M_impl; |
721 | |
722 | protected: |
723 | _Base_ptr& |
724 | _M_root() _GLIBCXX_NOEXCEPTnoexcept |
725 | { return this->_M_impl._M_header._M_parent; } |
726 | |
727 | _Const_Base_ptr |
728 | _M_root() const _GLIBCXX_NOEXCEPTnoexcept |
729 | { return this->_M_impl._M_header._M_parent; } |
730 | |
731 | _Base_ptr& |
732 | _M_leftmost() _GLIBCXX_NOEXCEPTnoexcept |
733 | { return this->_M_impl._M_header._M_left; } |
734 | |
735 | _Const_Base_ptr |
736 | _M_leftmost() const _GLIBCXX_NOEXCEPTnoexcept |
737 | { return this->_M_impl._M_header._M_left; } |
738 | |
739 | _Base_ptr& |
740 | _M_rightmost() _GLIBCXX_NOEXCEPTnoexcept |
741 | { return this->_M_impl._M_header._M_right; } |
742 | |
743 | _Const_Base_ptr |
744 | _M_rightmost() const _GLIBCXX_NOEXCEPTnoexcept |
745 | { return this->_M_impl._M_header._M_right; } |
746 | |
747 | _Link_type |
748 | _M_begin() _GLIBCXX_NOEXCEPTnoexcept |
749 | { return static_cast<_Link_type>(this->_M_impl._M_header._M_parent); } |
750 | |
751 | _Const_Link_type |
752 | _M_begin() const _GLIBCXX_NOEXCEPTnoexcept |
753 | { |
754 | return static_cast<_Const_Link_type> |
755 | (this->_M_impl._M_header._M_parent); |
756 | } |
757 | |
758 | _Base_ptr |
759 | _M_end() _GLIBCXX_NOEXCEPTnoexcept |
760 | { return &this->_M_impl._M_header; } |
761 | |
762 | _Const_Base_ptr |
763 | _M_end() const _GLIBCXX_NOEXCEPTnoexcept |
764 | { return &this->_M_impl._M_header; } |
765 | |
766 | static const _Key& |
767 | _S_key(_Const_Link_type __x) |
768 | { |
769 | #if __cplusplus201402L >= 201103L |
770 | // If we're asking for the key we're presumably using the comparison |
771 | // object, and so this is a good place to sanity check it. |
772 | static_assert(__is_invocable<_Compare&, const _Key&, const _Key&>{}, |
773 | "comparison object must be invocable " |
774 | "with two arguments of key type"); |
775 | # if __cplusplus201402L >= 201703L |
776 | // _GLIBCXX_RESOLVE_LIB_DEFECTS |
777 | // 2542. Missing const requirements for associative containers |
778 | if constexpr (__is_invocable<_Compare&, const _Key&, const _Key&>{}) |
779 | static_assert( |
780 | is_invocable_v<const _Compare&, const _Key&, const _Key&>, |
781 | "comparison object must be invocable as const"); |
782 | # endif // C++17 |
783 | #endif // C++11 |
784 | |
785 | return _KeyOfValue()(*__x->_M_valptr()); |
786 | } |
787 | |
788 | static _Link_type |
789 | _S_left(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
790 | { return static_cast<_Link_type>(__x->_M_left); } |
791 | |
792 | static _Const_Link_type |
793 | _S_left(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
794 | { return static_cast<_Const_Link_type>(__x->_M_left); } |
795 | |
796 | static _Link_type |
797 | _S_right(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
798 | { return static_cast<_Link_type>(__x->_M_right); } |
799 | |
800 | static _Const_Link_type |
801 | _S_right(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
802 | { return static_cast<_Const_Link_type>(__x->_M_right); } |
803 | |
804 | static const _Key& |
805 | _S_key(_Const_Base_ptr __x) |
806 | { return _S_key(static_cast<_Const_Link_type>(__x)); } |
807 | |
808 | static _Base_ptr |
809 | _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
810 | { return _Rb_tree_node_base::_S_minimum(__x); } |
811 | |
812 | static _Const_Base_ptr |
813 | _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
814 | { return _Rb_tree_node_base::_S_minimum(__x); } |
815 | |
816 | static _Base_ptr |
817 | _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
818 | { return _Rb_tree_node_base::_S_maximum(__x); } |
819 | |
820 | static _Const_Base_ptr |
821 | _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept |
822 | { return _Rb_tree_node_base::_S_maximum(__x); } |
823 | |
824 | public: |
825 | typedef _Rb_tree_iterator<value_type> iterator; |
826 | typedef _Rb_tree_const_iterator<value_type> const_iterator; |
827 | |
828 | typedef std::reverse_iterator<iterator> reverse_iterator; |
829 | typedef std::reverse_iterator<const_iterator> const_reverse_iterator; |
830 | |
831 | #if __cplusplus201402L > 201402L |
832 | using node_type = _Node_handle<_Key, _Val, _Node_allocator>; |
833 | using insert_return_type = _Node_insert_return< |
834 | conditional_t<is_same_v<_Key, _Val>, const_iterator, iterator>, |
835 | node_type>; |
836 | #endif |
837 | |
838 | pair<_Base_ptr, _Base_ptr> |
839 | _M_get_insert_unique_pos(const key_type& __k); |
840 | |
841 | pair<_Base_ptr, _Base_ptr> |
842 | _M_get_insert_equal_pos(const key_type& __k); |
843 | |
844 | pair<_Base_ptr, _Base_ptr> |
845 | _M_get_insert_hint_unique_pos(const_iterator __pos, |
846 | const key_type& __k); |
847 | |
848 | pair<_Base_ptr, _Base_ptr> |
849 | _M_get_insert_hint_equal_pos(const_iterator __pos, |
850 | const key_type& __k); |
851 | |
852 | private: |
853 | #if __cplusplus201402L >= 201103L |
854 | template<typename _Arg, typename _NodeGen> |
855 | iterator |
856 | _M_insert_(_Base_ptr __x, _Base_ptr __y, _Arg&& __v, _NodeGen&); |
857 | |
858 | iterator |
859 | _M_insert_node(_Base_ptr __x, _Base_ptr __y, _Link_type __z); |
860 | |
861 | template<typename _Arg> |
862 | iterator |
863 | _M_insert_lower(_Base_ptr __y, _Arg&& __v); |
864 | |
865 | template<typename _Arg> |
866 | iterator |
867 | _M_insert_equal_lower(_Arg&& __x); |
868 | |
869 | iterator |
870 | _M_insert_lower_node(_Base_ptr __p, _Link_type __z); |
871 | |
872 | iterator |
873 | _M_insert_equal_lower_node(_Link_type __z); |
874 | #else |
875 | template<typename _NodeGen> |
876 | iterator |
877 | _M_insert_(_Base_ptr __x, _Base_ptr __y, |
878 | const value_type& __v, _NodeGen&); |
879 | |
880 | // _GLIBCXX_RESOLVE_LIB_DEFECTS |
881 | // 233. Insertion hints in associative containers. |
882 | iterator |
883 | _M_insert_lower(_Base_ptr __y, const value_type& __v); |
884 | |
885 | iterator |
886 | _M_insert_equal_lower(const value_type& __x); |
887 | #endif |
888 | |
889 | template<typename _NodeGen> |
890 | _Link_type |
891 | _M_copy(_Const_Link_type __x, _Base_ptr __p, _NodeGen&); |
892 | |
893 | template<typename _NodeGen> |
894 | _Link_type |
895 | _M_copy(const _Rb_tree& __x, _NodeGen& __gen) |
896 | { |
897 | _Link_type __root = _M_copy(__x._M_begin(), _M_end(), __gen); |
898 | _M_leftmost() = _S_minimum(__root); |
899 | _M_rightmost() = _S_maximum(__root); |
900 | _M_impl._M_node_count = __x._M_impl._M_node_count; |
901 | return __root; |
902 | } |
903 | |
904 | _Link_type |
905 | _M_copy(const _Rb_tree& __x) |
906 | { |
907 | _Alloc_node __an(*this); |
908 | return _M_copy(__x, __an); |
909 | } |
910 | |
911 | void |
912 | _M_erase(_Link_type __x); |
913 | |
914 | iterator |
915 | _M_lower_bound(_Link_type __x, _Base_ptr __y, |
916 | const _Key& __k); |
917 | |
918 | const_iterator |
919 | _M_lower_bound(_Const_Link_type __x, _Const_Base_ptr __y, |
920 | const _Key& __k) const; |
921 | |
922 | iterator |
923 | _M_upper_bound(_Link_type __x, _Base_ptr __y, |
924 | const _Key& __k); |
925 | |
926 | const_iterator |
927 | _M_upper_bound(_Const_Link_type __x, _Const_Base_ptr __y, |
928 | const _Key& __k) const; |
929 | |
930 | public: |
931 | // allocation/deallocation |
932 | #if __cplusplus201402L < 201103L |
933 | _Rb_tree() { } |
934 | #else |
935 | _Rb_tree() = default; |
936 | #endif |
937 | |
938 | _Rb_tree(const _Compare& __comp, |
939 | const allocator_type& __a = allocator_type()) |
940 | : _M_impl(__comp, _Node_allocator(__a)) { } |
941 | |
942 | _Rb_tree(const _Rb_tree& __x) |
943 | : _M_impl(__x._M_impl) |
944 | { |
945 | if (__x._M_root() != 0) |
946 | _M_root() = _M_copy(__x); |
947 | } |
948 | |
949 | #if __cplusplus201402L >= 201103L |
950 | _Rb_tree(const allocator_type& __a) |
951 | : _M_impl(_Node_allocator(__a)) |
952 | { } |
953 | |
954 | _Rb_tree(const _Rb_tree& __x, const allocator_type& __a) |
955 | : _M_impl(__x._M_impl._M_key_compare, _Node_allocator(__a)) |
956 | { |
957 | if (__x._M_root() != nullptr) |
958 | _M_root() = _M_copy(__x); |
959 | } |
960 | |
961 | _Rb_tree(_Rb_tree&&) = default; |
962 | |
963 | _Rb_tree(_Rb_tree&& __x, const allocator_type& __a) |
964 | : _Rb_tree(std::move(__x), _Node_allocator(__a)) |
965 | { } |
966 | |
967 | private: |
968 | _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, true_type) |
969 | noexcept(is_nothrow_default_constructible<_Compare>::value) |
970 | : _M_impl(std::move(__x._M_impl), std::move(__a)) |
971 | { } |
972 | |
973 | _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, false_type) |
974 | : _M_impl(__x._M_impl._M_key_compare, std::move(__a)) |
975 | { |
976 | if (__x._M_root() != nullptr) |
977 | _M_move_data(__x, false_type{}); |
978 | } |
979 | |
980 | public: |
981 | _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a) |
982 | noexcept( noexcept( |
983 | _Rb_tree(std::declval<_Rb_tree&&>(), std::declval<_Node_allocator&&>(), |
984 | std::declval<typename _Alloc_traits::is_always_equal>())) ) |
985 | : _Rb_tree(std::move(__x), std::move(__a), |
986 | typename _Alloc_traits::is_always_equal{}) |
987 | { } |
988 | #endif |
989 | |
990 | ~_Rb_tree() _GLIBCXX_NOEXCEPTnoexcept |
991 | { _M_erase(_M_begin()); } |
992 | |
993 | _Rb_tree& |
994 | operator=(const _Rb_tree& __x); |
995 | |
996 | // Accessors. |
997 | _Compare |
998 | key_comp() const |
999 | { return _M_impl._M_key_compare; } |
1000 | |
1001 | iterator |
1002 | begin() _GLIBCXX_NOEXCEPTnoexcept |
1003 | { return iterator(this->_M_impl._M_header._M_left); } |
1004 | |
1005 | const_iterator |
1006 | begin() const _GLIBCXX_NOEXCEPTnoexcept |
1007 | { return const_iterator(this->_M_impl._M_header._M_left); } |
1008 | |
1009 | iterator |
1010 | end() _GLIBCXX_NOEXCEPTnoexcept |
1011 | { return iterator(&this->_M_impl._M_header); } |
1012 | |
1013 | const_iterator |
1014 | end() const _GLIBCXX_NOEXCEPTnoexcept |
1015 | { return const_iterator(&this->_M_impl._M_header); } |
1016 | |
1017 | reverse_iterator |
1018 | rbegin() _GLIBCXX_NOEXCEPTnoexcept |
1019 | { return reverse_iterator(end()); } |
1020 | |
1021 | const_reverse_iterator |
1022 | rbegin() const _GLIBCXX_NOEXCEPTnoexcept |
1023 | { return const_reverse_iterator(end()); } |
1024 | |
1025 | reverse_iterator |
1026 | rend() _GLIBCXX_NOEXCEPTnoexcept |
1027 | { return reverse_iterator(begin()); } |
1028 | |
1029 | const_reverse_iterator |
1030 | rend() const _GLIBCXX_NOEXCEPTnoexcept |
1031 | { return const_reverse_iterator(begin()); } |
1032 | |
1033 | _GLIBCXX_NODISCARD bool |
1034 | empty() const _GLIBCXX_NOEXCEPTnoexcept |
1035 | { return _M_impl._M_node_count == 0; } |
1036 | |
1037 | size_type |
1038 | size() const _GLIBCXX_NOEXCEPTnoexcept |
1039 | { return _M_impl._M_node_count; } |
1040 | |
1041 | size_type |
1042 | max_size() const _GLIBCXX_NOEXCEPTnoexcept |
1043 | { return _Alloc_traits::max_size(_M_get_Node_allocator()); } |
1044 | |
1045 | void |
1046 | swap(_Rb_tree& __t) |
1047 | _GLIBCXX_NOEXCEPT_IF(__is_nothrow_swappable<_Compare>::value)noexcept(__is_nothrow_swappable<_Compare>::value); |
1048 | |
1049 | // Insert/erase. |
1050 | #if __cplusplus201402L >= 201103L |
1051 | template<typename _Arg> |
1052 | pair<iterator, bool> |
1053 | _M_insert_unique(_Arg&& __x); |
1054 | |
1055 | template<typename _Arg> |
1056 | iterator |
1057 | _M_insert_equal(_Arg&& __x); |
1058 | |
1059 | template<typename _Arg, typename _NodeGen> |
1060 | iterator |
1061 | _M_insert_unique_(const_iterator __pos, _Arg&& __x, _NodeGen&); |
1062 | |
1063 | template<typename _Arg> |
1064 | iterator |
1065 | _M_insert_unique_(const_iterator __pos, _Arg&& __x) |
1066 | { |
1067 | _Alloc_node __an(*this); |
1068 | return _M_insert_unique_(__pos, std::forward<_Arg>(__x), __an); |
1069 | } |
1070 | |
1071 | template<typename _Arg, typename _NodeGen> |
1072 | iterator |
1073 | _M_insert_equal_(const_iterator __pos, _Arg&& __x, _NodeGen&); |
1074 | |
1075 | template<typename _Arg> |
1076 | iterator |
1077 | _M_insert_equal_(const_iterator __pos, _Arg&& __x) |
1078 | { |
1079 | _Alloc_node __an(*this); |
1080 | return _M_insert_equal_(__pos, std::forward<_Arg>(__x), __an); |
1081 | } |
1082 | |
1083 | template<typename... _Args> |
1084 | pair<iterator, bool> |
1085 | _M_emplace_unique(_Args&&... __args); |
1086 | |
1087 | template<typename... _Args> |
1088 | iterator |
1089 | _M_emplace_equal(_Args&&... __args); |
1090 | |
1091 | template<typename... _Args> |
1092 | iterator |
1093 | _M_emplace_hint_unique(const_iterator __pos, _Args&&... __args); |
1094 | |
1095 | template<typename... _Args> |
1096 | iterator |
1097 | _M_emplace_hint_equal(const_iterator __pos, _Args&&... __args); |
1098 | |
1099 | template<typename _Iter> |
1100 | using __same_value_type |
1101 | = is_same<value_type, typename iterator_traits<_Iter>::value_type>; |
1102 | |
1103 | template<typename _InputIterator> |
1104 | __enable_if_t<__same_value_type<_InputIterator>::value> |
1105 | _M_insert_range_unique(_InputIterator __first, _InputIterator __last) |
1106 | { |
1107 | _Alloc_node __an(*this); |
1108 | for (; __first != __last; ++__first) |
1109 | _M_insert_unique_(end(), *__first, __an); |
1110 | } |
1111 | |
1112 | template<typename _InputIterator> |
1113 | __enable_if_t<!__same_value_type<_InputIterator>::value> |
1114 | _M_insert_range_unique(_InputIterator __first, _InputIterator __last) |
1115 | { |
1116 | for (; __first != __last; ++__first) |
1117 | _M_emplace_unique(*__first); |
1118 | } |
1119 | |
1120 | template<typename _InputIterator> |
1121 | __enable_if_t<__same_value_type<_InputIterator>::value> |
1122 | _M_insert_range_equal(_InputIterator __first, _InputIterator __last) |
1123 | { |
1124 | _Alloc_node __an(*this); |
1125 | for (; __first != __last; ++__first) |
1126 | _M_insert_equal_(end(), *__first, __an); |
1127 | } |
1128 | |
1129 | template<typename _InputIterator> |
1130 | __enable_if_t<!__same_value_type<_InputIterator>::value> |
1131 | _M_insert_range_equal(_InputIterator __first, _InputIterator __last) |
1132 | { |
1133 | _Alloc_node __an(*this); |
1134 | for (; __first != __last; ++__first) |
1135 | _M_emplace_equal(*__first); |
1136 | } |
1137 | #else |
1138 | pair<iterator, bool> |
1139 | _M_insert_unique(const value_type& __x); |
1140 | |
1141 | iterator |
1142 | _M_insert_equal(const value_type& __x); |
1143 | |
1144 | template<typename _NodeGen> |
1145 | iterator |
1146 | _M_insert_unique_(const_iterator __pos, const value_type& __x, |
1147 | _NodeGen&); |
1148 | |
1149 | iterator |
1150 | _M_insert_unique_(const_iterator __pos, const value_type& __x) |
1151 | { |
1152 | _Alloc_node __an(*this); |
1153 | return _M_insert_unique_(__pos, __x, __an); |
1154 | } |
1155 | |
1156 | template<typename _NodeGen> |
1157 | iterator |
1158 | _M_insert_equal_(const_iterator __pos, const value_type& __x, |
1159 | _NodeGen&); |
1160 | iterator |
1161 | _M_insert_equal_(const_iterator __pos, const value_type& __x) |
1162 | { |
1163 | _Alloc_node __an(*this); |
1164 | return _M_insert_equal_(__pos, __x, __an); |
1165 | } |
1166 | |
1167 | template<typename _InputIterator> |
1168 | void |
1169 | _M_insert_range_unique(_InputIterator __first, _InputIterator __last) |
1170 | { |
1171 | _Alloc_node __an(*this); |
1172 | for (; __first != __last; ++__first) |
1173 | _M_insert_unique_(end(), *__first, __an); |
1174 | } |
1175 | |
1176 | template<typename _InputIterator> |
1177 | void |
1178 | _M_insert_range_equal(_InputIterator __first, _InputIterator __last) |
1179 | { |
1180 | _Alloc_node __an(*this); |
1181 | for (; __first != __last; ++__first) |
1182 | _M_insert_equal_(end(), *__first, __an); |
1183 | } |
1184 | #endif |
1185 | |
1186 | private: |
1187 | void |
1188 | _M_erase_aux(const_iterator __position); |
1189 | |
1190 | void |
1191 | _M_erase_aux(const_iterator __first, const_iterator __last); |
1192 | |
1193 | public: |
1194 | #if __cplusplus201402L >= 201103L |
1195 | // _GLIBCXX_RESOLVE_LIB_DEFECTS |
1196 | // DR 130. Associative erase should return an iterator. |
1197 | _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11"))) |
1198 | iterator |
1199 | erase(const_iterator __position) |
1200 | { |
1201 | __glibcxx_assert(__position != end()); |
1202 | const_iterator __result = __position; |
1203 | ++__result; |
1204 | _M_erase_aux(__position); |
1205 | return __result._M_const_cast(); |
1206 | } |
1207 | |
1208 | // LWG 2059. |
1209 | _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11"))) |
1210 | iterator |
1211 | erase(iterator __position) |
1212 | { |
1213 | __glibcxx_assert(__position != end()); |
1214 | iterator __result = __position; |
1215 | ++__result; |
1216 | _M_erase_aux(__position); |
1217 | return __result; |
1218 | } |
1219 | #else |
1220 | void |
1221 | erase(iterator __position) |
1222 | { |
1223 | __glibcxx_assert(__position != end()); |
1224 | _M_erase_aux(__position); |
1225 | } |
1226 | |
1227 | void |
1228 | erase(const_iterator __position) |
1229 | { |
1230 | __glibcxx_assert(__position != end()); |
1231 | _M_erase_aux(__position); |
1232 | } |
1233 | #endif |
1234 | |
1235 | size_type |
1236 | erase(const key_type& __x); |
1237 | |
1238 | #if __cplusplus201402L >= 201103L |
1239 | // _GLIBCXX_RESOLVE_LIB_DEFECTS |
1240 | // DR 130. Associative erase should return an iterator. |
1241 | _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11"))) |
1242 | iterator |
1243 | erase(const_iterator __first, const_iterator __last) |
1244 | { |
1245 | _M_erase_aux(__first, __last); |
1246 | return __last._M_const_cast(); |
1247 | } |
1248 | #else |
1249 | void |
1250 | erase(iterator __first, iterator __last) |
1251 | { _M_erase_aux(__first, __last); } |
1252 | |
1253 | void |
1254 | erase(const_iterator __first, const_iterator __last) |
1255 | { _M_erase_aux(__first, __last); } |
1256 | #endif |
1257 | |
1258 | void |
1259 | clear() _GLIBCXX_NOEXCEPTnoexcept |
1260 | { |
1261 | _M_erase(_M_begin()); |
1262 | _M_impl._M_reset(); |
1263 | } |
1264 | |
1265 | // Set operations. |
1266 | iterator |
1267 | find(const key_type& __k); |
1268 | |
1269 | const_iterator |
1270 | find(const key_type& __k) const; |
1271 | |
1272 | size_type |
1273 | count(const key_type& __k) const; |
1274 | |
1275 | iterator |
1276 | lower_bound(const key_type& __k) |
1277 | { return _M_lower_bound(_M_begin(), _M_end(), __k); } |
1278 | |
1279 | const_iterator |
1280 | lower_bound(const key_type& __k) const |
1281 | { return _M_lower_bound(_M_begin(), _M_end(), __k); } |
1282 | |
1283 | iterator |
1284 | upper_bound(const key_type& __k) |
1285 | { return _M_upper_bound(_M_begin(), _M_end(), __k); } |
1286 | |
1287 | const_iterator |
1288 | upper_bound(const key_type& __k) const |
1289 | { return _M_upper_bound(_M_begin(), _M_end(), __k); } |
1290 | |
1291 | pair<iterator, iterator> |
1292 | equal_range(const key_type& __k); |
1293 | |
1294 | pair<const_iterator, const_iterator> |
1295 | equal_range(const key_type& __k) const; |
1296 | |
1297 | #if __cplusplus201402L >= 201402L |
1298 | template<typename _Kt, |
1299 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1300 | iterator |
1301 | _M_find_tr(const _Kt& __k) |
1302 | { |
1303 | const _Rb_tree* __const_this = this; |
1304 | return __const_this->_M_find_tr(__k)._M_const_cast(); |
1305 | } |
1306 | |
1307 | template<typename _Kt, |
1308 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1309 | const_iterator |
1310 | _M_find_tr(const _Kt& __k) const |
1311 | { |
1312 | auto __j = _M_lower_bound_tr(__k); |
1313 | if (__j != end() && _M_impl._M_key_compare(__k, _S_key(__j._M_node))) |
1314 | __j = end(); |
1315 | return __j; |
1316 | } |
1317 | |
1318 | template<typename _Kt, |
1319 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1320 | size_type |
1321 | _M_count_tr(const _Kt& __k) const |
1322 | { |
1323 | auto __p = _M_equal_range_tr(__k); |
1324 | return std::distance(__p.first, __p.second); |
1325 | } |
1326 | |
1327 | template<typename _Kt, |
1328 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1329 | iterator |
1330 | _M_lower_bound_tr(const _Kt& __k) |
1331 | { |
1332 | const _Rb_tree* __const_this = this; |
1333 | return __const_this->_M_lower_bound_tr(__k)._M_const_cast(); |
1334 | } |
1335 | |
1336 | template<typename _Kt, |
1337 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1338 | const_iterator |
1339 | _M_lower_bound_tr(const _Kt& __k) const |
1340 | { |
1341 | auto __x = _M_begin(); |
1342 | auto __y = _M_end(); |
1343 | while (__x != 0) |
1344 | if (!_M_impl._M_key_compare(_S_key(__x), __k)) |
1345 | { |
1346 | __y = __x; |
1347 | __x = _S_left(__x); |
1348 | } |
1349 | else |
1350 | __x = _S_right(__x); |
1351 | return const_iterator(__y); |
1352 | } |
1353 | |
1354 | template<typename _Kt, |
1355 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1356 | iterator |
1357 | _M_upper_bound_tr(const _Kt& __k) |
1358 | { |
1359 | const _Rb_tree* __const_this = this; |
1360 | return __const_this->_M_upper_bound_tr(__k)._M_const_cast(); |
1361 | } |
1362 | |
1363 | template<typename _Kt, |
1364 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1365 | const_iterator |
1366 | _M_upper_bound_tr(const _Kt& __k) const |
1367 | { |
1368 | auto __x = _M_begin(); |
1369 | auto __y = _M_end(); |
1370 | while (__x != 0) |
1371 | if (_M_impl._M_key_compare(__k, _S_key(__x))) |
1372 | { |
1373 | __y = __x; |
1374 | __x = _S_left(__x); |
1375 | } |
1376 | else |
1377 | __x = _S_right(__x); |
1378 | return const_iterator(__y); |
1379 | } |
1380 | |
1381 | template<typename _Kt, |
1382 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1383 | pair<iterator, iterator> |
1384 | _M_equal_range_tr(const _Kt& __k) |
1385 | { |
1386 | const _Rb_tree* __const_this = this; |
1387 | auto __ret = __const_this->_M_equal_range_tr(__k); |
1388 | return { __ret.first._M_const_cast(), __ret.second._M_const_cast() }; |
1389 | } |
1390 | |
1391 | template<typename _Kt, |
1392 | typename _Req = __has_is_transparent_t<_Compare, _Kt>> |
1393 | pair<const_iterator, const_iterator> |
1394 | _M_equal_range_tr(const _Kt& __k) const |
1395 | { |
1396 | auto __low = _M_lower_bound_tr(__k); |
1397 | auto __high = __low; |
1398 | auto& __cmp = _M_impl._M_key_compare; |
1399 | while (__high != end() && !__cmp(__k, _S_key(__high._M_node))) |
1400 | ++__high; |
1401 | return { __low, __high }; |
1402 | } |
1403 | #endif |
1404 | |
1405 | // Debugging. |
1406 | bool |
1407 | __rb_verify() const; |
1408 | |
1409 | #if __cplusplus201402L >= 201103L |
1410 | _Rb_tree& |
1411 | operator=(_Rb_tree&&) |
1412 | noexcept(_Alloc_traits::_S_nothrow_move() |
1413 | && is_nothrow_move_assignable<_Compare>::value); |
1414 | |
1415 | template<typename _Iterator> |
1416 | void |
1417 | _M_assign_unique(_Iterator, _Iterator); |
1418 | |
1419 | template<typename _Iterator> |
1420 | void |
1421 | _M_assign_equal(_Iterator, _Iterator); |
1422 | |
1423 | private: |
1424 | // Move elements from container with equal allocator. |
1425 | void |
1426 | _M_move_data(_Rb_tree& __x, true_type) |
1427 | { _M_impl._M_move_data(__x._M_impl); } |
1428 | |
1429 | // Move elements from container with possibly non-equal allocator, |
1430 | // which might result in a copy not a move. |
1431 | void |
1432 | _M_move_data(_Rb_tree&, false_type); |
1433 | |
1434 | // Move assignment from container with equal allocator. |
1435 | void |
1436 | _M_move_assign(_Rb_tree&, true_type); |
1437 | |
1438 | // Move assignment from container with possibly non-equal allocator, |
1439 | // which might result in a copy not a move. |
1440 | void |
1441 | _M_move_assign(_Rb_tree&, false_type); |
1442 | #endif |
1443 | |
1444 | #if __cplusplus201402L > 201402L |
1445 | public: |
1446 | /// Re-insert an extracted node. |
1447 | insert_return_type |
1448 | _M_reinsert_node_unique(node_type&& __nh) |
1449 | { |
1450 | insert_return_type __ret; |
1451 | if (__nh.empty()) |
1452 | __ret.position = end(); |
1453 | else |
1454 | { |
1455 | __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc); |
1456 | |
1457 | auto __res = _M_get_insert_unique_pos(__nh._M_key()); |
1458 | if (__res.second) |
1459 | { |
1460 | __ret.position |
1461 | = _M_insert_node(__res.first, __res.second, __nh._M_ptr); |
1462 | __nh._M_ptr = nullptr; |
1463 | __ret.inserted = true; |
1464 | } |
1465 | else |
1466 | { |
1467 | __ret.node = std::move(__nh); |
1468 | __ret.position = iterator(__res.first); |
1469 | __ret.inserted = false; |
1470 | } |
1471 | } |
1472 | return __ret; |
1473 | } |
1474 | |
1475 | /// Re-insert an extracted node. |
1476 | iterator |
1477 | _M_reinsert_node_equal(node_type&& __nh) |
1478 | { |
1479 | iterator __ret; |
1480 | if (__nh.empty()) |
1481 | __ret = end(); |
1482 | else |
1483 | { |
1484 | __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc); |
1485 | auto __res = _M_get_insert_equal_pos(__nh._M_key()); |
1486 | if (__res.second) |
1487 | __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr); |
1488 | else |
1489 | __ret = _M_insert_equal_lower_node(__nh._M_ptr); |
1490 | __nh._M_ptr = nullptr; |
1491 | } |
1492 | return __ret; |
1493 | } |
1494 | |
1495 | /// Re-insert an extracted node. |
1496 | iterator |
1497 | _M_reinsert_node_hint_unique(const_iterator __hint, node_type&& __nh) |
1498 | { |
1499 | iterator __ret; |
1500 | if (__nh.empty()) |
1501 | __ret = end(); |
1502 | else |
1503 | { |
1504 | __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc); |
1505 | auto __res = _M_get_insert_hint_unique_pos(__hint, __nh._M_key()); |
1506 | if (__res.second) |
1507 | { |
1508 | __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr); |
1509 | __nh._M_ptr = nullptr; |
1510 | } |
1511 | else |
1512 | __ret = iterator(__res.first); |
1513 | } |
1514 | return __ret; |
1515 | } |
1516 | |
1517 | /// Re-insert an extracted node. |
1518 | iterator |
1519 | _M_reinsert_node_hint_equal(const_iterator __hint, node_type&& __nh) |
1520 | { |
1521 | iterator __ret; |
1522 | if (__nh.empty()) |
1523 | __ret = end(); |
1524 | else |
1525 | { |
1526 | __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc); |
1527 | auto __res = _M_get_insert_hint_equal_pos(__hint, __nh._M_key()); |
1528 | if (__res.second) |
1529 | __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr); |
1530 | else |
1531 | __ret = _M_insert_equal_lower_node(__nh._M_ptr); |
1532 | __nh._M_ptr = nullptr; |
1533 | } |
1534 | return __ret; |
1535 | } |
1536 | |
1537 | /// Extract a node. |
1538 | node_type |
1539 | extract(const_iterator __pos) |
1540 | { |
1541 | auto __ptr = _Rb_tree_rebalance_for_erase( |
1542 | __pos._M_const_cast()._M_node, _M_impl._M_header); |
1543 | --_M_impl._M_node_count; |
1544 | return { static_cast<_Link_type>(__ptr), _M_get_Node_allocator() }; |
1545 | } |
1546 | |
1547 | /// Extract a node. |
1548 | node_type |
1549 | extract(const key_type& __k) |
1550 | { |
1551 | node_type __nh; |
1552 | auto __pos = find(__k); |
1553 | if (__pos != end()) |
1554 | __nh = extract(const_iterator(__pos)); |
1555 | return __nh; |
1556 | } |
1557 | |
1558 | template<typename _Compare2> |
1559 | using _Compatible_tree |
1560 | = _Rb_tree<_Key, _Val, _KeyOfValue, _Compare2, _Alloc>; |
1561 | |
1562 | template<typename, typename> |
1563 | friend class _Rb_tree_merge_helper; |
1564 | |
1565 | /// Merge from a compatible container into one with unique keys. |
1566 | template<typename _Compare2> |
1567 | void |
1568 | _M_merge_unique(_Compatible_tree<_Compare2>& __src) noexcept |
1569 | { |
1570 | using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>; |
1571 | for (auto __i = __src.begin(), __end = __src.end(); __i != __end;) |
1572 | { |
1573 | auto __pos = __i++; |
1574 | auto __res = _M_get_insert_unique_pos(_KeyOfValue()(*__pos)); |
1575 | if (__res.second) |
1576 | { |
1577 | auto& __src_impl = _Merge_helper::_S_get_impl(__src); |
1578 | auto __ptr = _Rb_tree_rebalance_for_erase( |
1579 | __pos._M_node, __src_impl._M_header); |
1580 | --__src_impl._M_node_count; |
1581 | _M_insert_node(__res.first, __res.second, |
1582 | static_cast<_Link_type>(__ptr)); |
1583 | } |
1584 | } |
1585 | } |
1586 | |
1587 | /// Merge from a compatible container into one with equivalent keys. |
1588 | template<typename _Compare2> |
1589 | void |
1590 | _M_merge_equal(_Compatible_tree<_Compare2>& __src) noexcept |
1591 | { |
1592 | using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>; |
1593 | for (auto __i = __src.begin(), __end = __src.end(); __i != __end;) |
1594 | { |
1595 | auto __pos = __i++; |
1596 | auto __res = _M_get_insert_equal_pos(_KeyOfValue()(*__pos)); |
1597 | if (__res.second) |
1598 | { |
1599 | auto& __src_impl = _Merge_helper::_S_get_impl(__src); |
1600 | auto __ptr = _Rb_tree_rebalance_for_erase( |
1601 | __pos._M_node, __src_impl._M_header); |
1602 | --__src_impl._M_node_count; |
1603 | _M_insert_node(__res.first, __res.second, |
1604 | static_cast<_Link_type>(__ptr)); |
1605 | } |
1606 | } |
1607 | } |
1608 | #endif // C++17 |
1609 | |
1610 | friend bool |
1611 | operator==(const _Rb_tree& __x, const _Rb_tree& __y) |
1612 | { |
1613 | return __x.size() == __y.size() |
1614 | && std::equal(__x.begin(), __x.end(), __y.begin()); |
1615 | } |
1616 | |
1617 | #if __cpp_lib_three_way_comparison |
1618 | friend auto |
1619 | operator<=>(const _Rb_tree& __x, const _Rb_tree& __y) |
1620 | { |
1621 | if constexpr (requires { typename __detail::__synth3way_t<_Val>; }) |
1622 | return std::lexicographical_compare_three_way(__x.begin(), __x.end(), |
1623 | __y.begin(), __y.end(), |
1624 | __detail::__synth3way); |
1625 | } |
1626 | #else |
1627 | friend bool |
1628 | operator<(const _Rb_tree& __x, const _Rb_tree& __y) |
1629 | { |
1630 | return std::lexicographical_compare(__x.begin(), __x.end(), |
1631 | __y.begin(), __y.end()); |
1632 | } |
1633 | |
1634 | friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__)) |
1635 | operator!=(const _Rb_tree& __x, const _Rb_tree& __y) |
1636 | { return !(__x == __y); } |
1637 | |
1638 | friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__)) |
1639 | operator>(const _Rb_tree& __x, const _Rb_tree& __y) |
1640 | { return __y < __x; } |
1641 | |
1642 | friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__)) |
1643 | operator<=(const _Rb_tree& __x, const _Rb_tree& __y) |
1644 | { return !(__y < __x); } |
1645 | |
1646 | friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__)) |
1647 | operator>=(const _Rb_tree& __x, const _Rb_tree& __y) |
1648 | { return !(__x < __y); } |
1649 | #endif |
1650 | }; |
1651 | |
1652 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1653 | typename _Compare, typename _Alloc> |
1654 | inline void |
1655 | swap(_Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>& __x, |
1656 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>& __y) |
1657 | { __x.swap(__y); } |
1658 | |
1659 | #if __cplusplus201402L >= 201103L |
1660 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1661 | typename _Compare, typename _Alloc> |
1662 | void |
1663 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1664 | _M_move_data(_Rb_tree& __x, false_type) |
1665 | { |
1666 | if (_M_get_Node_allocator() == __x._M_get_Node_allocator()) |
1667 | _M_move_data(__x, true_type()); |
1668 | else |
1669 | { |
1670 | _Alloc_node __an(*this); |
1671 | auto __lbd = |
1672 | [&__an](const value_type& __cval) |
1673 | { |
1674 | auto& __val = const_cast<value_type&>(__cval); |
1675 | return __an(std::move_if_noexcept(__val)); |
1676 | }; |
1677 | _M_root() = _M_copy(__x, __lbd); |
1678 | } |
1679 | } |
1680 | |
1681 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1682 | typename _Compare, typename _Alloc> |
1683 | inline void |
1684 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1685 | _M_move_assign(_Rb_tree& __x, true_type) |
1686 | { |
1687 | clear(); |
1688 | if (__x._M_root() != nullptr) |
1689 | _M_move_data(__x, true_type()); |
1690 | std::__alloc_on_move(_M_get_Node_allocator(), |
1691 | __x._M_get_Node_allocator()); |
1692 | } |
1693 | |
1694 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1695 | typename _Compare, typename _Alloc> |
1696 | void |
1697 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1698 | _M_move_assign(_Rb_tree& __x, false_type) |
1699 | { |
1700 | if (_M_get_Node_allocator() == __x._M_get_Node_allocator()) |
1701 | return _M_move_assign(__x, true_type{}); |
1702 | |
1703 | // Try to move each node reusing existing nodes and copying __x nodes |
1704 | // structure. |
1705 | _Reuse_or_alloc_node __roan(*this); |
1706 | _M_impl._M_reset(); |
1707 | if (__x._M_root() != nullptr) |
1708 | { |
1709 | auto __lbd = |
1710 | [&__roan](const value_type& __cval) |
1711 | { |
1712 | auto& __val = const_cast<value_type&>(__cval); |
1713 | return __roan(std::move(__val)); |
1714 | }; |
1715 | _M_root() = _M_copy(__x, __lbd); |
1716 | __x.clear(); |
1717 | } |
1718 | } |
1719 | |
1720 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1721 | typename _Compare, typename _Alloc> |
1722 | inline _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>& |
1723 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1724 | operator=(_Rb_tree&& __x) |
1725 | noexcept(_Alloc_traits::_S_nothrow_move() |
1726 | && is_nothrow_move_assignable<_Compare>::value) |
1727 | { |
1728 | _M_impl._M_key_compare = std::move(__x._M_impl._M_key_compare); |
1729 | _M_move_assign(__x, __bool_constant<_Alloc_traits::_S_nothrow_move()>()); |
1730 | return *this; |
1731 | } |
1732 | |
1733 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1734 | typename _Compare, typename _Alloc> |
1735 | template<typename _Iterator> |
1736 | void |
1737 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1738 | _M_assign_unique(_Iterator __first, _Iterator __last) |
1739 | { |
1740 | _Reuse_or_alloc_node __roan(*this); |
1741 | _M_impl._M_reset(); |
1742 | for (; __first != __last; ++__first) |
1743 | _M_insert_unique_(end(), *__first, __roan); |
1744 | } |
1745 | |
1746 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1747 | typename _Compare, typename _Alloc> |
1748 | template<typename _Iterator> |
1749 | void |
1750 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1751 | _M_assign_equal(_Iterator __first, _Iterator __last) |
1752 | { |
1753 | _Reuse_or_alloc_node __roan(*this); |
1754 | _M_impl._M_reset(); |
1755 | for (; __first != __last; ++__first) |
1756 | _M_insert_equal_(end(), *__first, __roan); |
1757 | } |
1758 | #endif |
1759 | |
1760 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1761 | typename _Compare, typename _Alloc> |
1762 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>& |
1763 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1764 | operator=(const _Rb_tree& __x) |
1765 | { |
1766 | if (this != &__x) |
1767 | { |
1768 | // Note that _Key may be a constant type. |
1769 | #if __cplusplus201402L >= 201103L |
1770 | if (_Alloc_traits::_S_propagate_on_copy_assign()) |
1771 | { |
1772 | auto& __this_alloc = this->_M_get_Node_allocator(); |
1773 | auto& __that_alloc = __x._M_get_Node_allocator(); |
1774 | if (!_Alloc_traits::_S_always_equal() |
1775 | && __this_alloc != __that_alloc) |
1776 | { |
1777 | // Replacement allocator cannot free existing storage, we need |
1778 | // to erase nodes first. |
1779 | clear(); |
1780 | std::__alloc_on_copy(__this_alloc, __that_alloc); |
1781 | } |
1782 | } |
1783 | #endif |
1784 | |
1785 | _Reuse_or_alloc_node __roan(*this); |
1786 | _M_impl._M_reset(); |
1787 | _M_impl._M_key_compare = __x._M_impl._M_key_compare; |
1788 | if (__x._M_root() != 0) |
1789 | _M_root() = _M_copy(__x, __roan); |
1790 | } |
1791 | |
1792 | return *this; |
1793 | } |
1794 | |
1795 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1796 | typename _Compare, typename _Alloc> |
1797 | #if __cplusplus201402L >= 201103L |
1798 | template<typename _Arg, typename _NodeGen> |
1799 | #else |
1800 | template<typename _NodeGen> |
1801 | #endif |
1802 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
1803 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1804 | _M_insert_(_Base_ptr __x, _Base_ptr __p, |
1805 | #if __cplusplus201402L >= 201103L |
1806 | _Arg&& __v, |
1807 | #else |
1808 | const _Val& __v, |
1809 | #endif |
1810 | _NodeGen& __node_gen) |
1811 | { |
1812 | bool __insert_left = (__x != 0 || __p == _M_end() |
1813 | || _M_impl._M_key_compare(_KeyOfValue()(__v), |
1814 | _S_key(__p))); |
1815 | |
1816 | _Link_type __z = __node_gen(_GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v)); |
1817 | |
1818 | _Rb_tree_insert_and_rebalance(__insert_left, __z, __p, |
1819 | this->_M_impl._M_header); |
1820 | ++_M_impl._M_node_count; |
1821 | return iterator(__z); |
1822 | } |
1823 | |
1824 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1825 | typename _Compare, typename _Alloc> |
1826 | #if __cplusplus201402L >= 201103L |
1827 | template<typename _Arg> |
1828 | #endif |
1829 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
1830 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1831 | #if __cplusplus201402L >= 201103L |
1832 | _M_insert_lower(_Base_ptr __p, _Arg&& __v) |
1833 | #else |
1834 | _M_insert_lower(_Base_ptr __p, const _Val& __v) |
1835 | #endif |
1836 | { |
1837 | bool __insert_left = (__p == _M_end() |
1838 | || !_M_impl._M_key_compare(_S_key(__p), |
1839 | _KeyOfValue()(__v))); |
1840 | |
1841 | _Link_type __z = _M_create_node(_GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v)); |
1842 | |
1843 | _Rb_tree_insert_and_rebalance(__insert_left, __z, __p, |
1844 | this->_M_impl._M_header); |
1845 | ++_M_impl._M_node_count; |
1846 | return iterator(__z); |
1847 | } |
1848 | |
1849 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1850 | typename _Compare, typename _Alloc> |
1851 | #if __cplusplus201402L >= 201103L |
1852 | template<typename _Arg> |
1853 | #endif |
1854 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
1855 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1856 | #if __cplusplus201402L >= 201103L |
1857 | _M_insert_equal_lower(_Arg&& __v) |
1858 | #else |
1859 | _M_insert_equal_lower(const _Val& __v) |
1860 | #endif |
1861 | { |
1862 | _Link_type __x = _M_begin(); |
1863 | _Base_ptr __y = _M_end(); |
1864 | while (__x != 0) |
1865 | { |
1866 | __y = __x; |
1867 | __x = !_M_impl._M_key_compare(_S_key(__x), _KeyOfValue()(__v)) ? |
1868 | _S_left(__x) : _S_right(__x); |
1869 | } |
1870 | return _M_insert_lower(__y, _GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v)); |
1871 | } |
1872 | |
1873 | template<typename _Key, typename _Val, typename _KoV, |
1874 | typename _Compare, typename _Alloc> |
1875 | template<typename _NodeGen> |
1876 | typename _Rb_tree<_Key, _Val, _KoV, _Compare, _Alloc>::_Link_type |
1877 | _Rb_tree<_Key, _Val, _KoV, _Compare, _Alloc>:: |
1878 | _M_copy(_Const_Link_type __x, _Base_ptr __p, _NodeGen& __node_gen) |
1879 | { |
1880 | // Structural copy. __x and __p must be non-null. |
1881 | _Link_type __top = _M_clone_node(__x, __node_gen); |
1882 | __top->_M_parent = __p; |
1883 | |
1884 | __tryif (true) |
1885 | { |
1886 | if (__x->_M_right) |
1887 | __top->_M_right = _M_copy(_S_right(__x), __top, __node_gen); |
1888 | __p = __top; |
1889 | __x = _S_left(__x); |
1890 | |
1891 | while (__x != 0) |
1892 | { |
1893 | _Link_type __y = _M_clone_node(__x, __node_gen); |
1894 | __p->_M_left = __y; |
1895 | __y->_M_parent = __p; |
1896 | if (__x->_M_right) |
1897 | __y->_M_right = _M_copy(_S_right(__x), __y, __node_gen); |
1898 | __p = __y; |
1899 | __x = _S_left(__x); |
1900 | } |
1901 | } |
1902 | __catch(...)if (false) |
1903 | { |
1904 | _M_erase(__top); |
1905 | __throw_exception_again; |
1906 | } |
1907 | return __top; |
1908 | } |
1909 | |
1910 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1911 | typename _Compare, typename _Alloc> |
1912 | void |
1913 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1914 | _M_erase(_Link_type __x) |
1915 | { |
1916 | // Erase without rebalancing. |
1917 | while (__x != 0) |
1918 | { |
1919 | _M_erase(_S_right(__x)); |
1920 | _Link_type __y = _S_left(__x); |
1921 | _M_drop_node(__x); |
1922 | __x = __y; |
1923 | } |
1924 | } |
1925 | |
1926 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1927 | typename _Compare, typename _Alloc> |
1928 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1929 | _Compare, _Alloc>::iterator |
1930 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1931 | _M_lower_bound(_Link_type __x, _Base_ptr __y, |
1932 | const _Key& __k) |
1933 | { |
1934 | while (__x != 0) |
1935 | if (!_M_impl._M_key_compare(_S_key(__x), __k)) |
1936 | __y = __x, __x = _S_left(__x); |
1937 | else |
1938 | __x = _S_right(__x); |
1939 | return iterator(__y); |
1940 | } |
1941 | |
1942 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1943 | typename _Compare, typename _Alloc> |
1944 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1945 | _Compare, _Alloc>::const_iterator |
1946 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1947 | _M_lower_bound(_Const_Link_type __x, _Const_Base_ptr __y, |
1948 | const _Key& __k) const |
1949 | { |
1950 | while (__x != 0) |
1951 | if (!_M_impl._M_key_compare(_S_key(__x), __k)) |
1952 | __y = __x, __x = _S_left(__x); |
1953 | else |
1954 | __x = _S_right(__x); |
1955 | return const_iterator(__y); |
1956 | } |
1957 | |
1958 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1959 | typename _Compare, typename _Alloc> |
1960 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1961 | _Compare, _Alloc>::iterator |
1962 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1963 | _M_upper_bound(_Link_type __x, _Base_ptr __y, |
1964 | const _Key& __k) |
1965 | { |
1966 | while (__x != 0) |
1967 | if (_M_impl._M_key_compare(__k, _S_key(__x))) |
1968 | __y = __x, __x = _S_left(__x); |
1969 | else |
1970 | __x = _S_right(__x); |
1971 | return iterator(__y); |
1972 | } |
1973 | |
1974 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1975 | typename _Compare, typename _Alloc> |
1976 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1977 | _Compare, _Alloc>::const_iterator |
1978 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1979 | _M_upper_bound(_Const_Link_type __x, _Const_Base_ptr __y, |
1980 | const _Key& __k) const |
1981 | { |
1982 | while (__x != 0) |
1983 | if (_M_impl._M_key_compare(__k, _S_key(__x))) |
1984 | __y = __x, __x = _S_left(__x); |
1985 | else |
1986 | __x = _S_right(__x); |
1987 | return const_iterator(__y); |
1988 | } |
1989 | |
1990 | template<typename _Key, typename _Val, typename _KeyOfValue, |
1991 | typename _Compare, typename _Alloc> |
1992 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1993 | _Compare, _Alloc>::iterator, |
1994 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
1995 | _Compare, _Alloc>::iterator> |
1996 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
1997 | equal_range(const _Key& __k) |
1998 | { |
1999 | _Link_type __x = _M_begin(); |
2000 | _Base_ptr __y = _M_end(); |
2001 | while (__x != 0) |
2002 | { |
2003 | if (_M_impl._M_key_compare(_S_key(__x), __k)) |
2004 | __x = _S_right(__x); |
2005 | else if (_M_impl._M_key_compare(__k, _S_key(__x))) |
2006 | __y = __x, __x = _S_left(__x); |
2007 | else |
2008 | { |
2009 | _Link_type __xu(__x); |
2010 | _Base_ptr __yu(__y); |
2011 | __y = __x, __x = _S_left(__x); |
2012 | __xu = _S_right(__xu); |
2013 | return pair<iterator, |
2014 | iterator>(_M_lower_bound(__x, __y, __k), |
2015 | _M_upper_bound(__xu, __yu, __k)); |
2016 | } |
2017 | } |
2018 | return pair<iterator, iterator>(iterator(__y), |
2019 | iterator(__y)); |
2020 | } |
2021 | |
2022 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2023 | typename _Compare, typename _Alloc> |
2024 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2025 | _Compare, _Alloc>::const_iterator, |
2026 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2027 | _Compare, _Alloc>::const_iterator> |
2028 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2029 | equal_range(const _Key& __k) const |
2030 | { |
2031 | _Const_Link_type __x = _M_begin(); |
2032 | _Const_Base_ptr __y = _M_end(); |
2033 | while (__x != 0) |
2034 | { |
2035 | if (_M_impl._M_key_compare(_S_key(__x), __k)) |
2036 | __x = _S_right(__x); |
2037 | else if (_M_impl._M_key_compare(__k, _S_key(__x))) |
2038 | __y = __x, __x = _S_left(__x); |
2039 | else |
2040 | { |
2041 | _Const_Link_type __xu(__x); |
2042 | _Const_Base_ptr __yu(__y); |
2043 | __y = __x, __x = _S_left(__x); |
2044 | __xu = _S_right(__xu); |
2045 | return pair<const_iterator, |
2046 | const_iterator>(_M_lower_bound(__x, __y, __k), |
2047 | _M_upper_bound(__xu, __yu, __k)); |
2048 | } |
2049 | } |
2050 | return pair<const_iterator, const_iterator>(const_iterator(__y), |
2051 | const_iterator(__y)); |
2052 | } |
2053 | |
2054 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2055 | typename _Compare, typename _Alloc> |
2056 | void |
2057 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2058 | swap(_Rb_tree& __t) |
2059 | _GLIBCXX_NOEXCEPT_IF(__is_nothrow_swappable<_Compare>::value)noexcept(__is_nothrow_swappable<_Compare>::value) |
2060 | { |
2061 | if (_M_root() == 0) |
2062 | { |
2063 | if (__t._M_root() != 0) |
2064 | _M_impl._M_move_data(__t._M_impl); |
2065 | } |
2066 | else if (__t._M_root() == 0) |
2067 | __t._M_impl._M_move_data(_M_impl); |
2068 | else |
2069 | { |
2070 | std::swap(_M_root(),__t._M_root()); |
2071 | std::swap(_M_leftmost(),__t._M_leftmost()); |
2072 | std::swap(_M_rightmost(),__t._M_rightmost()); |
2073 | |
2074 | _M_root()->_M_parent = _M_end(); |
2075 | __t._M_root()->_M_parent = __t._M_end(); |
2076 | std::swap(this->_M_impl._M_node_count, __t._M_impl._M_node_count); |
2077 | } |
2078 | // No need to swap header's color as it does not change. |
2079 | std::swap(this->_M_impl._M_key_compare, __t._M_impl._M_key_compare); |
2080 | |
2081 | _Alloc_traits::_S_on_swap(_M_get_Node_allocator(), |
2082 | __t._M_get_Node_allocator()); |
2083 | } |
2084 | |
2085 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2086 | typename _Compare, typename _Alloc> |
2087 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2088 | _Compare, _Alloc>::_Base_ptr, |
2089 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2090 | _Compare, _Alloc>::_Base_ptr> |
2091 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2092 | _M_get_insert_unique_pos(const key_type& __k) |
2093 | { |
2094 | typedef pair<_Base_ptr, _Base_ptr> _Res; |
2095 | _Link_type __x = _M_begin(); |
2096 | _Base_ptr __y = _M_end(); |
2097 | bool __comp = true; |
2098 | while (__x != 0) |
2099 | { |
2100 | __y = __x; |
2101 | __comp = _M_impl._M_key_compare(__k, _S_key(__x)); |
2102 | __x = __comp ? _S_left(__x) : _S_right(__x); |
2103 | } |
2104 | iterator __j = iterator(__y); |
2105 | if (__comp) |
2106 | { |
2107 | if (__j == begin()) |
2108 | return _Res(__x, __y); |
2109 | else |
2110 | --__j; |
2111 | } |
2112 | if (_M_impl._M_key_compare(_S_key(__j._M_node), __k)) |
2113 | return _Res(__x, __y); |
2114 | return _Res(__j._M_node, 0); |
2115 | } |
2116 | |
2117 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2118 | typename _Compare, typename _Alloc> |
2119 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2120 | _Compare, _Alloc>::_Base_ptr, |
2121 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2122 | _Compare, _Alloc>::_Base_ptr> |
2123 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2124 | _M_get_insert_equal_pos(const key_type& __k) |
2125 | { |
2126 | typedef pair<_Base_ptr, _Base_ptr> _Res; |
2127 | _Link_type __x = _M_begin(); |
2128 | _Base_ptr __y = _M_end(); |
2129 | while (__x != 0) |
2130 | { |
2131 | __y = __x; |
2132 | __x = _M_impl._M_key_compare(__k, _S_key(__x)) ? |
2133 | _S_left(__x) : _S_right(__x); |
2134 | } |
2135 | return _Res(__x, __y); |
2136 | } |
2137 | |
2138 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2139 | typename _Compare, typename _Alloc> |
2140 | #if __cplusplus201402L >= 201103L |
2141 | template<typename _Arg> |
2142 | #endif |
2143 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2144 | _Compare, _Alloc>::iterator, bool> |
2145 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2146 | #if __cplusplus201402L >= 201103L |
2147 | _M_insert_unique(_Arg&& __v) |
2148 | #else |
2149 | _M_insert_unique(const _Val& __v) |
2150 | #endif |
2151 | { |
2152 | typedef pair<iterator, bool> _Res; |
2153 | pair<_Base_ptr, _Base_ptr> __res |
2154 | = _M_get_insert_unique_pos(_KeyOfValue()(__v)); |
2155 | |
2156 | if (__res.second) |
2157 | { |
2158 | _Alloc_node __an(*this); |
2159 | return _Res(_M_insert_(__res.first, __res.second, |
2160 | _GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v), __an), |
2161 | true); |
2162 | } |
2163 | |
2164 | return _Res(iterator(__res.first), false); |
2165 | } |
2166 | |
2167 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2168 | typename _Compare, typename _Alloc> |
2169 | #if __cplusplus201402L >= 201103L |
2170 | template<typename _Arg> |
2171 | #endif |
2172 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2173 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2174 | #if __cplusplus201402L >= 201103L |
2175 | _M_insert_equal(_Arg&& __v) |
2176 | #else |
2177 | _M_insert_equal(const _Val& __v) |
2178 | #endif |
2179 | { |
2180 | pair<_Base_ptr, _Base_ptr> __res |
2181 | = _M_get_insert_equal_pos(_KeyOfValue()(__v)); |
2182 | _Alloc_node __an(*this); |
2183 | return _M_insert_(__res.first, __res.second, |
2184 | _GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v), __an); |
2185 | } |
2186 | |
2187 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2188 | typename _Compare, typename _Alloc> |
2189 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2190 | _Compare, _Alloc>::_Base_ptr, |
2191 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2192 | _Compare, _Alloc>::_Base_ptr> |
2193 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2194 | _M_get_insert_hint_unique_pos(const_iterator __position, |
2195 | const key_type& __k) |
2196 | { |
2197 | iterator __pos = __position._M_const_cast(); |
2198 | typedef pair<_Base_ptr, _Base_ptr> _Res; |
2199 | |
2200 | // end() |
2201 | if (__pos._M_node == _M_end()) |
2202 | { |
2203 | if (size() > 0 |
2204 | && _M_impl._M_key_compare(_S_key(_M_rightmost()), __k)) |
2205 | return _Res(0, _M_rightmost()); |
2206 | else |
2207 | return _M_get_insert_unique_pos(__k); |
2208 | } |
2209 | else if (_M_impl._M_key_compare(__k, _S_key(__pos._M_node))) |
2210 | { |
2211 | // First, try before... |
2212 | iterator __before = __pos; |
2213 | if (__pos._M_node == _M_leftmost()) // begin() |
2214 | return _Res(_M_leftmost(), _M_leftmost()); |
2215 | else if (_M_impl._M_key_compare(_S_key((--__before)._M_node), __k)) |
2216 | { |
2217 | if (_S_right(__before._M_node) == 0) |
2218 | return _Res(0, __before._M_node); |
2219 | else |
2220 | return _Res(__pos._M_node, __pos._M_node); |
2221 | } |
2222 | else |
2223 | return _M_get_insert_unique_pos(__k); |
2224 | } |
2225 | else if (_M_impl._M_key_compare(_S_key(__pos._M_node), __k)) |
2226 | { |
2227 | // ... then try after. |
2228 | iterator __after = __pos; |
2229 | if (__pos._M_node == _M_rightmost()) |
2230 | return _Res(0, _M_rightmost()); |
2231 | else if (_M_impl._M_key_compare(__k, _S_key((++__after)._M_node))) |
2232 | { |
2233 | if (_S_right(__pos._M_node) == 0) |
2234 | return _Res(0, __pos._M_node); |
2235 | else |
2236 | return _Res(__after._M_node, __after._M_node); |
2237 | } |
2238 | else |
2239 | return _M_get_insert_unique_pos(__k); |
2240 | } |
2241 | else |
2242 | // Equivalent keys. |
2243 | return _Res(__pos._M_node, 0); |
2244 | } |
2245 | |
2246 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2247 | typename _Compare, typename _Alloc> |
2248 | #if __cplusplus201402L >= 201103L |
2249 | template<typename _Arg, typename _NodeGen> |
2250 | #else |
2251 | template<typename _NodeGen> |
2252 | #endif |
2253 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2254 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2255 | _M_insert_unique_(const_iterator __position, |
2256 | #if __cplusplus201402L >= 201103L |
2257 | _Arg&& __v, |
2258 | #else |
2259 | const _Val& __v, |
2260 | #endif |
2261 | _NodeGen& __node_gen) |
2262 | { |
2263 | pair<_Base_ptr, _Base_ptr> __res |
2264 | = _M_get_insert_hint_unique_pos(__position, _KeyOfValue()(__v)); |
2265 | |
2266 | if (__res.second) |
2267 | return _M_insert_(__res.first, __res.second, |
2268 | _GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v), |
2269 | __node_gen); |
2270 | return iterator(__res.first); |
2271 | } |
2272 | |
2273 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2274 | typename _Compare, typename _Alloc> |
2275 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2276 | _Compare, _Alloc>::_Base_ptr, |
2277 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2278 | _Compare, _Alloc>::_Base_ptr> |
2279 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2280 | _M_get_insert_hint_equal_pos(const_iterator __position, const key_type& __k) |
2281 | { |
2282 | iterator __pos = __position._M_const_cast(); |
2283 | typedef pair<_Base_ptr, _Base_ptr> _Res; |
2284 | |
2285 | // end() |
2286 | if (__pos._M_node == _M_end()) |
2287 | { |
2288 | if (size() > 0 |
2289 | && !_M_impl._M_key_compare(__k, _S_key(_M_rightmost()))) |
2290 | return _Res(0, _M_rightmost()); |
2291 | else |
2292 | return _M_get_insert_equal_pos(__k); |
2293 | } |
2294 | else if (!_M_impl._M_key_compare(_S_key(__pos._M_node), __k)) |
2295 | { |
2296 | // First, try before... |
2297 | iterator __before = __pos; |
2298 | if (__pos._M_node == _M_leftmost()) // begin() |
2299 | return _Res(_M_leftmost(), _M_leftmost()); |
2300 | else if (!_M_impl._M_key_compare(__k, _S_key((--__before)._M_node))) |
2301 | { |
2302 | if (_S_right(__before._M_node) == 0) |
2303 | return _Res(0, __before._M_node); |
2304 | else |
2305 | return _Res(__pos._M_node, __pos._M_node); |
2306 | } |
2307 | else |
2308 | return _M_get_insert_equal_pos(__k); |
2309 | } |
2310 | else |
2311 | { |
2312 | // ... then try after. |
2313 | iterator __after = __pos; |
2314 | if (__pos._M_node == _M_rightmost()) |
2315 | return _Res(0, _M_rightmost()); |
2316 | else if (!_M_impl._M_key_compare(_S_key((++__after)._M_node), __k)) |
2317 | { |
2318 | if (_S_right(__pos._M_node) == 0) |
2319 | return _Res(0, __pos._M_node); |
2320 | else |
2321 | return _Res(__after._M_node, __after._M_node); |
2322 | } |
2323 | else |
2324 | return _Res(0, 0); |
2325 | } |
2326 | } |
2327 | |
2328 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2329 | typename _Compare, typename _Alloc> |
2330 | #if __cplusplus201402L >= 201103L |
2331 | template<typename _Arg, typename _NodeGen> |
2332 | #else |
2333 | template<typename _NodeGen> |
2334 | #endif |
2335 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2336 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2337 | _M_insert_equal_(const_iterator __position, |
2338 | #if __cplusplus201402L >= 201103L |
2339 | _Arg&& __v, |
2340 | #else |
2341 | const _Val& __v, |
2342 | #endif |
2343 | _NodeGen& __node_gen) |
2344 | { |
2345 | pair<_Base_ptr, _Base_ptr> __res |
2346 | = _M_get_insert_hint_equal_pos(__position, _KeyOfValue()(__v)); |
2347 | |
2348 | if (__res.second) |
2349 | return _M_insert_(__res.first, __res.second, |
2350 | _GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v), |
2351 | __node_gen); |
2352 | |
2353 | return _M_insert_equal_lower(_GLIBCXX_FORWARD(_Arg, __v)std::forward<_Arg>(__v)); |
2354 | } |
2355 | |
2356 | #if __cplusplus201402L >= 201103L |
2357 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2358 | typename _Compare, typename _Alloc> |
2359 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2360 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2361 | _M_insert_node(_Base_ptr __x, _Base_ptr __p, _Link_type __z) |
2362 | { |
2363 | bool __insert_left = (__x != 0 || __p == _M_end() |
2364 | || _M_impl._M_key_compare(_S_key(__z), |
2365 | _S_key(__p))); |
2366 | |
2367 | _Rb_tree_insert_and_rebalance(__insert_left, __z, __p, |
2368 | this->_M_impl._M_header); |
2369 | ++_M_impl._M_node_count; |
2370 | return iterator(__z); |
2371 | } |
2372 | |
2373 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2374 | typename _Compare, typename _Alloc> |
2375 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2376 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2377 | _M_insert_lower_node(_Base_ptr __p, _Link_type __z) |
2378 | { |
2379 | bool __insert_left = (__p == _M_end() |
2380 | || !_M_impl._M_key_compare(_S_key(__p), |
2381 | _S_key(__z))); |
2382 | |
2383 | _Rb_tree_insert_and_rebalance(__insert_left, __z, __p, |
2384 | this->_M_impl._M_header); |
2385 | ++_M_impl._M_node_count; |
2386 | return iterator(__z); |
2387 | } |
2388 | |
2389 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2390 | typename _Compare, typename _Alloc> |
2391 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2392 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2393 | _M_insert_equal_lower_node(_Link_type __z) |
2394 | { |
2395 | _Link_type __x = _M_begin(); |
2396 | _Base_ptr __y = _M_end(); |
2397 | while (__x != 0) |
2398 | { |
2399 | __y = __x; |
2400 | __x = !_M_impl._M_key_compare(_S_key(__x), _S_key(__z)) ? |
2401 | _S_left(__x) : _S_right(__x); |
2402 | } |
2403 | return _M_insert_lower_node(__y, __z); |
2404 | } |
2405 | |
2406 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2407 | typename _Compare, typename _Alloc> |
2408 | template<typename... _Args> |
2409 | pair<typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2410 | _Compare, _Alloc>::iterator, bool> |
2411 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2412 | _M_emplace_unique(_Args&&... __args) |
2413 | { |
2414 | _Link_type __z = _M_create_node(std::forward<_Args>(__args)...); |
2415 | |
2416 | __tryif (true) |
2417 | { |
2418 | typedef pair<iterator, bool> _Res; |
2419 | auto __res = _M_get_insert_unique_pos(_S_key(__z)); |
2420 | if (__res.second) |
2421 | return _Res(_M_insert_node(__res.first, __res.second, __z), true); |
2422 | |
2423 | _M_drop_node(__z); |
2424 | return _Res(iterator(__res.first), false); |
2425 | } |
2426 | __catch(...)if (false) |
2427 | { |
2428 | _M_drop_node(__z); |
2429 | __throw_exception_again; |
2430 | } |
2431 | } |
2432 | |
2433 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2434 | typename _Compare, typename _Alloc> |
2435 | template<typename... _Args> |
2436 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2437 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2438 | _M_emplace_equal(_Args&&... __args) |
2439 | { |
2440 | _Link_type __z = _M_create_node(std::forward<_Args>(__args)...); |
2441 | |
2442 | __tryif (true) |
2443 | { |
2444 | auto __res = _M_get_insert_equal_pos(_S_key(__z)); |
2445 | return _M_insert_node(__res.first, __res.second, __z); |
2446 | } |
2447 | __catch(...)if (false) |
2448 | { |
2449 | _M_drop_node(__z); |
2450 | __throw_exception_again; |
2451 | } |
2452 | } |
2453 | |
2454 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2455 | typename _Compare, typename _Alloc> |
2456 | template<typename... _Args> |
2457 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2458 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2459 | _M_emplace_hint_unique(const_iterator __pos, _Args&&... __args) |
2460 | { |
2461 | _Link_type __z = _M_create_node(std::forward<_Args>(__args)...); |
2462 | |
2463 | __tryif (true) |
2464 | { |
2465 | auto __res = _M_get_insert_hint_unique_pos(__pos, _S_key(__z)); |
2466 | |
2467 | if (__res.second) |
2468 | return _M_insert_node(__res.first, __res.second, __z); |
2469 | |
2470 | _M_drop_node(__z); |
2471 | return iterator(__res.first); |
2472 | } |
2473 | __catch(...)if (false) |
2474 | { |
2475 | _M_drop_node(__z); |
2476 | __throw_exception_again; |
2477 | } |
2478 | } |
2479 | |
2480 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2481 | typename _Compare, typename _Alloc> |
2482 | template<typename... _Args> |
2483 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator |
2484 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2485 | _M_emplace_hint_equal(const_iterator __pos, _Args&&... __args) |
2486 | { |
2487 | _Link_type __z = _M_create_node(std::forward<_Args>(__args)...); |
2488 | |
2489 | __tryif (true) |
2490 | { |
2491 | auto __res = _M_get_insert_hint_equal_pos(__pos, _S_key(__z)); |
2492 | |
2493 | if (__res.second) |
2494 | return _M_insert_node(__res.first, __res.second, __z); |
2495 | |
2496 | return _M_insert_equal_lower_node(__z); |
2497 | } |
2498 | __catch(...)if (false) |
2499 | { |
2500 | _M_drop_node(__z); |
2501 | __throw_exception_again; |
2502 | } |
2503 | } |
2504 | #endif |
2505 | |
2506 | |
2507 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2508 | typename _Compare, typename _Alloc> |
2509 | void |
2510 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2511 | _M_erase_aux(const_iterator __position) |
2512 | { |
2513 | _Link_type __y = |
2514 | static_cast<_Link_type>(_Rb_tree_rebalance_for_erase |
2515 | (const_cast<_Base_ptr>(__position._M_node), |
2516 | this->_M_impl._M_header)); |
2517 | _M_drop_node(__y); |
2518 | --_M_impl._M_node_count; |
2519 | } |
2520 | |
2521 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2522 | typename _Compare, typename _Alloc> |
2523 | void |
2524 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2525 | _M_erase_aux(const_iterator __first, const_iterator __last) |
2526 | { |
2527 | if (__first == begin() && __last == end()) |
2528 | clear(); |
2529 | else |
2530 | while (__first != __last) |
2531 | _M_erase_aux(__first++); |
2532 | } |
2533 | |
2534 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2535 | typename _Compare, typename _Alloc> |
2536 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::size_type |
2537 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2538 | erase(const _Key& __x) |
2539 | { |
2540 | pair<iterator, iterator> __p = equal_range(__x); |
2541 | const size_type __old_size = size(); |
2542 | _M_erase_aux(__p.first, __p.second); |
2543 | return __old_size - size(); |
2544 | } |
2545 | |
2546 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2547 | typename _Compare, typename _Alloc> |
2548 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2549 | _Compare, _Alloc>::iterator |
2550 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2551 | find(const _Key& __k) |
2552 | { |
2553 | iterator __j = _M_lower_bound(_M_begin(), _M_end(), __k); |
2554 | return (__j == end() |
2555 | || _M_impl._M_key_compare(__k, |
2556 | _S_key(__j._M_node))) ? end() : __j; |
2557 | } |
2558 | |
2559 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2560 | typename _Compare, typename _Alloc> |
2561 | typename _Rb_tree<_Key, _Val, _KeyOfValue, |
2562 | _Compare, _Alloc>::const_iterator |
2563 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2564 | find(const _Key& __k) const |
2565 | { |
2566 | const_iterator __j = _M_lower_bound(_M_begin(), _M_end(), __k); |
2567 | return (__j == end() |
2568 | || _M_impl._M_key_compare(__k, |
2569 | _S_key(__j._M_node))) ? end() : __j; |
2570 | } |
2571 | |
2572 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2573 | typename _Compare, typename _Alloc> |
2574 | typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::size_type |
2575 | _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>:: |
2576 | count(const _Key& __k) const |
2577 | { |
2578 | pair<const_iterator, const_iterator> __p = equal_range(__k); |
2579 | const size_type __n = std::distance(__p.first, __p.second); |
2580 | return __n; |
2581 | } |
2582 | |
2583 | _GLIBCXX_PURE__attribute__ ((__pure__)) unsigned int |
2584 | _Rb_tree_black_count(const _Rb_tree_node_base* __node, |
2585 | const _Rb_tree_node_base* __root) throw (); |
2586 | |
2587 | template<typename _Key, typename _Val, typename _KeyOfValue, |
2588 | typename _Compare, typename _Alloc> |
2589 | bool |
2590 | _Rb_tree<_Key,_Val,_KeyOfValue,_Compare,_Alloc>::__rb_verify() const |
2591 | { |
2592 | if (_M_impl._M_node_count == 0 || begin() == end()) |
2593 | return _M_impl._M_node_count == 0 && begin() == end() |
2594 | && this->_M_impl._M_header._M_left == _M_end() |
2595 | && this->_M_impl._M_header._M_right == _M_end(); |
2596 | |
2597 | unsigned int __len = _Rb_tree_black_count(_M_leftmost(), _M_root()); |
2598 | for (const_iterator __it = begin(); __it != end(); ++__it) |
2599 | { |
2600 | _Const_Link_type __x = static_cast<_Const_Link_type>(__it._M_node); |
2601 | _Const_Link_type __L = _S_left(__x); |
2602 | _Const_Link_type __R = _S_right(__x); |
2603 | |
2604 | if (__x->_M_color == _S_red) |
2605 | if ((__L && __L->_M_color == _S_red) |
2606 | || (__R && __R->_M_color == _S_red)) |
2607 | return false; |
2608 | |
2609 | if (__L && _M_impl._M_key_compare(_S_key(__x), _S_key(__L))) |
2610 | return false; |
2611 | if (__R && _M_impl._M_key_compare(_S_key(__R), _S_key(__x))) |
2612 | return false; |
2613 | |
2614 | if (!__L && !__R && _Rb_tree_black_count(__x, _M_root()) != __len) |
2615 | return false; |
2616 | } |
2617 | |
2618 | if (_M_leftmost() != _Rb_tree_node_base::_S_minimum(_M_root())) |
2619 | return false; |
2620 | if (_M_rightmost() != _Rb_tree_node_base::_S_maximum(_M_root())) |
2621 | return false; |
2622 | return true; |
2623 | } |
2624 | |
2625 | #if __cplusplus201402L > 201402L |
2626 | // Allow access to internals of compatible _Rb_tree specializations. |
2627 | template<typename _Key, typename _Val, typename _Sel, typename _Cmp1, |
2628 | typename _Alloc, typename _Cmp2> |
2629 | struct _Rb_tree_merge_helper<_Rb_tree<_Key, _Val, _Sel, _Cmp1, _Alloc>, |
2630 | _Cmp2> |
2631 | { |
2632 | private: |
2633 | friend class _Rb_tree<_Key, _Val, _Sel, _Cmp1, _Alloc>; |
2634 | |
2635 | static auto& |
2636 | _S_get_impl(_Rb_tree<_Key, _Val, _Sel, _Cmp2, _Alloc>& __tree) |
2637 | { return __tree._M_impl; } |
2638 | }; |
2639 | #endif // C++17 |
2640 | |
2641 | _GLIBCXX_END_NAMESPACE_VERSION |
2642 | } // namespace |
2643 | |
2644 | #endif |
1 | //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the DenseMap class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_DENSEMAP_H |
14 | #define LLVM_ADT_DENSEMAP_H |
15 | |
16 | #include "llvm/ADT/DenseMapInfo.h" |
17 | #include "llvm/ADT/EpochTracker.h" |
18 | #include "llvm/Support/AlignOf.h" |
19 | #include "llvm/Support/Compiler.h" |
20 | #include "llvm/Support/MathExtras.h" |
21 | #include "llvm/Support/MemAlloc.h" |
22 | #include "llvm/Support/ReverseIteration.h" |
23 | #include "llvm/Support/type_traits.h" |
24 | #include <algorithm> |
25 | #include <cassert> |
26 | #include <cstddef> |
27 | #include <cstring> |
28 | #include <initializer_list> |
29 | #include <iterator> |
30 | #include <new> |
31 | #include <type_traits> |
32 | #include <utility> |
33 | |
34 | namespace llvm { |
35 | |
36 | namespace detail { |
37 | |
38 | // We extend a pair to allow users to override the bucket type with their own |
39 | // implementation without requiring two members. |
40 | template <typename KeyT, typename ValueT> |
41 | struct DenseMapPair : public std::pair<KeyT, ValueT> { |
42 | using std::pair<KeyT, ValueT>::pair; |
43 | |
44 | KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; } |
45 | const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; } |
46 | ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; } |
47 | const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; } |
48 | }; |
49 | |
50 | } // end namespace detail |
51 | |
52 | template <typename KeyT, typename ValueT, |
53 | typename KeyInfoT = DenseMapInfo<KeyT>, |
54 | typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>, |
55 | bool IsConst = false> |
56 | class DenseMapIterator; |
57 | |
58 | template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT, |
59 | typename BucketT> |
60 | class DenseMapBase : public DebugEpochBase { |
61 | template <typename T> |
62 | using const_arg_type_t = typename const_pointer_or_const_ref<T>::type; |
63 | |
64 | public: |
65 | using size_type = unsigned; |
66 | using key_type = KeyT; |
67 | using mapped_type = ValueT; |
68 | using value_type = BucketT; |
69 | |
70 | using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>; |
71 | using const_iterator = |
72 | DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>; |
73 | |
74 | inline iterator begin() { |
75 | // When the map is empty, avoid the overhead of advancing/retreating past |
76 | // empty buckets. |
77 | if (empty()) |
78 | return end(); |
79 | if (shouldReverseIterate<KeyT>()) |
80 | return makeIterator(getBucketsEnd() - 1, getBuckets(), *this); |
81 | return makeIterator(getBuckets(), getBucketsEnd(), *this); |
82 | } |
83 | inline iterator end() { |
84 | return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true); |
85 | } |
86 | inline const_iterator begin() const { |
87 | if (empty()) |
88 | return end(); |
89 | if (shouldReverseIterate<KeyT>()) |
90 | return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this); |
91 | return makeConstIterator(getBuckets(), getBucketsEnd(), *this); |
92 | } |
93 | inline const_iterator end() const { |
94 | return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true); |
95 | } |
96 | |
97 | LLVM_NODISCARD[[clang::warn_unused_result]] bool empty() const { |
98 | return getNumEntries() == 0; |
99 | } |
100 | unsigned size() const { return getNumEntries(); } |
101 | |
102 | /// Grow the densemap so that it can contain at least \p NumEntries items |
103 | /// before resizing again. |
104 | void reserve(size_type NumEntries) { |
105 | auto NumBuckets = getMinBucketToReserveForEntries(NumEntries); |
106 | incrementEpoch(); |
107 | if (NumBuckets > getNumBuckets()) |
108 | grow(NumBuckets); |
109 | } |
110 | |
111 | void clear() { |
112 | incrementEpoch(); |
113 | if (getNumEntries() == 0 && getNumTombstones() == 0) return; |
114 | |
115 | // If the capacity of the array is huge, and the # elements used is small, |
116 | // shrink the array. |
117 | if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { |
118 | shrink_and_clear(); |
119 | return; |
120 | } |
121 | |
122 | const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); |
123 | if (std::is_trivially_destructible<ValueT>::value) { |
124 | // Use a simpler loop when values don't need destruction. |
125 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) |
126 | P->getFirst() = EmptyKey; |
127 | } else { |
128 | unsigned NumEntries = getNumEntries(); |
129 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { |
130 | if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { |
131 | if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { |
132 | P->getSecond().~ValueT(); |
133 | --NumEntries; |
134 | } |
135 | P->getFirst() = EmptyKey; |
136 | } |
137 | } |
138 | assert(NumEntries == 0 && "Node count imbalance!")(static_cast <bool> (NumEntries == 0 && "Node count imbalance!" ) ? void (0) : __assert_fail ("NumEntries == 0 && \"Node count imbalance!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 138, __extension__ __PRETTY_FUNCTION__)); |
139 | } |
140 | setNumEntries(0); |
141 | setNumTombstones(0); |
142 | } |
143 | |
144 | /// Return 1 if the specified key is in the map, 0 otherwise. |
145 | size_type count(const_arg_type_t<KeyT> Val) const { |
146 | const BucketT *TheBucket; |
147 | return LookupBucketFor(Val, TheBucket) ? 1 : 0; |
148 | } |
149 | |
150 | iterator find(const_arg_type_t<KeyT> Val) { |
151 | BucketT *TheBucket; |
152 | if (LookupBucketFor(Val, TheBucket)) |
153 | return makeIterator(TheBucket, |
154 | shouldReverseIterate<KeyT>() ? getBuckets() |
155 | : getBucketsEnd(), |
156 | *this, true); |
157 | return end(); |
158 | } |
159 | const_iterator find(const_arg_type_t<KeyT> Val) const { |
160 | const BucketT *TheBucket; |
161 | if (LookupBucketFor(Val, TheBucket)) |
162 | return makeConstIterator(TheBucket, |
163 | shouldReverseIterate<KeyT>() ? getBuckets() |
164 | : getBucketsEnd(), |
165 | *this, true); |
166 | return end(); |
167 | } |
168 | |
169 | /// Alternate version of find() which allows a different, and possibly |
170 | /// less expensive, key type. |
171 | /// The DenseMapInfo is responsible for supplying methods |
172 | /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key |
173 | /// type used. |
174 | template<class LookupKeyT> |
175 | iterator find_as(const LookupKeyT &Val) { |
176 | BucketT *TheBucket; |
177 | if (LookupBucketFor(Val, TheBucket)) |
178 | return makeIterator(TheBucket, |
179 | shouldReverseIterate<KeyT>() ? getBuckets() |
180 | : getBucketsEnd(), |
181 | *this, true); |
182 | return end(); |
183 | } |
184 | template<class LookupKeyT> |
185 | const_iterator find_as(const LookupKeyT &Val) const { |
186 | const BucketT *TheBucket; |
187 | if (LookupBucketFor(Val, TheBucket)) |
188 | return makeConstIterator(TheBucket, |
189 | shouldReverseIterate<KeyT>() ? getBuckets() |
190 | : getBucketsEnd(), |
191 | *this, true); |
192 | return end(); |
193 | } |
194 | |
195 | /// lookup - Return the entry for the specified key, or a default |
196 | /// constructed value if no such entry exists. |
197 | ValueT lookup(const_arg_type_t<KeyT> Val) const { |
198 | const BucketT *TheBucket; |
199 | if (LookupBucketFor(Val, TheBucket)) |
200 | return TheBucket->getSecond(); |
201 | return ValueT(); |
202 | } |
203 | |
204 | // Inserts key,value pair into the map if the key isn't already in the map. |
205 | // If the key is already in the map, it returns false and doesn't update the |
206 | // value. |
207 | std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { |
208 | return try_emplace(KV.first, KV.second); |
209 | } |
210 | |
211 | // Inserts key,value pair into the map if the key isn't already in the map. |
212 | // If the key is already in the map, it returns false and doesn't update the |
213 | // value. |
214 | std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) { |
215 | return try_emplace(std::move(KV.first), std::move(KV.second)); |
216 | } |
217 | |
218 | // Inserts key,value pair into the map if the key isn't already in the map. |
219 | // The value is constructed in-place if the key is not in the map, otherwise |
220 | // it is not moved. |
221 | template <typename... Ts> |
222 | std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) { |
223 | BucketT *TheBucket; |
224 | if (LookupBucketFor(Key, TheBucket)) |
225 | return std::make_pair(makeIterator(TheBucket, |
226 | shouldReverseIterate<KeyT>() |
227 | ? getBuckets() |
228 | : getBucketsEnd(), |
229 | *this, true), |
230 | false); // Already in map. |
231 | |
232 | // Otherwise, insert the new element. |
233 | TheBucket = |
234 | InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...); |
235 | return std::make_pair(makeIterator(TheBucket, |
236 | shouldReverseIterate<KeyT>() |
237 | ? getBuckets() |
238 | : getBucketsEnd(), |
239 | *this, true), |
240 | true); |
241 | } |
242 | |
243 | // Inserts key,value pair into the map if the key isn't already in the map. |
244 | // The value is constructed in-place if the key is not in the map, otherwise |
245 | // it is not moved. |
246 | template <typename... Ts> |
247 | std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) { |
248 | BucketT *TheBucket; |
249 | if (LookupBucketFor(Key, TheBucket)) |
250 | return std::make_pair(makeIterator(TheBucket, |
251 | shouldReverseIterate<KeyT>() |
252 | ? getBuckets() |
253 | : getBucketsEnd(), |
254 | *this, true), |
255 | false); // Already in map. |
256 | |
257 | // Otherwise, insert the new element. |
258 | TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...); |
259 | return std::make_pair(makeIterator(TheBucket, |
260 | shouldReverseIterate<KeyT>() |
261 | ? getBuckets() |
262 | : getBucketsEnd(), |
263 | *this, true), |
264 | true); |
265 | } |
266 | |
267 | /// Alternate version of insert() which allows a different, and possibly |
268 | /// less expensive, key type. |
269 | /// The DenseMapInfo is responsible for supplying methods |
270 | /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key |
271 | /// type used. |
272 | template <typename LookupKeyT> |
273 | std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV, |
274 | const LookupKeyT &Val) { |
275 | BucketT *TheBucket; |
276 | if (LookupBucketFor(Val, TheBucket)) |
277 | return std::make_pair(makeIterator(TheBucket, |
278 | shouldReverseIterate<KeyT>() |
279 | ? getBuckets() |
280 | : getBucketsEnd(), |
281 | *this, true), |
282 | false); // Already in map. |
283 | |
284 | // Otherwise, insert the new element. |
285 | TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first), |
286 | std::move(KV.second), Val); |
287 | return std::make_pair(makeIterator(TheBucket, |
288 | shouldReverseIterate<KeyT>() |
289 | ? getBuckets() |
290 | : getBucketsEnd(), |
291 | *this, true), |
292 | true); |
293 | } |
294 | |
295 | /// insert - Range insertion of pairs. |
296 | template<typename InputIt> |
297 | void insert(InputIt I, InputIt E) { |
298 | for (; I != E; ++I) |
299 | insert(*I); |
300 | } |
301 | |
302 | bool erase(const KeyT &Val) { |
303 | BucketT *TheBucket; |
304 | if (!LookupBucketFor(Val, TheBucket)) |
305 | return false; // not in map. |
306 | |
307 | TheBucket->getSecond().~ValueT(); |
308 | TheBucket->getFirst() = getTombstoneKey(); |
309 | decrementNumEntries(); |
310 | incrementNumTombstones(); |
311 | return true; |
312 | } |
313 | void erase(iterator I) { |
314 | BucketT *TheBucket = &*I; |
315 | TheBucket->getSecond().~ValueT(); |
316 | TheBucket->getFirst() = getTombstoneKey(); |
317 | decrementNumEntries(); |
318 | incrementNumTombstones(); |
319 | } |
320 | |
321 | value_type& FindAndConstruct(const KeyT &Key) { |
322 | BucketT *TheBucket; |
323 | if (LookupBucketFor(Key, TheBucket)) |
324 | return *TheBucket; |
325 | |
326 | return *InsertIntoBucket(TheBucket, Key); |
327 | } |
328 | |
329 | ValueT &operator[](const KeyT &Key) { |
330 | return FindAndConstruct(Key).second; |
331 | } |
332 | |
333 | value_type& FindAndConstruct(KeyT &&Key) { |
334 | BucketT *TheBucket; |
335 | if (LookupBucketFor(Key, TheBucket)) |
336 | return *TheBucket; |
337 | |
338 | return *InsertIntoBucket(TheBucket, std::move(Key)); |
339 | } |
340 | |
341 | ValueT &operator[](KeyT &&Key) { |
342 | return FindAndConstruct(std::move(Key)).second; |
343 | } |
344 | |
345 | /// isPointerIntoBucketsArray - Return true if the specified pointer points |
346 | /// somewhere into the DenseMap's array of buckets (i.e. either to a key or |
347 | /// value in the DenseMap). |
348 | bool isPointerIntoBucketsArray(const void *Ptr) const { |
349 | return Ptr >= getBuckets() && Ptr < getBucketsEnd(); |
350 | } |
351 | |
352 | /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets |
353 | /// array. In conjunction with the previous method, this can be used to |
354 | /// determine whether an insertion caused the DenseMap to reallocate. |
355 | const void *getPointerIntoBucketsArray() const { return getBuckets(); } |
356 | |
357 | protected: |
358 | DenseMapBase() = default; |
359 | |
360 | void destroyAll() { |
361 | if (getNumBuckets() == 0) // Nothing to do. |
362 | return; |
363 | |
364 | const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); |
365 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { |
366 | if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && |
367 | !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) |
368 | P->getSecond().~ValueT(); |
369 | P->getFirst().~KeyT(); |
370 | } |
371 | } |
372 | |
373 | void initEmpty() { |
374 | setNumEntries(0); |
375 | setNumTombstones(0); |
376 | |
377 | assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&(static_cast <bool> ((getNumBuckets() & (getNumBuckets ()-1)) == 0 && "# initial buckets must be a power of two!" ) ? void (0) : __assert_fail ("(getNumBuckets() & (getNumBuckets()-1)) == 0 && \"# initial buckets must be a power of two!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 378, __extension__ __PRETTY_FUNCTION__)) |
378 | "# initial buckets must be a power of two!")(static_cast <bool> ((getNumBuckets() & (getNumBuckets ()-1)) == 0 && "# initial buckets must be a power of two!" ) ? void (0) : __assert_fail ("(getNumBuckets() & (getNumBuckets()-1)) == 0 && \"# initial buckets must be a power of two!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 378, __extension__ __PRETTY_FUNCTION__)); |
379 | const KeyT EmptyKey = getEmptyKey(); |
380 | for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) |
381 | ::new (&B->getFirst()) KeyT(EmptyKey); |
382 | } |
383 | |
384 | /// Returns the number of buckets to allocate to ensure that the DenseMap can |
385 | /// accommodate \p NumEntries without need to grow(). |
386 | unsigned getMinBucketToReserveForEntries(unsigned NumEntries) { |
387 | // Ensure that "NumEntries * 4 < NumBuckets * 3" |
388 | if (NumEntries == 0) |
389 | return 0; |
390 | // +1 is required because of the strict equality. |
391 | // For example if NumEntries is 48, we need to return 401. |
392 | return NextPowerOf2(NumEntries * 4 / 3 + 1); |
393 | } |
394 | |
395 | void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { |
396 | initEmpty(); |
397 | |
398 | // Insert all the old elements. |
399 | const KeyT EmptyKey = getEmptyKey(); |
400 | const KeyT TombstoneKey = getTombstoneKey(); |
401 | for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { |
402 | if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && |
403 | !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { |
404 | // Insert the key/value into the new table. |
405 | BucketT *DestBucket; |
406 | bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); |
407 | (void)FoundVal; // silence warning. |
408 | assert(!FoundVal && "Key already in new map?")(static_cast <bool> (!FoundVal && "Key already in new map?" ) ? void (0) : __assert_fail ("!FoundVal && \"Key already in new map?\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 408, __extension__ __PRETTY_FUNCTION__)); |
409 | DestBucket->getFirst() = std::move(B->getFirst()); |
410 | ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond())); |
411 | incrementNumEntries(); |
412 | |
413 | // Free the value. |
414 | B->getSecond().~ValueT(); |
415 | } |
416 | B->getFirst().~KeyT(); |
417 | } |
418 | } |
419 | |
420 | template <typename OtherBaseT> |
421 | void copyFrom( |
422 | const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) { |
423 | assert(&other != this)(static_cast <bool> (&other != this) ? void (0) : __assert_fail ("&other != this", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 423, __extension__ __PRETTY_FUNCTION__)); |
424 | assert(getNumBuckets() == other.getNumBuckets())(static_cast <bool> (getNumBuckets() == other.getNumBuckets ()) ? void (0) : __assert_fail ("getNumBuckets() == other.getNumBuckets()" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 424, __extension__ __PRETTY_FUNCTION__)); |
425 | |
426 | setNumEntries(other.getNumEntries()); |
427 | setNumTombstones(other.getNumTombstones()); |
428 | |
429 | if (std::is_trivially_copyable<KeyT>::value && |
430 | std::is_trivially_copyable<ValueT>::value) |
431 | memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(), |
432 | getNumBuckets() * sizeof(BucketT)); |
433 | else |
434 | for (size_t i = 0; i < getNumBuckets(); ++i) { |
435 | ::new (&getBuckets()[i].getFirst()) |
436 | KeyT(other.getBuckets()[i].getFirst()); |
437 | if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && |
438 | !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) |
439 | ::new (&getBuckets()[i].getSecond()) |
440 | ValueT(other.getBuckets()[i].getSecond()); |
441 | } |
442 | } |
443 | |
444 | static unsigned getHashValue(const KeyT &Val) { |
445 | return KeyInfoT::getHashValue(Val); |
446 | } |
447 | |
448 | template<typename LookupKeyT> |
449 | static unsigned getHashValue(const LookupKeyT &Val) { |
450 | return KeyInfoT::getHashValue(Val); |
451 | } |
452 | |
453 | static const KeyT getEmptyKey() { |
454 | static_assert(std::is_base_of<DenseMapBase, DerivedT>::value, |
455 | "Must pass the derived type to this template!"); |
456 | return KeyInfoT::getEmptyKey(); |
457 | } |
458 | |
459 | static const KeyT getTombstoneKey() { |
460 | return KeyInfoT::getTombstoneKey(); |
461 | } |
462 | |
463 | private: |
464 | iterator makeIterator(BucketT *P, BucketT *E, |
465 | DebugEpochBase &Epoch, |
466 | bool NoAdvance=false) { |
467 | if (shouldReverseIterate<KeyT>()) { |
468 | BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1; |
469 | return iterator(B, E, Epoch, NoAdvance); |
470 | } |
471 | return iterator(P, E, Epoch, NoAdvance); |
472 | } |
473 | |
474 | const_iterator makeConstIterator(const BucketT *P, const BucketT *E, |
475 | const DebugEpochBase &Epoch, |
476 | const bool NoAdvance=false) const { |
477 | if (shouldReverseIterate<KeyT>()) { |
478 | const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1; |
479 | return const_iterator(B, E, Epoch, NoAdvance); |
480 | } |
481 | return const_iterator(P, E, Epoch, NoAdvance); |
482 | } |
483 | |
  // CRTP forwarders: entry/tombstone counts and bucket storage live in the
  // derived class (DenseMap stores them directly; SmallDenseMap discriminates
  // between inline and heap representations), so the base reaches them
  // through static downcasts.

  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  // Reallocation policy also belongs to the derived class.
  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }
543 | |
  /// Write \p Key / \p Values into \p TheBucket, growing the table first if
  /// needed. Returns the bucket actually written (which may differ from the
  /// argument if a rehash occurred inside InsertIntoBucketImpl).
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    // Must run before touching the bucket: may grow/rehash and relocate it.
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    // The key slot currently holds a live sentinel (empty/tombstone key), so
    // assignment is correct; the value slot is raw storage and needs
    // placement-new construction.
    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }
553 | |
  /// As InsertIntoBucket, but the bucket is (re)located via a separate
  /// heterogeneous \p Lookup key while \p Key / \p Value are moved in.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    // May grow/rehash; must precede the writes below.
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    // Assign over the sentinel key; placement-new the uninitialized value.
    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }
563 | |
  /// Prepare a bucket for insertion: grow/rehash if the table is too full,
  /// re-resolve the destination bucket after any rehash, and update the
  /// entry/tombstone counters. Returns the bucket the caller must write into.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    // Insertion invalidates outstanding iterators (debug-epoch check).
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The later case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      // Growing rehashed everything; find the destination bucket afresh.
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      // Same-capacity grow: rehashing purges accumulated tombstones.
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    // After any grow() the table is non-empty, so lookup yields a real bucket.
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
602 | |
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  ///
  /// NOTE: when the map currently has zero buckets, FoundBucket is set to
  /// nullptr (and false is returned); callers that go on to dereference the
  /// bucket must first guarantee storage exists, e.g. via grow().
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }
657 | |
  /// Mutable overload: delegates to the const version, then casts away the
  /// constness of the found bucket. Same nullptr caveat as above when the
  /// table has no buckets.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }
666 | |
667 | public: |
668 | /// Return the approximate size (in bytes) of the actual map. |
669 | /// This is just the raw memory used by DenseMap. |
670 | /// If entries are pointers to objects, the size of the referenced objects |
671 | /// are not included. |
672 | size_t getMemorySize() const { |
673 | return getNumBuckets() * sizeof(BucketT); |
674 | } |
675 | }; |
676 | |
677 | /// Equality comparison for DenseMap. |
678 | /// |
679 | /// Iterates over elements of LHS confirming that each (key, value) pair in LHS |
680 | /// is also in RHS, and that no additional pairs are in RHS. |
681 | /// Equivalent to N calls to RHS.find and N value comparisons. Amortized |
682 | /// complexity is linear, worst case is O(N^2) (if every hash collides). |
683 | template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT, |
684 | typename BucketT> |
685 | bool operator==( |
686 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS, |
687 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) { |
688 | if (LHS.size() != RHS.size()) |
689 | return false; |
690 | |
691 | for (auto &KV : LHS) { |
692 | auto I = RHS.find(KV.first); |
693 | if (I == RHS.end() || I->second != KV.second) |
694 | return false; |
695 | } |
696 | |
697 | return true; |
698 | } |
699 | |
/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}
710 | |
/// DenseMap: the standard heap-allocated quadratically-probed hash map.
/// All probing/insertion logic lives in DenseMapBase (CRTP); this class owns
/// the storage and the entry/tombstone bookkeeping the base reads back.
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;       // Heap bucket array; nullptr when NumBuckets == 0.
  unsigned NumEntries;    // Live (key, value) pairs.
  unsigned NumTombstones; // Erased-slot markers still occupying buckets.
  unsigned NumBuckets;    // Capacity of Buckets.

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantee that
  /// this number of elements can be inserted in the map without grow()
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    // Start empty, then steal the other map's storage wholesale.
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    // deallocate_buffer tolerates the null/zero-size empty state.
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }

  void swap(DenseMap& RHS) {
    // Both maps' iterators are invalidated (debug-epoch bump on each side).
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    // Release current contents, reset to empty, then steal other's storage.
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      // other was empty: allocateBuckets(0) left Buckets null; just reset
      // the counters.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    // New capacity: at least 64 and a power of two.
    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      // Growing from the empty (null) state: nothing to move.
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      // Capacity is already right-sized; just reset the bucket contents.
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }

private:
  // Accessors consumed by DenseMapBase through CRTP.
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  /// Allocate \p Num buckets (uninitialized). Returns false — leaving
  /// Buckets null — when Num is zero; callers must then reset the counters
  /// themselves.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};
876 | |
/// SmallDenseMap: DenseMap with small-size optimization. Up to InlineBuckets
/// buckets live directly inside the object; beyond that a heap-allocated
/// LargeRep takes over. The 'Small' bit discriminates which representation
/// the shared 'storage' union currently holds.
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Bucket counts are always powers of two (masking replaces modulo).
  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;        // 1 => inline buckets; 0 => LargeRep in storage.
  unsigned NumEntries : 31;  // Live pairs (hence the <2^31 entry limit).
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    // NumEntries is a bitfield, so std::swap can't bind to it directly.
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both large: swapping the heap pointers/counts suffices.
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    // Mixed case: one side inline, one side heap.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    // Match the source's representation so bucket counts line up, then let
    // the base class copy the contents.
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    // Already large: stash the old rep, pick the new representation, rehash.
    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      // Representation wouldn't change; just reinitialize in place.
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  /// Free heap buckets if we're in the large representation; a no-op when
  /// the buckets are inline.
  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  /// Allocate a heap bucket array; only valid for counts that exceed the
  /// inline capacity.
  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
1189 | |
1190 | template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket, |
1191 | bool IsConst> |
1192 | class DenseMapIterator : DebugEpochBase::HandleBase { |
1193 | friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>; |
1194 | friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>; |
1195 | |
1196 | public: |
1197 | using difference_type = ptrdiff_t; |
1198 | using value_type = |
1199 | typename std::conditional<IsConst, const Bucket, Bucket>::type; |
1200 | using pointer = value_type *; |
1201 | using reference = value_type &; |
1202 | using iterator_category = std::forward_iterator_tag; |
1203 | |
1204 | private: |
1205 | pointer Ptr = nullptr; |
1206 | pointer End = nullptr; |
1207 | |
1208 | public: |
1209 | DenseMapIterator() = default; |
1210 | |
1211 | DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, |
1212 | bool NoAdvance = false) |
1213 | : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) { |
1214 | assert(isHandleInSync() && "invalid construction!")(static_cast <bool> (isHandleInSync() && "invalid construction!" ) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid construction!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1214, __extension__ __PRETTY_FUNCTION__)); |
1215 | |
1216 | if (NoAdvance) return; |
1217 | if (shouldReverseIterate<KeyT>()) { |
1218 | RetreatPastEmptyBuckets(); |
1219 | return; |
1220 | } |
1221 | AdvancePastEmptyBuckets(); |
1222 | } |
1223 | |
1224 | // Converting ctor from non-const iterators to const iterators. SFINAE'd out |
1225 | // for const iterator destinations so it doesn't end up as a user defined copy |
1226 | // constructor. |
1227 | template <bool IsConstSrc, |
1228 | typename = std::enable_if_t<!IsConstSrc && IsConst>> |
1229 | DenseMapIterator( |
1230 | const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I) |
1231 | : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} |
1232 | |
/// Dereference to the current bucket. With reverse iteration enabled the
/// iterator points one past the bucket it denotes, hence Ptr[-1].
reference operator*() const {
  assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!" ) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1234, __extension__ __PRETTY_FUNCTION__));
  assert(Ptr != End && "dereferencing end() iterator")(static_cast <bool> (Ptr != End && "dereferencing end() iterator" ) ? void (0) : __assert_fail ("Ptr != End && \"dereferencing end() iterator\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1235, __extension__ __PRETTY_FUNCTION__));
  if (shouldReverseIterate<KeyT>())
    return Ptr[-1];
  return *Ptr;
}
/// Member access on the current bucket; same Ptr[-1] adjustment as
/// operator*() when reverse iteration is enabled.
pointer operator->() const {
  assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!" ) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1241, __extension__ __PRETTY_FUNCTION__));
  assert(Ptr != End && "dereferencing end() iterator")(static_cast <bool> (Ptr != End && "dereferencing end() iterator" ) ? void (0) : __assert_fail ("Ptr != End && \"dereferencing end() iterator\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1242, __extension__ __PRETTY_FUNCTION__));
  if (shouldReverseIterate<KeyT>())
    return &(Ptr[-1]);
  return Ptr;
}
1247 | |
/// Equality: two iterators are equal iff they point at the same bucket.
/// In debug builds, also verifies both handles are in sync with their map's
/// epoch and that both iterators belong to the same map.
friend bool operator==(const DenseMapIterator &LHS,
                       const DenseMapIterator &RHS) {
  assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!")(static_cast <bool> ((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!") ? void (0) : __assert_fail ("(!LHS.Ptr || LHS.isHandleInSync()) && \"handle not in sync!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1250, __extension__ __PRETTY_FUNCTION__));
  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!")(static_cast <bool> ((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!") ? void (0) : __assert_fail ("(!RHS.Ptr || RHS.isHandleInSync()) && \"handle not in sync!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1251, __extension__ __PRETTY_FUNCTION__));
  assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&(static_cast <bool> (LHS.getEpochAddress() == RHS.getEpochAddress () && "comparing incomparable iterators!") ? void (0) : __assert_fail ("LHS.getEpochAddress() == RHS.getEpochAddress() && \"comparing incomparable iterators!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1253, __extension__ __PRETTY_FUNCTION__))
         "comparing incomparable iterators!")(static_cast <bool> (LHS.getEpochAddress() == RHS.getEpochAddress () && "comparing incomparable iterators!") ? void (0) : __assert_fail ("LHS.getEpochAddress() == RHS.getEpochAddress() && \"comparing incomparable iterators!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1253, __extension__ __PRETTY_FUNCTION__));
  return LHS.Ptr == RHS.Ptr;
}
1256 | |
1257 | friend bool operator!=(const DenseMapIterator &LHS, |
1258 | const DenseMapIterator &RHS) { |
1259 | return !(LHS == RHS); |
1260 | } |
1261 | |
/// Pre-increment: step to the next live bucket. Under reverse iteration
/// this moves the pointer down and retreats past dead buckets instead.
inline DenseMapIterator& operator++() {  // Preincrement
  assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!" ) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1263, __extension__ __PRETTY_FUNCTION__));
  assert(Ptr != End && "incrementing end() iterator")(static_cast <bool> (Ptr != End && "incrementing end() iterator" ) ? void (0) : __assert_fail ("Ptr != End && \"incrementing end() iterator\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1264, __extension__ __PRETTY_FUNCTION__));
  if (shouldReverseIterate<KeyT>()) {
    --Ptr;
    RetreatPastEmptyBuckets();
    return *this;
  }
  ++Ptr;
  AdvancePastEmptyBuckets();
  return *this;
}
/// Post-increment: advance via pre-increment, returning a copy of the
/// iterator's previous position.
DenseMapIterator operator++(int) {  // Postincrement
  assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!" ) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1275, __extension__ __PRETTY_FUNCTION__));
  DenseMapIterator tmp = *this; ++*this; return tmp;
}
1278 | |
1279 | private: |
// Move Ptr forward over buckets holding the sentinel empty/tombstone keys,
// stopping at the first live bucket or at End. Used by forward iteration.
void AdvancePastEmptyBuckets() {
  assert(Ptr <= End)(static_cast <bool> (Ptr <= End) ? void (0) : __assert_fail ("Ptr <= End", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1281, __extension__ __PRETTY_FUNCTION__));
  const KeyT Empty = KeyInfoT::getEmptyKey();
  const KeyT Tombstone = KeyInfoT::getTombstoneKey();

  while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                        KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
    ++Ptr;
}
1289 | |
// Reverse-iteration counterpart of AdvancePastEmptyBuckets: move Ptr down
// while the bucket it denotes (Ptr[-1], since the iterator points one past
// its element) is empty or a tombstone. Here End is the LOW end of the
// bucket range, hence the Ptr >= End invariant.
void RetreatPastEmptyBuckets() {
  assert(Ptr >= End)(static_cast <bool> (Ptr >= End) ? void (0) : __assert_fail ("Ptr >= End", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/DenseMap.h" , 1291, __extension__ __PRETTY_FUNCTION__));
  const KeyT Empty = KeyInfoT::getEmptyKey();
  const KeyT Tombstone = KeyInfoT::getTombstoneKey();

  while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                        KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
    --Ptr;
}
1299 | }; |
1300 | |
1301 | template <typename KeyT, typename ValueT, typename KeyInfoT> |
1302 | inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) { |
1303 | return X.getMemorySize(); |
1304 | } |
1305 | |
1306 | } // end namespace llvm |
1307 | |
1308 | #endif // LLVM_ADT_DENSEMAP_H |