Bug Summary

File: llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
Warning: line 4043, column 9
Array access (from variable 'MLiveIns') results in a null pointer dereference

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InstrRefBasedImpl.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/build-llvm/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-24-235614-16331-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp

/build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp

1//===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file InstrRefBasedImpl.cpp
9///
10/// This is a separate implementation of LiveDebugValues, see
11/// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information.
12///
13/// This pass propagates variable locations between basic blocks, resolving
14/// control flow conflicts between them. The problem is much like SSA
15/// construction, where each DBG_VALUE instruction assigns the *value* that
16/// a variable has, and every instruction where the variable is in scope uses
17/// that variable. The resulting map of instruction-to-value is then translated
18/// into a register (or spill) location for each variable over each instruction.
19///
20/// This pass determines which DBG_VALUE dominates which instructions, or if
21/// none do, where values must be merged (like PHI nodes). The added
22/// complication is that because codegen has already finished, a PHI node may
23/// be needed for a variable location to be correct, but no register or spill
24/// slot merges the necessary values. In these circumstances, the variable
25/// location is dropped.
26///
27/// What makes this analysis non-trivial is loops: we cannot tell in advance
28/// whether a variable location is live throughout a loop, or whether its
29/// location is clobbered (or redefined by another DBG_VALUE), without
30/// exploring all the way through.
31///
32/// To make this simpler we perform two kinds of analysis. First, we identify
33/// every value defined by every instruction (ignoring those that only move
34/// another value), then compute a map of which values are available for each
35/// instruction. This is stronger than a reaching-def analysis, as we create
36/// PHI values where other values merge.
37///
38/// Secondly, for each variable, we effectively re-construct SSA using each
39/// DBG_VALUE as a def. The DBG_VALUEs read a value-number computed by the
40/// first analysis from the location they refer to. We can then compute the
41/// dominance frontiers of where a variable has a value, and create PHI nodes
42/// where they merge.
43/// This isn't precisely SSA-construction though, because the function shape
44/// is pre-defined. If a variable location requires a PHI node, but no
45/// PHI for the relevant values is present in the function (as computed by the
46/// first analysis), the location must be dropped.
47///
48/// Once both are complete, we can pass back over all instructions knowing:
49/// * What _value_ each variable should contain, either defined by an
50/// instruction or where control flow merges
51/// * What the location of that value is (if any).
52/// Allowing us to create appropriate live-in DBG_VALUEs, and DBG_VALUEs when
53/// a value moves location. After this pass runs, all variable locations within
54/// a block should be specified by DBG_VALUEs within that block, allowing
55/// DbgEntityHistoryCalculator to focus on individual blocks.
56///
57/// This pass is able to go fast because the size of the first
58/// reaching-definition analysis is proportional to the working-set size of
59/// the function, which the compiler tries to keep small. (It's also
60/// proportional to the number of blocks). Additionally, we repeatedly perform
61/// the second reaching-definition analysis with only the variables and blocks
62/// in a single lexical scope, exploiting their locality.
63///
64/// Determining where PHIs happen is trickier with this approach, and it comes
65/// to a head in the major problem for LiveDebugValues: is a value live-through
66/// a loop, or not? Your garden-variety dataflow analysis aims to build a set of
67/// facts about a function, however this analysis needs to generate new value
68/// numbers at joins.
69///
70/// To do this, consider a lattice of all definition values, from instructions
71/// and from PHIs. Each PHI is characterised by the RPO number of the block it
72/// occurs in. Each value pair A, B can be ordered by RPO(A) < RPO(B):
73/// with non-PHI values at the top, and any PHI value in the last block (by RPO
74/// order) at the bottom.
75///
76/// (Awkwardly: lower down the _lattice_ means a greater RPO _number_. Below,
77/// "rank" always refers to the former).
78///
79/// At any join, for each register, we consider:
80/// * All incoming values, and
81/// * The PREVIOUS live-in value at this join.
82/// If all incoming values agree: that's the live-in value. If they do not, the
83/// incoming values are ranked according to the partial order, and the NEXT
84/// LOWEST rank after the PREVIOUS live-in value is picked (multiple values of
85/// the same rank are ignored as conflicting). If there are no candidate values,
86/// or if the rank of the live-in would be lower than the rank of the current
87/// block's PHIs, create a new PHI value.
88///
89/// Intuitively: if it's not immediately obvious what value a join should result
90/// in, we iteratively descend from instruction-definitions down through PHI
91/// values, getting closer to the current block each time. If the current block
92/// is a loop head, this ordering is effectively searching outer levels of
93/// loops, to find a value that's live-through the current loop.
94///
95/// If there is no value that's live-through this loop, a PHI is created for
96/// this location instead. We can't use a lower-ranked PHI because by definition
97/// it doesn't dominate the current block. We can't create a PHI value any
98/// earlier, because we risk creating a PHI value at a location where values do
99/// not in fact merge, thus misrepresenting the truth, and not making the true
100/// live-through value for variable locations.
101///
102/// This algorithm applies to both calculating the availability of values in
103/// the first analysis, and the location of variables in the second. However
104/// for the second we add an extra dimension of pain: creating a variable
105/// location PHI is only valid if, for each incoming edge,
106/// * There is a value for the variable on the incoming edge, and
107/// * All the edges have that value in the same register.
108/// Or put another way: we can only create a variable-location PHI if there is
109/// a matching machine-location PHI, each input to which is the variable's value
110/// in the predecessor block.
111///
112/// To accommodate this difference, each point on the lattice is split in
113/// two: a "proposed" PHI and a "definite" PHI. Any PHI that can immediately
114/// have its location determined is a "definite" PHI, and no further work is
115/// needed. Otherwise, a location that all non-backedge predecessors agree
116/// on is picked and propagated as a "proposed" PHI value. If that PHI value
117/// is truly live-through, it'll appear on the loop backedges on the next
118/// dataflow iteration, after which the block live-in moves to be a "definite"
119/// PHI. If it's not truly live-through, the variable value will be downgraded
120/// further as we explore the lattice, or remains "proposed" and is considered
121/// invalid once dataflow completes.
122///
123/// ### Terminology
124///
125/// A machine location is a register or spill slot, a value is something that's
126/// defined by an instruction or PHI node, while a variable value is the value
127/// assigned to a variable. A variable location is a machine location that must
128/// contain the appropriate variable value. A value that is a PHI node is
129/// occasionally called an mphi.
130///
131/// The first dataflow problem is the "machine value location" problem,
132/// because we're determining which machine locations contain which values.
133/// The "locations" are constant: what's unknown is what value they contain.
134///
135/// The second dataflow problem (the one for variables) is the "variable value
136/// problem", because it's determining what values a variable has, rather than
137/// what location those values are placed in. Unfortunately, it's not that
138/// simple, because producing a PHI value always involves picking a location.
139/// This is an imperfection that we just have to accept, at least for now.
140///
141/// TODO:
142/// Overlapping fragments
143/// Entry values
144/// Add back DEBUG statements for debugging this
145/// Collect statistics
146///
147//===----------------------------------------------------------------------===//
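The join rule described in the header comment above can be made concrete with a standalone sketch (plain C++, not this pass's data structures). Rank 0 stands for a value defined by an instruction; a PHI's rank is the RPO number of its defining block plus one, so a greater rank means lower down the lattice. The names and types below are illustrative only.

    #include <map>
    #include <vector>

    struct Candidate {
      unsigned ValueID; // identity of an incoming value
      unsigned Rank;    // 0 = instruction def, otherwise RPO(defining block) + 1
    };

    // Returns the value to use as the block live-in, or CurBlockPHI if a fresh
    // PHI for the current block must be created instead.
    unsigned join(const std::vector<Candidate> &Incoming, unsigned PrevLiveInRank,
                  unsigned CurBlockRank, unsigned CurBlockPHI) {
      if (Incoming.empty())
        return CurBlockPHI;

      // If all incoming values agree, that's the live-in value.
      bool AllAgree = true;
      for (const Candidate &C : Incoming)
        if (C.ValueID != Incoming.front().ValueID)
          AllAgree = false;
      if (AllAgree)
        return Incoming.front().ValueID;

      // Otherwise, group candidates by rank and note ranks where two different
      // values collide -- those conflict and must be skipped.
      std::map<unsigned, unsigned> ValueAtRank; // rank -> value ID
      std::map<unsigned, bool> Conflicting;
      for (const Candidate &C : Incoming) {
        auto Res = ValueAtRank.insert({C.Rank, C.ValueID});
        if (!Res.second && Res.first->second != C.ValueID)
          Conflicting[C.Rank] = true;
      }

      // Pick the next-lowest rank after the previous live-in; if that would sink
      // to (or past) the rank of the current block's own PHIs, give up and make one.
      for (const auto &Entry : ValueAtRank) {
        if (Entry.first <= PrevLiveInRank || Conflicting[Entry.first])
          continue;
        if (Entry.first >= CurBlockRank)
          break;
        return Entry.second;
      }
      return CurBlockPHI;
    }

On a loop head this walk effectively descends from values defined outside the loop towards the loop itself, stopping at the first value that could be live-through, exactly as the comment above describes.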
148
149#include "llvm/ADT/DenseMap.h"
150#include "llvm/ADT/PostOrderIterator.h"
151#include "llvm/ADT/STLExtras.h"
152#include "llvm/ADT/SmallPtrSet.h"
153#include "llvm/ADT/SmallSet.h"
154#include "llvm/ADT/SmallVector.h"
155#include "llvm/ADT/Statistic.h"
156#include "llvm/ADT/UniqueVector.h"
157#include "llvm/CodeGen/LexicalScopes.h"
158#include "llvm/CodeGen/MachineBasicBlock.h"
159#include "llvm/CodeGen/MachineFrameInfo.h"
160#include "llvm/CodeGen/MachineFunction.h"
161#include "llvm/CodeGen/MachineFunctionPass.h"
162#include "llvm/CodeGen/MachineInstr.h"
163#include "llvm/CodeGen/MachineInstrBuilder.h"
164#include "llvm/CodeGen/MachineInstrBundle.h"
165#include "llvm/CodeGen/MachineMemOperand.h"
166#include "llvm/CodeGen/MachineOperand.h"
167#include "llvm/CodeGen/PseudoSourceValue.h"
168#include "llvm/CodeGen/RegisterScavenging.h"
169#include "llvm/CodeGen/TargetFrameLowering.h"
170#include "llvm/CodeGen/TargetInstrInfo.h"
171#include "llvm/CodeGen/TargetLowering.h"
172#include "llvm/CodeGen/TargetPassConfig.h"
173#include "llvm/CodeGen/TargetRegisterInfo.h"
174#include "llvm/CodeGen/TargetSubtargetInfo.h"
175#include "llvm/Config/llvm-config.h"
176#include "llvm/IR/DIBuilder.h"
177#include "llvm/IR/DebugInfoMetadata.h"
178#include "llvm/IR/DebugLoc.h"
179#include "llvm/IR/Function.h"
180#include "llvm/IR/Module.h"
181#include "llvm/InitializePasses.h"
182#include "llvm/MC/MCRegisterInfo.h"
183#include "llvm/Pass.h"
184#include "llvm/Support/Casting.h"
185#include "llvm/Support/Compiler.h"
186#include "llvm/Support/Debug.h"
187#include "llvm/Support/TypeSize.h"
188#include "llvm/Support/raw_ostream.h"
189#include "llvm/Target/TargetMachine.h"
190#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
191#include <algorithm>
192#include <cassert>
193#include <cstdint>
194#include <functional>
195#include <queue>
196#include <tuple>
197#include <utility>
198#include <vector>
199#include <limits.h>
200#include <limits>
201
202#include "LiveDebugValues.h"
203
204using namespace llvm;
205
206// SSAUpdaterImple sets DEBUG_TYPE, change it.
207#undef DEBUG_TYPE
208#define DEBUG_TYPE "livedebugvalues"
209
210// Act more like the VarLoc implementation, by propagating some locations too
211// far and ignoring some transfers.
212static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
213 cl::desc("Act like old LiveDebugValues did"),
214 cl::init(false));
215
216namespace {
217
218// The location at which a spilled value resides. It consists of a register and
219// an offset.
220struct SpillLoc {
221 unsigned SpillBase;
222 StackOffset SpillOffset;
223 bool operator==(const SpillLoc &Other) const {
224 return std::make_pair(SpillBase, SpillOffset) ==
225 std::make_pair(Other.SpillBase, Other.SpillOffset);
226 }
227 bool operator<(const SpillLoc &Other) const {
228 return std::make_tuple(SpillBase, SpillOffset.getFixed(),
229 SpillOffset.getScalable()) <
230 std::make_tuple(Other.SpillBase, Other.SpillOffset.getFixed(),
231 Other.SpillOffset.getScalable());
232 }
233};
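SpillLoc::operator< above compares the base register and the fixed and scalable parts of the offset as one tuple, presumably because StackOffset itself is not ordered; defining the ordering this way lets SpillLoc be used as a key in ordered containers. A minimal standalone sketch of the same idiom, with hypothetical types:

    #include <map>
    #include <tuple>

    struct Slot {
      unsigned Base;
      long Fixed;
      long Scalable;
      // Lexicographic ordering over all the parts, as SpillLoc does above.
      bool operator<(const Slot &O) const {
        return std::tie(Base, Fixed, Scalable) <
               std::tie(O.Base, O.Fixed, O.Scalable);
      }
    };

    std::map<Slot, unsigned> SlotNumbers; // now usable as an ordered key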
234
235class LocIdx {
236 unsigned Location;
237
238 // Default constructor is private, initializing to an illegal location number.
239 // Use only for "not an entry" elements in IndexedMaps.
240 LocIdx() : Location(UINT_MAX) { }
241
242public:
243 #define NUM_LOC_BITS 24
244 LocIdx(unsigned L) : Location(L) {
245   assert(L < (1 << NUM_LOC_BITS) && "Machine locations must fit in 24 bits");
246 }
247
248 static LocIdx MakeIllegalLoc() {
249 return LocIdx();
250 }
251
252 bool isIllegal() const {
253   return Location == UINT_MAX;
254 }
255
256 uint64_t asU64() const {
257 return Location;
258 }
259
260 bool operator==(unsigned L) const {
261 return Location == L;
262 }
263
264 bool operator==(const LocIdx &L) const {
265 return Location == L.Location;
266 }
267
268 bool operator!=(unsigned L) const {
269 return !(*this == L);
270 }
271
272 bool operator!=(const LocIdx &L) const {
273 return !(*this == L);
274 }
275
276 bool operator<(const LocIdx &Other) const {
277 return Location < Other.Location;
278 }
279};
280
281class LocIdxToIndexFunctor {
282public:
283 using argument_type = LocIdx;
284 unsigned operator()(const LocIdx &L) const {
285 return L.asU64();
286 }
287};
288
289/// Unique identifier for a value defined by an instruction, as a value type.
290/// Casts back and forth to a uint64_t. Probably replaceable with something less
291/// bit-constrained. Each value identifies the instruction and machine location
292/// where the value is defined, although there may be no corresponding machine
293/// operand for it (ex: regmasks clobbering values). The instructions are
294/// one-based, and definitions that are PHIs have instruction number zero.
295///
296/// The obvious limits of a 1M block function or 1M instruction blocks are
297/// problematic; but by that point we should probably have bailed out of
298/// trying to analyse the function.
299class ValueIDNum {
300 uint64_t BlockNo : 20; /// The block where the def happens.
301 uint64_t InstNo : 20; /// The Instruction where the def happens.
302 /// One based, is distance from start of block.
303 uint64_t LocNo : NUM_LOC_BITS; /// The machine location where the def happens.
304
305public:
306 // XXX -- temporarily enabled while the live-in / live-out tables are moved
307 // to something more type-y
308 ValueIDNum() : BlockNo(0xFFFFF),
309 InstNo(0xFFFFF),
310 LocNo(0xFFFFFF) { }
311
312 ValueIDNum(uint64_t Block, uint64_t Inst, uint64_t Loc)
313 : BlockNo(Block), InstNo(Inst), LocNo(Loc) { }
314
315 ValueIDNum(uint64_t Block, uint64_t Inst, LocIdx Loc)
316 : BlockNo(Block), InstNo(Inst), LocNo(Loc.asU64()) { }
317
318 uint64_t getBlock() const { return BlockNo; }
319 uint64_t getInst() const { return InstNo; }
320 uint64_t getLoc() const { return LocNo; }
321 bool isPHI() const { return InstNo == 0; }
322
323 uint64_t asU64() const {
324 uint64_t TmpBlock = BlockNo;
325 uint64_t TmpInst = InstNo;
326   return TmpBlock << 44ull | TmpInst << NUM_LOC_BITS | LocNo;
327 }
328
329 static ValueIDNum fromU64(uint64_t v) {
330 uint64_t L = (v & 0x3FFF);
331   return {v >> 44ull, ((v >> NUM_LOC_BITS) & 0xFFFFF), L};
332 }
333
334 bool operator<(const ValueIDNum &Other) const {
335 return asU64() < Other.asU64();
336 }
337
338 bool operator==(const ValueIDNum &Other) const {
339 return std::tie(BlockNo, InstNo, LocNo) ==
340 std::tie(Other.BlockNo, Other.InstNo, Other.LocNo);
341 }
342
343 bool operator!=(const ValueIDNum &Other) const { return !(*this == Other); }
344
345 std::string asString(const std::string &mlocname) const {
346 return Twine("Value{bb: ")
347 .concat(Twine(BlockNo).concat(
348 Twine(", inst: ")
349 .concat((InstNo ? Twine(InstNo) : Twine("live-in"))
350 .concat(Twine(", loc: ").concat(Twine(mlocname)))
351 .concat(Twine("}")))))
352 .str();
353 }
354
355 static ValueIDNum EmptyValue;
356};
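A standalone sketch of the 20/20/24-bit packing scheme ValueIDNum uses above, written with plain integers rather than the real bitfield class; the helper names are illustrative only:

    #include <cassert>
    #include <cstdint>

    constexpr unsigned NumLocBits = 24;

    // Pack (block, inst, loc) into one uint64_t: 20 bits of block number,
    // 20 bits of instruction number, 24 bits of machine location index.
    uint64_t pack(uint64_t Block, uint64_t Inst, uint64_t Loc) {
      assert(Block < (1u << 20) && Inst < (1u << 20) && Loc < (1u << 24));
      return (Block << 44) | (Inst << NumLocBits) | Loc;
    }

    // Instruction number zero marks a PHI / block live-in value, so the
    // live-in value of location 7 in block 3 would be pack(3, 0, 7).
    bool isPHISketch(uint64_t Packed) {
      return ((Packed >> NumLocBits) & 0xFFFFF) == 0;
    }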
357
358} // end anonymous namespace
359
360namespace {
361
362/// Meta qualifiers for a value. Pair of whatever expression is used to qualify
363/// the value, and a Boolean of whether or not it's indirect.
364class DbgValueProperties {
365public:
366 DbgValueProperties(const DIExpression *DIExpr, bool Indirect)
367 : DIExpr(DIExpr), Indirect(Indirect) {}
368
369 /// Extract properties from an existing DBG_VALUE instruction.
370 DbgValueProperties(const MachineInstr &MI) {
371     assert(MI.isDebugValue());
372 DIExpr = MI.getDebugExpression();
373 Indirect = MI.getOperand(1).isImm();
374 }
375
376 bool operator==(const DbgValueProperties &Other) const {
377 return std::tie(DIExpr, Indirect) == std::tie(Other.DIExpr, Other.Indirect);
378 }
379
380 bool operator!=(const DbgValueProperties &Other) const {
381 return !(*this == Other);
382 }
383
384 const DIExpression *DIExpr;
385 bool Indirect;
386};
387
388/// Tracker for what values are in machine locations. Listens to the Things
389/// being Done by various instructions, and maintains a table of what machine
390/// locations have what values (as defined by a ValueIDNum).
391///
392/// There are potentially a much larger number of machine locations on the
393/// target machine than the actual working-set size of the function. On x86 for
394/// example, we're extremely unlikely to want to track values through control
395/// or debug registers. To avoid doing so, MLocTracker has several layers of
396/// indirection going on, with two kinds of ``location'':
397/// * A LocID uniquely identifies a register or spill location, with a
398/// predictable value.
399/// * A LocIdx is a key (in the database sense) for a LocID and a ValueIDNum.
400/// Whenever a location is def'd or used by a MachineInstr, we automagically
401/// create a new LocIdx for a location, but not otherwise. This ensures we only
402/// account for locations that are actually used or defined. The cost is another
403/// vector lookup (of LocID -> LocIdx) over any other implementation. This is
404/// fairly cheap, and the compiler tries to reduce the working-set at any one
405/// time in the function anyway.
406///
407/// Register mask operands completely blow this out of the water; I've just
408/// piled hacks on top of hacks to get around that.
409class MLocTracker {
410public:
411 MachineFunction &MF;
412 const TargetInstrInfo &TII;
413 const TargetRegisterInfo &TRI;
414 const TargetLowering &TLI;
415
416 /// IndexedMap type, mapping from LocIdx to ValueIDNum.
417 using LocToValueType = IndexedMap<ValueIDNum, LocIdxToIndexFunctor>;
418
419 /// Map of LocIdxes to the ValueIDNums that they store. This is tightly
420 /// packed, entries only exist for locations that are being tracked.
421 LocToValueType LocIdxToIDNum;
422
423 /// "Map" of machine location IDs (i.e., raw register or spill number) to the
424 /// LocIdx key / number for that location. There are always at least as many
425 /// as the number of registers on the target -- if the value in the register
426 /// is not being tracked, then the LocIdx value will be zero. New entries are
427 /// appended if a new spill slot begins being tracked.
428 /// This, and the corresponding reverse map, persist for the analysis of the
429 /// whole function, and are necessary for decoding various vectors of
430 /// values.
431 std::vector<LocIdx> LocIDToLocIdx;
432
433 /// Inverse map of LocIDToLocIdx.
434 IndexedMap<unsigned, LocIdxToIndexFunctor> LocIdxToLocID;
435
436 /// Unique-ification of spill slots. Used to number them -- their LocID
437 /// number is the index in SpillLocs minus one plus NumRegs.
438 UniqueVector<SpillLoc> SpillLocs;
439
440 // If we discover a new machine location, assign it an mphi with this
441 // block number.
442 unsigned CurBB;
443
444 /// Cached local copy of the number of registers the target has.
445 unsigned NumRegs;
446
447 /// Collection of register mask operands that have been observed. Second part
448 /// of pair indicates the instruction that they happened in. Used to
449 /// reconstruct where defs happened if we start tracking a location later
450 /// on.
451 SmallVector<std::pair<const MachineOperand *, unsigned>, 32> Masks;
452
453 /// Iterator for locations and the values they contain. Dereferencing
454 /// produces a struct/pair containing the LocIdx key for this location,
455 /// and a reference to the value currently stored. Simplifies the process
456 /// of seeking a particular location.
457 class MLocIterator {
458 LocToValueType &ValueMap;
459 LocIdx Idx;
460
461 public:
462 class value_type {
463 public:
464 value_type(LocIdx Idx, ValueIDNum &Value) : Idx(Idx), Value(Value) { }
465 const LocIdx Idx; /// Read-only index of this location.
466 ValueIDNum &Value; /// Reference to the stored value at this location.
467 };
468
469 MLocIterator(LocToValueType &ValueMap, LocIdx Idx)
470 : ValueMap(ValueMap), Idx(Idx) { }
471
472 bool operator==(const MLocIterator &Other) const {
473       assert(&ValueMap == &Other.ValueMap);
474 return Idx == Other.Idx;
475 }
476
477 bool operator!=(const MLocIterator &Other) const {
478 return !(*this == Other);
479 }
480
481 void operator++() {
482 Idx = LocIdx(Idx.asU64() + 1);
483 }
484
485 value_type operator*() {
486 return value_type(Idx, ValueMap[LocIdx(Idx)]);
487 }
488 };
489
490 MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII,
491 const TargetRegisterInfo &TRI, const TargetLowering &TLI)
492 : MF(MF), TII(TII), TRI(TRI), TLI(TLI),
493 LocIdxToIDNum(ValueIDNum::EmptyValue),
494 LocIdxToLocID(0) {
495 NumRegs = TRI.getNumRegs();
496 reset();
497 LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc());
498     assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure
499
500 // Always track SP. This avoids the implicit clobbering caused by regmasks
501     // from affecting its values. (LiveDebugValues disbelieves calls and
502 // regmasks that claim to clobber SP).
503 Register SP = TLI.getStackPointerRegisterToSaveRestore();
504 if (SP) {
505 unsigned ID = getLocID(SP, false);
506 (void)lookupOrTrackRegister(ID);
507 }
508 }
509
510 /// Produce location ID number for indexing LocIDToLocIdx. Takes the register
511 /// or spill number, and flag for whether it's a spill or not.
512 unsigned getLocID(Register RegOrSpill, bool isSpill) {
513 return (isSpill) ? RegOrSpill.id() + NumRegs - 1 : RegOrSpill.id();
514 }
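A standalone sketch of the flat ID space getLocID defines above: registers keep their own numbers, and spill slot N (spill IDs are one-based) maps to NumRegs + N - 1, so registers and spills share one ID space that LocIDToLocIdx can index. With, say, NumRegs == 300, register 17 maps to ID 17 and the first spill slot to ID 300. The function name below is hypothetical:

    // Registers: 0 .. NumRegs-1.  Spill slot N (1-based): NumRegs + N - 1.
    unsigned locIDSketch(unsigned RegOrSpillNum, bool IsSpill, unsigned NumRegs) {
      return IsSpill ? RegOrSpillNum + NumRegs - 1 : RegOrSpillNum;
    }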
515
516 /// Accessor for reading the value at Idx.
517 ValueIDNum getNumAtPos(LocIdx Idx) const {
518     assert(Idx.asU64() < LocIdxToIDNum.size());
519 return LocIdxToIDNum[Idx];
520 }
521
522 unsigned getNumLocs(void) const { return LocIdxToIDNum.size(); }
523
524 /// Reset all locations to contain a PHI value at the designated block. Used
525   /// sometimes for actual PHI values, other times to indicate the block entry
526 /// value (before any more information is known).
527 void setMPhis(unsigned NewCurBB) {
528 CurBB = NewCurBB;
529 for (auto Location : locations())
530 Location.Value = {CurBB, 0, Location.Idx};
531 }
532
533 /// Load values for each location from array of ValueIDNums. Take current
534 /// bbnum just in case we read a value from a hitherto untouched register.
535 void loadFromArray(ValueIDNum *Locs, unsigned NewCurBB) {
536 CurBB = NewCurBB;
537 // Iterate over all tracked locations, and load each locations live-in
538 // value into our local index.
539 for (auto Location : locations())
540 Location.Value = Locs[Location.Idx.asU64()];
541 }
542
543 /// Wipe any un-necessary location records after traversing a block.
544 void reset(void) {
545 // We could reset all the location values too; however either loadFromArray
546 // or setMPhis should be called before this object is re-used. Just
547 // clear Masks, they're definitely not needed.
548 Masks.clear();
549 }
550
551 /// Clear all data. Destroys the LocID <=> LocIdx map, which makes most of
552 /// the information in this pass uninterpretable.
553 void clear(void) {
554 reset();
555 LocIDToLocIdx.clear();
556 LocIdxToLocID.clear();
557 LocIdxToIDNum.clear();
558 //SpillLocs.reset(); XXX UniqueVector::reset assumes a SpillLoc casts from 0
559 SpillLocs = decltype(SpillLocs)();
560
561 LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc());
562 }
563
564   /// Set a location to a certain value.
565 void setMLoc(LocIdx L, ValueIDNum Num) {
566     assert(L.asU64() < LocIdxToIDNum.size());
567 LocIdxToIDNum[L] = Num;
568 }
569
570 /// Create a LocIdx for an untracked register ID. Initialize it to either an
571 /// mphi value representing a live-in, or a recent register mask clobber.
572 LocIdx trackRegister(unsigned ID) {
573     assert(ID != 0);
574 LocIdx NewIdx = LocIdx(LocIdxToIDNum.size());
575 LocIdxToIDNum.grow(NewIdx);
576 LocIdxToLocID.grow(NewIdx);
577
578 // Default: it's an mphi.
579 ValueIDNum ValNum = {CurBB, 0, NewIdx};
580 // Was this reg ever touched by a regmask?
581 for (const auto &MaskPair : reverse(Masks)) {
582 if (MaskPair.first->clobbersPhysReg(ID)) {
583 // There was an earlier def we skipped.
584 ValNum = {CurBB, MaskPair.second, NewIdx};
585 break;
586 }
587 }
588
589 LocIdxToIDNum[NewIdx] = ValNum;
590 LocIdxToLocID[NewIdx] = ID;
591 return NewIdx;
592 }
593
594 LocIdx lookupOrTrackRegister(unsigned ID) {
595 LocIdx &Index = LocIDToLocIdx[ID];
596 if (Index.isIllegal())
597 Index = trackRegister(ID);
598 return Index;
599 }
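lookupOrTrackRegister above is the lazy half of the two-level indirection described in the class comment: an illegal-LocIdx sentinel marks "not tracked yet", and a dense index is only handed out the first time a register is actually touched, keeping the tracked set proportional to the function's working set. A standalone sketch of the pattern (plain C++, UINT_MAX standing in for the illegal sentinel, hypothetical names):

    #include <climits>
    #include <vector>

    struct LazyIndexMap {
      std::vector<unsigned> IDToIdx; // sparse ID -> dense index, UINT_MAX = untracked
      unsigned NumTracked = 0;

      explicit LazyIndexMap(unsigned NumIDs) : IDToIdx(NumIDs, UINT_MAX) {}

      unsigned lookupOrTrack(unsigned ID) {
        unsigned &Idx = IDToIdx[ID];
        if (Idx == UINT_MAX)
          Idx = NumTracked++; // first use of this ID: materialize a dense index
        return Idx;
      }
    };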
600
601 /// Record a definition of the specified register at the given block / inst.
602 /// This doesn't take a ValueIDNum, because the definition and its location
603 /// are synonymous.
604 void defReg(Register R, unsigned BB, unsigned Inst) {
605 unsigned ID = getLocID(R, false);
606 LocIdx Idx = lookupOrTrackRegister(ID);
607 ValueIDNum ValueID = {BB, Inst, Idx};
608 LocIdxToIDNum[Idx] = ValueID;
609 }
610
611 /// Set a register to a value number. To be used if the value number is
612 /// known in advance.
613 void setReg(Register R, ValueIDNum ValueID) {
614 unsigned ID = getLocID(R, false);
615 LocIdx Idx = lookupOrTrackRegister(ID);
616 LocIdxToIDNum[Idx] = ValueID;
617 }
618
619 ValueIDNum readReg(Register R) {
620 unsigned ID = getLocID(R, false);
621 LocIdx Idx = lookupOrTrackRegister(ID);
622 return LocIdxToIDNum[Idx];
623 }
624
625 /// Reset a register value to zero / empty. Needed to replicate the
626 /// VarLoc implementation where a copy to/from a register effectively
627 /// clears the contents of the source register. (Values can only have one
628 /// machine location in VarLocBasedImpl).
629 void wipeRegister(Register R) {
630 unsigned ID = getLocID(R, false);
631 LocIdx Idx = LocIDToLocIdx[ID];
632 LocIdxToIDNum[Idx] = ValueIDNum::EmptyValue;
633 }
634
635 /// Determine the LocIdx of an existing register.
636 LocIdx getRegMLoc(Register R) {
637 unsigned ID = getLocID(R, false);
638 return LocIDToLocIdx[ID];
639 }
640
641 /// Record a RegMask operand being executed. Defs any register we currently
642 /// track, stores a pointer to the mask in case we have to account for it
643 /// later.
644 void writeRegMask(const MachineOperand *MO, unsigned CurBB, unsigned InstID) {
645 // Ensure SP exists, so that we don't override it later.
646 Register SP = TLI.getStackPointerRegisterToSaveRestore();
647
648     // Def any register we track that isn't preserved. The regmask
649 // terminates the liveness of a register, meaning its value can't be
650 // relied upon -- we represent this by giving it a new value.
651 for (auto Location : locations()) {
652 unsigned ID = LocIdxToLocID[Location.Idx];
653 // Don't clobber SP, even if the mask says it's clobbered.
654 if (ID < NumRegs && ID != SP && MO->clobbersPhysReg(ID))
655 defReg(ID, CurBB, InstID);
656 }
657 Masks.push_back(std::make_pair(MO, InstID));
658 }
659
660 /// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked.
661 LocIdx getOrTrackSpillLoc(SpillLoc L) {
662 unsigned SpillID = SpillLocs.idFor(L);
663 if (SpillID == 0) {
664 SpillID = SpillLocs.insert(L);
665 unsigned L = getLocID(SpillID, true);
666 LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx
667 LocIdxToIDNum.grow(Idx);
668 LocIdxToLocID.grow(Idx);
669 LocIDToLocIdx.push_back(Idx);
670 LocIdxToLocID[Idx] = L;
671 return Idx;
672 } else {
673 unsigned L = getLocID(SpillID, true);
674 LocIdx Idx = LocIDToLocIdx[L];
675 return Idx;
676 }
677 }
678
679 /// Set the value stored in a spill slot.
680 void setSpill(SpillLoc L, ValueIDNum ValueID) {
681 LocIdx Idx = getOrTrackSpillLoc(L);
682 LocIdxToIDNum[Idx] = ValueID;
683 }
684
685 /// Read whatever value is in a spill slot, or None if it isn't tracked.
686 Optional<ValueIDNum> readSpill(SpillLoc L) {
687 unsigned SpillID = SpillLocs.idFor(L);
688 if (SpillID == 0)
689 return None;
690
691 unsigned LocID = getLocID(SpillID, true);
692 LocIdx Idx = LocIDToLocIdx[LocID];
693 return LocIdxToIDNum[Idx];
694 }
695
696 /// Determine the LocIdx of a spill slot. Return None if it previously
697 /// hasn't had a value assigned.
698 Optional<LocIdx> getSpillMLoc(SpillLoc L) {
699 unsigned SpillID = SpillLocs.idFor(L);
700 if (SpillID == 0)
701 return None;
702 unsigned LocNo = getLocID(SpillID, true);
703 return LocIDToLocIdx[LocNo];
704 }
705
706 /// Return true if Idx is a spill machine location.
707 bool isSpill(LocIdx Idx) const {
708 return LocIdxToLocID[Idx] >= NumRegs;
709 }
710
711 MLocIterator begin() {
712 return MLocIterator(LocIdxToIDNum, 0);
713 }
714
715 MLocIterator end() {
716 return MLocIterator(LocIdxToIDNum, LocIdxToIDNum.size());
717 }
718
719 /// Return a range over all locations currently tracked.
720 iterator_range<MLocIterator> locations() {
721 return llvm::make_range(begin(), end());
722 }
723
724 std::string LocIdxToName(LocIdx Idx) const {
725 unsigned ID = LocIdxToLocID[Idx];
726 if (ID >= NumRegs)
727 return Twine("slot ").concat(Twine(ID - NumRegs)).str();
728 else
729 return TRI.getRegAsmName(ID).str();
730 }
731
732 std::string IDAsString(const ValueIDNum &Num) const {
733 std::string DefName = LocIdxToName(Num.getLoc());
734 return Num.asString(DefName);
735 }
736
737   LLVM_DUMP_METHOD
738 void dump() {
739 for (auto Location : locations()) {
740 std::string MLocName = LocIdxToName(Location.Value.getLoc());
741 std::string DefName = Location.Value.asString(MLocName);
742 dbgs() << LocIdxToName(Location.Idx) << " --> " << DefName << "\n";
743 }
744 }
745
746   LLVM_DUMP_METHOD
747 void dump_mloc_map() {
748 for (auto Location : locations()) {
749 std::string foo = LocIdxToName(Location.Idx);
750 dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n";
751 }
752 }
753
754 /// Create a DBG_VALUE based on machine location \p MLoc. Qualify it with the
755   /// information in \p Properties, for variable Var. Don't insert it anywhere,
756 /// just return the builder for it.
757 MachineInstrBuilder emitLoc(Optional<LocIdx> MLoc, const DebugVariable &Var,
758 const DbgValueProperties &Properties) {
759 DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
760 Var.getVariable()->getScope(),
761 const_cast<DILocation *>(Var.getInlinedAt()));
762 auto MIB = BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE));
763
764 const DIExpression *Expr = Properties.DIExpr;
765 if (!MLoc) {
766 // No location -> DBG_VALUE $noreg
767 MIB.addReg(0, RegState::Debug);
768 MIB.addReg(0, RegState::Debug);
769 } else if (LocIdxToLocID[*MLoc] >= NumRegs) {
770 unsigned LocID = LocIdxToLocID[*MLoc];
771 const SpillLoc &Spill = SpillLocs[LocID - NumRegs + 1];
772
773 auto *TRI = MF.getSubtarget().getRegisterInfo();
774 Expr = TRI->prependOffsetExpression(Expr, DIExpression::ApplyOffset,
775 Spill.SpillOffset);
776 unsigned Base = Spill.SpillBase;
777 MIB.addReg(Base, RegState::Debug);
778 MIB.addImm(0);
779 } else {
780 unsigned LocID = LocIdxToLocID[*MLoc];
781 MIB.addReg(LocID, RegState::Debug);
782 if (Properties.Indirect)
783 MIB.addImm(0);
784 else
785 MIB.addReg(0, RegState::Debug);
786 }
787
788 MIB.addMetadata(Var.getVariable());
789 MIB.addMetadata(Expr);
790 return MIB;
791 }
792};
793
794/// Class recording the (high level) _value_ of a variable. Identifies either
795/// the value of the variable as a ValueIDNum, or a constant MachineOperand.
796/// This class also stores meta-information about how the value is qualified.
797/// Used to reason about variable values when performing the second
798/// (DebugVariable specific) dataflow analysis.
799class DbgValue {
800public:
801 union {
802 /// If Kind is Def, the value number that this value is based on.
803 ValueIDNum ID;
804 /// If Kind is Const, the MachineOperand defining this value.
805 MachineOperand MO;
806 /// For a NoVal DbgValue, which block it was generated in.
807 unsigned BlockNo;
808 };
809 /// Qualifiers for the ValueIDNum above.
810 DbgValueProperties Properties;
811
812 typedef enum {
813 Undef, // Represents a DBG_VALUE $noreg in the transfer function only.
814 Def, // This value is defined by an inst, or is a PHI value.
815 Const, // A constant value contained in the MachineOperand field.
816 Proposed, // This is a tentative PHI value, which may be confirmed or
817 // invalidated later.
818 NoVal // Empty DbgValue, generated during dataflow. BlockNo stores
819 // which block this was generated in.
820 } KindT;
821 /// Discriminator for whether this is a constant or an in-program value.
822 KindT Kind;
823
824 DbgValue(const ValueIDNum &Val, const DbgValueProperties &Prop, KindT Kind)
825 : ID(Val), Properties(Prop), Kind(Kind) {
826     assert(Kind == Def || Kind == Proposed);
827 }
828
829 DbgValue(unsigned BlockNo, const DbgValueProperties &Prop, KindT Kind)
830 : BlockNo(BlockNo), Properties(Prop), Kind(Kind) {
831     assert(Kind == NoVal);
832 }
833
834 DbgValue(const MachineOperand &MO, const DbgValueProperties &Prop, KindT Kind)
835 : MO(MO), Properties(Prop), Kind(Kind) {
836     assert(Kind == Const);
837 }
838
839 DbgValue(const DbgValueProperties &Prop, KindT Kind)
840 : Properties(Prop), Kind(Kind) {
841     assert(Kind == Undef &&
842            "Empty DbgValue constructor must pass in Undef kind");
843 }
844
845 void dump(const MLocTracker *MTrack) const {
846 if (Kind == Const) {
847 MO.dump();
848 } else if (Kind == NoVal) {
849 dbgs() << "NoVal(" << BlockNo << ")";
850 } else if (Kind == Proposed) {
851 dbgs() << "VPHI(" << MTrack->IDAsString(ID) << ")";
852 } else {
853       assert(Kind == Def);
854 dbgs() << MTrack->IDAsString(ID);
855 }
856 if (Properties.Indirect)
857 dbgs() << " indir";
858 if (Properties.DIExpr)
859 dbgs() << " " << *Properties.DIExpr;
860 }
861
862 bool operator==(const DbgValue &Other) const {
863 if (std::tie(Kind, Properties) != std::tie(Other.Kind, Other.Properties))
864 return false;
865 else if (Kind == Proposed && ID != Other.ID)
866 return false;
867 else if (Kind == Def && ID != Other.ID)
868 return false;
869 else if (Kind == NoVal && BlockNo != Other.BlockNo)
870 return false;
871 else if (Kind == Const)
872 return MO.isIdenticalTo(Other.MO);
873
874 return true;
875 }
876
877 bool operator!=(const DbgValue &Other) const { return !(*this == Other); }
878};
879
880/// Types for recording sets of variable fragments that overlap. For a given
881/// local variable, we record all other fragments of that variable that could
882/// overlap it, to reduce search time.
883using FragmentOfVar =
884 std::pair<const DILocalVariable *, DIExpression::FragmentInfo>;
885using OverlapMap =
886 DenseMap<FragmentOfVar, SmallVector<DIExpression::FragmentInfo, 1>>;
887
888/// Collection of DBG_VALUEs observed when traversing a block. Records each
889/// variable and the value the DBG_VALUE refers to. Requires the machine value
890/// location dataflow algorithm to have run already, so that values can be
891/// identified.
892class VLocTracker {
893public:
894 /// Map DebugVariable to the latest Value it's defined to have.
895 /// Needs to be a MapVector because we determine order-in-the-input-MIR from
896 /// the order in this container.
897 /// We only retain the last DbgValue in each block for each variable, to
898 /// determine the blocks live-out variable value. The Vars container forms the
899 /// transfer function for this block, as part of the dataflow analysis. The
900 /// movement of values between locations inside of a block is handled at a
901 /// much later stage, in the TransferTracker class.
902 MapVector<DebugVariable, DbgValue> Vars;
903 DenseMap<DebugVariable, const DILocation *> Scopes;
904 MachineBasicBlock *MBB;
905
906public:
907 VLocTracker() {}
908
909 void defVar(const MachineInstr &MI, const DbgValueProperties &Properties,
910 Optional<ValueIDNum> ID) {
911     assert(MI.isDebugValue() || MI.isDebugRef());
912 DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
913 MI.getDebugLoc()->getInlinedAt());
914 DbgValue Rec = (ID) ? DbgValue(*ID, Properties, DbgValue::Def)
915 : DbgValue(Properties, DbgValue::Undef);
916
917 // Attempt insertion; overwrite if it's already mapped.
918 auto Result = Vars.insert(std::make_pair(Var, Rec));
919 if (!Result.second)
920 Result.first->second = Rec;
921 Scopes[Var] = MI.getDebugLoc().get();
922 }
923
924 void defVar(const MachineInstr &MI, const MachineOperand &MO) {
925 // Only DBG_VALUEs can define constant-valued variables.
926     assert(MI.isDebugValue());
927 DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
928 MI.getDebugLoc()->getInlinedAt());
929 DbgValueProperties Properties(MI);
930 DbgValue Rec = DbgValue(MO, Properties, DbgValue::Const);
931
932 // Attempt insertion; overwrite if it's already mapped.
933 auto Result = Vars.insert(std::make_pair(Var, Rec));
934 if (!Result.second)
935 Result.first->second = Rec;
936 Scopes[Var] = MI.getDebugLoc().get();
937 }
938};
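As the comment on Vars above notes, only the last DbgValue per variable in a block is retained, which is what makes Vars the block's transfer function for the variable-value dataflow. A standalone sketch of that insert-or-overwrite behaviour over an order-preserving container (plain C++, hypothetical types standing in for MapVector and DbgValue):

    #include <string>
    #include <utility>
    #include <vector>

    struct OrderedLastWriteMap {
      std::vector<std::pair<std::string, int>> Entries; // first-seen order preserved

      void define(const std::string &Var, int Value) {
        for (auto &E : Entries)
          if (E.first == Var) {
            E.second = Value; // a later DBG_VALUE overwrites the earlier record
            return;
          }
        Entries.push_back({Var, Value}); // first definition of Var in this block
      }
    };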
939
940/// Tracker for converting machine value locations and variable values into
941/// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs
942/// specifying block live-in locations and transfers within blocks.
943///
944/// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker
945/// and must be initialized with the set of variable values that are live-in to
946/// the block. The caller then repeatedly calls process(). TransferTracker picks
947/// out variable locations for the live-in variable values (if there _is_ a
948/// location) and creates the corresponding DBG_VALUEs. Then, as the block is
949/// stepped through, transfers of values between machine locations are
950/// identified and if profitable, a DBG_VALUE created.
951///
952/// This is where debug use-before-defs would be resolved: a variable with an
953/// unavailable value could materialize in the middle of a block, when the
954/// value becomes available. Or, we could detect clobbers and re-specify the
955/// variable in a backup location. (XXX these are unimplemented).
956class TransferTracker {
957public:
958 const TargetInstrInfo *TII;
959 const TargetLowering *TLI;
960 /// This machine location tracker is assumed to always contain the up-to-date
961 /// value mapping for all machine locations. TransferTracker only reads
962 /// information from it. (XXX make it const?)
963 MLocTracker *MTracker;
964 MachineFunction &MF;
965 bool ShouldEmitDebugEntryValues;
966
967 /// Record of all changes in variable locations at a block position. Awkwardly
968 /// we allow inserting either before or after the point: MBB != nullptr
969 /// indicates it's before, otherwise after.
970 struct Transfer {
971     MachineBasicBlock::instr_iterator Pos; /// Position to insert DBG_VALUEs
972 MachineBasicBlock *MBB; /// non-null if we should insert after.
973 SmallVector<MachineInstr *, 4> Insts; /// Vector of DBG_VALUEs to insert.
974 };
975
976 struct LocAndProperties {
977 LocIdx Loc;
978 DbgValueProperties Properties;
979 };
980
981 /// Collection of transfers (DBG_VALUEs) to be inserted.
982 SmallVector<Transfer, 32> Transfers;
983
984 /// Local cache of what-value-is-in-what-LocIdx. Used to identify differences
985   /// between TransferTracker's view of variable locations and MLocTracker's. For
986 /// example, MLocTracker observes all clobbers, but TransferTracker lazily
987 /// does not.
988 std::vector<ValueIDNum> VarLocs;
989
990   /// Map from LocIdxes to which DebugVariables are based in that location.
991   /// Maintained while stepping through the block. Not accurate if
992 /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
993 std::map<LocIdx, SmallSet<DebugVariable, 4>> ActiveMLocs;
994
995   /// Map from DebugVariable to its current location and qualifying meta
996 /// information. To be used in conjunction with ActiveMLocs to construct
997 /// enough information for the DBG_VALUEs for a particular LocIdx.
998 DenseMap<DebugVariable, LocAndProperties> ActiveVLocs;
999
1000 /// Temporary cache of DBG_VALUEs to be entered into the Transfers collection.
1001 SmallVector<MachineInstr *, 4> PendingDbgValues;
1002
1003 /// Record of a use-before-def: created when a value that's live-in to the
1004 /// current block isn't available in any machine location, but it will be
1005 /// defined in this block.
1006 struct UseBeforeDef {
1007 /// Value of this variable, def'd in block.
1008 ValueIDNum ID;
1009 /// Identity of this variable.
1010 DebugVariable Var;
1011 /// Additional variable properties.
1012 DbgValueProperties Properties;
1013 };
1014
1015 /// Map from instruction index (within the block) to the set of UseBeforeDefs
1016 /// that become defined at that instruction.
1017 DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs;
1018
1019 /// The set of variables that are in UseBeforeDefs and can become a location
1020 /// once the relevant value is defined. An element being erased from this
1021 /// collection prevents the use-before-def materializing.
1022 DenseSet<DebugVariable> UseBeforeDefVariables;
1023
1024 const TargetRegisterInfo &TRI;
1025 const BitVector &CalleeSavedRegs;
1026
1027 TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker,
1028 MachineFunction &MF, const TargetRegisterInfo &TRI,
1029 const BitVector &CalleeSavedRegs, const TargetPassConfig &TPC)
1030 : TII(TII), MTracker(MTracker), MF(MF), TRI(TRI),
1031 CalleeSavedRegs(CalleeSavedRegs) {
1032 TLI = MF.getSubtarget().getTargetLowering();
1033 auto &TM = TPC.getTM<TargetMachine>();
1034 ShouldEmitDebugEntryValues = TM.Options.ShouldEmitDebugEntryValues();
1035 }
1036
1037 /// Load object with live-in variable values. \p mlocs contains the live-in
1038 /// values in each machine location, while \p vlocs the live-in variable
1039 /// values. This method picks variable locations for the live-in variables,
1040 /// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
1041 /// object fields to track variable locations as we step through the block.
1042 /// FIXME: could just examine mloctracker instead of passing in \p mlocs?
1043 void loadInlocs(MachineBasicBlock &MBB, ValueIDNum *MLocs,
1044 SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
1045 unsigned NumLocs) {
1046 ActiveMLocs.clear();
1047 ActiveVLocs.clear();
1048 VarLocs.clear();
1049 VarLocs.reserve(NumLocs);
1050 UseBeforeDefs.clear();
1051 UseBeforeDefVariables.clear();
1052
1053 auto isCalleeSaved = [&](LocIdx L) {
1054 unsigned Reg = MTracker->LocIdxToLocID[L];
1055 if (Reg >= MTracker->NumRegs)
1056 return false;
1057 for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
1058 if (CalleeSavedRegs.test(*RAI))
1059 return true;
1060 return false;
1061 };
1062
1063 // Map of the preferred location for each value.
1064 std::map<ValueIDNum, LocIdx> ValueToLoc;
1065
1066 // Produce a map of value numbers to the current machine locs they live
1067 // in. When emulating VarLocBasedImpl, there should only be one
1068 // location; when not, we get to pick.
1069 for (auto Location : MTracker->locations()) {
1070 LocIdx Idx = Location.Idx;
1071 ValueIDNum &VNum = MLocs[Idx.asU64()];
1072 VarLocs.push_back(VNum);
1073 auto it = ValueToLoc.find(VNum);
1074 // In order of preference, pick:
1075 // * Callee saved registers,
1076 // * Other registers,
1077 // * Spill slots.
1078 if (it == ValueToLoc.end() || MTracker->isSpill(it->second) ||
1079 (!isCalleeSaved(it->second) && isCalleeSaved(Idx.asU64()))) {
1080 // Insert, or overwrite if insertion failed.
1081 auto PrefLocRes = ValueToLoc.insert(std::make_pair(VNum, Idx));
1082 if (!PrefLocRes.second)
1083 PrefLocRes.first->second = Idx;
1084 }
1085 }
1086
1087 // Now map variables to their picked LocIdxes.
1088 for (auto Var : VLocs) {
1089 if (Var.second.Kind == DbgValue::Const) {
1090 PendingDbgValues.push_back(
1091 emitMOLoc(Var.second.MO, Var.first, Var.second.Properties));
1092 continue;
1093 }
1094
1095 // If the value has no location, we can't make a variable location.
1096 const ValueIDNum &Num = Var.second.ID;
1097 auto ValuesPreferredLoc = ValueToLoc.find(Num);
1098 if (ValuesPreferredLoc == ValueToLoc.end()) {
1099 // If it's a def that occurs in this block, register it as a
1100 // use-before-def to be resolved as we step through the block.
1101 if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI())
1102 addUseBeforeDef(Var.first, Var.second.Properties, Num);
1103 else
1104 recoverAsEntryValue(Var.first, Var.second.Properties, Num);
1105 continue;
1106 }
1107
1108 LocIdx M = ValuesPreferredLoc->second;
1109 auto NewValue = LocAndProperties{M, Var.second.Properties};
1110 auto Result = ActiveVLocs.insert(std::make_pair(Var.first, NewValue));
1111 if (!Result.second)
1112 Result.first->second = NewValue;
1113 ActiveMLocs[M].insert(Var.first);
1114 PendingDbgValues.push_back(
1115 MTracker->emitLoc(M, Var.first, Var.second.Properties));
1116 }
1117 flushDbgValues(MBB.begin(), &MBB);
1118 }
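The loop near the top of loadInlocs implements the preference order stated in its comment: for each value, a callee-saved register beats any other register, which beats a spill slot, and ties keep the first location seen. A standalone sketch of that selection rule (plain C++, illustrative names, not the pass's real types):

    #include <map>
    #include <vector>

    enum class LocKind { CalleeSavedReg, OtherReg, Spill };

    // Lower is better; mirrors "callee saved registers, other registers, spill slots".
    static int preference(LocKind K) {
      switch (K) {
      case LocKind::CalleeSavedReg: return 0;
      case LocKind::OtherReg:       return 1;
      case LocKind::Spill:          return 2;
      }
      return 3;
    }

    struct Loc { unsigned Idx; LocKind Kind; unsigned ValueID; };

    std::map<unsigned, Loc> pickPreferredLocs(const std::vector<Loc> &Locs) {
      std::map<unsigned, Loc> Best; // value ID -> best location seen so far
      for (const Loc &L : Locs) {
        auto It = Best.find(L.ValueID);
        if (It == Best.end() || preference(L.Kind) < preference(It->second.Kind))
          Best[L.ValueID] = L; // strictly better location replaces the old pick
      }
      return Best;
    }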
1119
1120 /// Record that \p Var has value \p ID, a value that becomes available
1121 /// later in the function.
1122 void addUseBeforeDef(const DebugVariable &Var,
1123 const DbgValueProperties &Properties, ValueIDNum ID) {
1124 UseBeforeDef UBD = {ID, Var, Properties};
1125 UseBeforeDefs[ID.getInst()].push_back(UBD);
1126 UseBeforeDefVariables.insert(Var);
1127 }
1128
1129 /// After the instruction at index \p Inst and position \p pos has been
1130 /// processed, check whether it defines a variable value in a use-before-def.
1131 /// If so, and the variable value hasn't changed since the start of the
1132 /// block, create a DBG_VALUE.
1133 void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) {
1134 auto MIt = UseBeforeDefs.find(Inst);
1135 if (MIt == UseBeforeDefs.end())
1136 return;
1137
1138 for (auto &Use : MIt->second) {
1139 LocIdx L = Use.ID.getLoc();
1140
1141 // If something goes very wrong, we might end up labelling a COPY
1142 // instruction or similar with an instruction number, where it doesn't
1143 // actually define a new value, instead it moves a value. In case this
1144 // happens, discard.
1145 if (MTracker->LocIdxToIDNum[L] != Use.ID)
1146 continue;
1147
1148 // If a different debug instruction defined the variable value / location
1149 // since the start of the block, don't materialize this use-before-def.
1150 if (!UseBeforeDefVariables.count(Use.Var))
1151 continue;
1152
1153 PendingDbgValues.push_back(MTracker->emitLoc(L, Use.Var, Use.Properties));
1154 }
1155 flushDbgValues(pos, nullptr);
1156 }
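addUseBeforeDef and checkInstForNewValues above are the two halves of the use-before-def bookkeeping: records are keyed by the number of the instruction that will define the value, and they are replayed after that instruction only if the variable has not been redefined since block entry and the value really landed in the expected location. A standalone sketch of the bookkeeping (plain C++, hypothetical names, omitting the location check):

    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    struct PendingUse { std::string Var; unsigned DefInst; };

    std::map<unsigned, std::vector<PendingUse>> PendingByInst;
    std::set<std::string> StillWanted; // erased if the variable is redefined first

    void addUseBeforeDefSketch(const std::string &Var, unsigned DefInst) {
      PendingByInst[DefInst].push_back({Var, DefInst});
      StillWanted.insert(Var);
    }

    void onInstructionProcessed(unsigned Inst,
                                std::vector<std::string> &EmittedDbgValues) {
      auto It = PendingByInst.find(Inst);
      if (It == PendingByInst.end())
        return;
      for (const PendingUse &U : It->second)
        if (StillWanted.count(U.Var))        // not redefined since block entry
          EmittedDbgValues.push_back(U.Var); // a DBG_VALUE would be emitted here
    }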
1157
1158 /// Helper to move created DBG_VALUEs into Transfers collection.
1159 void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) {
1160 if (PendingDbgValues.size() == 0)
1161 return;
1162
1163 // Pick out the instruction start position.
1164 MachineBasicBlock::instr_iterator BundleStart;
1165 if (MBB && Pos == MBB->begin())
1166 BundleStart = MBB->instr_begin();
1167 else
1168 BundleStart = getBundleStart(Pos->getIterator());
1169
1170 Transfers.push_back({BundleStart, MBB, PendingDbgValues});
1171 PendingDbgValues.clear();
1172 }
1173
1174 bool isEntryValueVariable(const DebugVariable &Var,
1175 const DIExpression *Expr) const {
1176 if (!Var.getVariable()->isParameter())
1177 return false;
1178
1179 if (Var.getInlinedAt())
1180 return false;
1181
1182 if (Expr->getNumElements() > 0)
1183 return false;
1184
1185 return true;
1186 }
1187
1188 bool isEntryValueValue(const ValueIDNum &Val) const {
1189 // Must be in entry block (block number zero), and be a PHI / live-in value.
1190 if (Val.getBlock() || !Val.isPHI())
1191 return false;
1192
1193 // Entry values must enter in a register.
1194 if (MTracker->isSpill(Val.getLoc()))
1195 return false;
1196
1197 Register SP = TLI->getStackPointerRegisterToSaveRestore();
1198 Register FP = TRI.getFrameRegister(MF);
1199 Register Reg = MTracker->LocIdxToLocID[Val.getLoc()];
1200 return Reg != SP && Reg != FP;
1201 }
1202
1203 bool recoverAsEntryValue(const DebugVariable &Var, DbgValueProperties &Prop,
1204 const ValueIDNum &Num) {
1205 // Is this variable location a candidate to be an entry value. First,
1206 // should we be trying this at all?
1207 if (!ShouldEmitDebugEntryValues)
1208 return false;
1209
1210 // Is the variable appropriate for entry values (i.e., is a parameter).
1211 if (!isEntryValueVariable(Var, Prop.DIExpr))
1212 return false;
1213
1214 // Is the value assigned to this variable still the entry value?
1215 if (!isEntryValueValue(Num))
1216 return false;
1217
1218 // Emit a variable location using an entry value expression.
1219 DIExpression *NewExpr =
1220 DIExpression::prepend(Prop.DIExpr, DIExpression::EntryValue);
1221 Register Reg = MTracker->LocIdxToLocID[Num.getLoc()];
1222 MachineOperand MO = MachineOperand::CreateReg(Reg, false);
1223 MO.setIsDebug(true);
1224
1225 PendingDbgValues.push_back(emitMOLoc(MO, Var, {NewExpr, Prop.Indirect}));
1226 return true;
1227 }
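
recoverAsEntryValue chains a handful of cheap eligibility tests before emitting an entry-value expression. A minimal self-contained sketch of that gating logic, with placeholder predicates and toy structs (not the LLVM API used above):

// Hypothetical stand-ins for the properties checked above.
struct ToyVar { bool IsParameter; bool IsInlined; unsigned NumExprElements; };
struct ToyVal { unsigned Block; bool IsPHI; bool InSpill; bool InSPorFP; };

// Mirrors isEntryValueVariable: parameters only, no inlining, empty expression.
static bool eligibleVariable(const ToyVar &V) {
  return V.IsParameter && !V.IsInlined && V.NumExprElements == 0;
}

// Mirrors isEntryValueValue: a live-in PHI of block zero, held in a register
// that is neither the stack pointer nor the frame pointer.
static bool eligibleValue(const ToyVal &V) {
  return V.Block == 0 && V.IsPHI && !V.InSpill && !V.InSPorFP;
}

static bool shouldEmitEntryValue(bool EnableEntryValues, const ToyVar &Var,
                                 const ToyVal &Val) {
  return EnableEntryValues && eligibleVariable(Var) && eligibleValue(Val);
}
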
1228
1229 /// Change a variable value after encountering a DBG_VALUE inside a block.
1230 void redefVar(const MachineInstr &MI) {
1231 DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
1232 MI.getDebugLoc()->getInlinedAt());
1233 DbgValueProperties Properties(MI);
1234
1235 const MachineOperand &MO = MI.getOperand(0);
1236
1237 // Ignore non-register locations, we don't transfer those.
1238 if (!MO.isReg() || MO.getReg() == 0) {
1239 auto It = ActiveVLocs.find(Var);
1240 if (It != ActiveVLocs.end()) {
1241 ActiveMLocs[It->second.Loc].erase(Var);
1242 ActiveVLocs.erase(It);
1243 }
1244 // Any use-before-defs no longer apply.
1245 UseBeforeDefVariables.erase(Var);
1246 return;
1247 }
1248
1249 Register Reg = MO.getReg();
1250 LocIdx NewLoc = MTracker->getRegMLoc(Reg);
1251 redefVar(MI, Properties, NewLoc);
1252 }
1253
1254 /// Handle a change in variable location within a block. Terminate the
1255 /// variable's current location, and record the value it now refers to, so
1256 /// that we can detect location transfers later on.
1257 void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties,
1258 Optional<LocIdx> OptNewLoc) {
1259 DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
1260 MI.getDebugLoc()->getInlinedAt());
1261 // Any use-before-defs no longer apply.
1262 UseBeforeDefVariables.erase(Var);
1263
1264 // Erase any previous location.
1265 auto It = ActiveVLocs.find(Var);
1266 if (It != ActiveVLocs.end())
1267 ActiveMLocs[It->second.Loc].erase(Var);
1268
1269 // If there _is_ no new location, all we had to do was erase.
1270 if (!OptNewLoc)
1271 return;
1272 LocIdx NewLoc = *OptNewLoc;
1273
1274 // Check whether our local copy of values-by-location in #VarLocs is out of
1275 // date. Wipe old tracking data for the location if it's been clobbered in
1276 // the meantime.
1277 if (MTracker->getNumAtPos(NewLoc) != VarLocs[NewLoc.asU64()]) {
1278 for (auto &P : ActiveMLocs[NewLoc]) {
1279 ActiveVLocs.erase(P);
1280 }
1281 ActiveMLocs[NewLoc.asU64()].clear();
1282 VarLocs[NewLoc.asU64()] = MTracker->getNumAtPos(NewLoc);
1283 }
1284
1285 ActiveMLocs[NewLoc].insert(Var);
1286 if (It == ActiveVLocs.end()) {
1287 ActiveVLocs.insert(
1288 std::make_pair(Var, LocAndProperties{NewLoc, Properties}));
1289 } else {
1290 It->second.Loc = NewLoc;
1291 It->second.Properties = Properties;
1292 }
1293 }
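
The redefVar overloads above keep a forward map (variable to location) and a reverse map (location to the set of variables based on it) in sync. A small self-contained sketch of that two-sided bookkeeping with toy types (ToyVarTracker is illustrative only):

#include <map>
#include <set>
#include <string>

// Toy mirror of the ActiveVLocs / ActiveMLocs pair: when a variable is
// redefined, drop it from the old location's reverse entry before recording
// the new location on both sides.
struct ToyVarTracker {
  std::map<std::string, unsigned> VarToLoc;             // ~ ActiveVLocs
  std::map<unsigned, std::set<std::string>> LocToVars;  // ~ ActiveMLocs

  void redef(const std::string &Var, unsigned NewLoc) {
    auto It = VarToLoc.find(Var);
    if (It != VarToLoc.end())
      LocToVars[It->second].erase(Var); // remove stale reverse entry
    VarToLoc[Var] = NewLoc;
    LocToVars[NewLoc].insert(Var);
  }
};
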
1294
1295 /// Account for a location \p mloc being clobbered. Examine the variable
1296 /// locations that will be terminated, and try to recover them by using
1297 /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to
1298 /// explicitly terminate a location if it can't be recovered.
1299 void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos,
1300 bool MakeUndef = true) {
1301 auto ActiveMLocIt = ActiveMLocs.find(MLoc);
1302 if (ActiveMLocIt == ActiveMLocs.end())
1303 return;
1304
1305 // What was the old variable value?
1306 ValueIDNum OldValue = VarLocs[MLoc.asU64()];
1307 VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue;
1308
1309 // Examine the remaining variable locations: if we can find the same value
1310 // again, we can recover the location.
1311 Optional<LocIdx> NewLoc = None;
1312 for (auto Loc : MTracker->locations())
1313 if (Loc.Value == OldValue)
1314 NewLoc = Loc.Idx;
1315
1316 // If there is no location, and we weren't asked to make the variable
1317 // explicitly undef, then stop here.
1318 if (!NewLoc && !MakeUndef) {
1319 // Try and recover a few more locations with entry values.
1320 for (auto &Var : ActiveMLocIt->second) {
1321 auto &Prop = ActiveVLocs.find(Var)->second.Properties;
1322 recoverAsEntryValue(Var, Prop, OldValue);
1323 }
1324 flushDbgValues(Pos, nullptr);
1325 return;
1326 }
1327
1328 // Examine all the variables based on this location.
1329 DenseSet<DebugVariable> NewMLocs;
1330 for (auto &Var : ActiveMLocIt->second) {
1331 auto ActiveVLocIt = ActiveVLocs.find(Var);
1332 // Re-state the variable location: if there's no replacement then NewLoc
1333 // is None and a $noreg DBG_VALUE will be created. Otherwise, a DBG_VALUE
1334 // identifying the alternative location will be emitted.
1335 const DIExpression *Expr = ActiveVLocIt->second.Properties.DIExpr;
1336 DbgValueProperties Properties(Expr, false);
1337 PendingDbgValues.push_back(MTracker->emitLoc(NewLoc, Var, Properties));
1338
1339 // Update machine locations <=> variable locations maps. Defer updating
1340 // ActiveMLocs to avoid invalidating the ActiveMLocIt iterator.
1341 if (!NewLoc) {
1342 ActiveVLocs.erase(ActiveVLocIt);
1343 } else {
1344 ActiveVLocIt->second.Loc = *NewLoc;
1345 NewMLocs.insert(Var);
1346 }
1347 }
1348
1349 // Commit any deferred ActiveMLoc changes.
1350 if (!NewMLocs.empty())
1351 for (auto &Var : NewMLocs)
1352 ActiveMLocs[*NewLoc].insert(Var);
1353
1354 // We lazily track what locations have which values; if we've found a new
1355 // location for the clobbered value, remember it.
1356 if (NewLoc)
1357 VarLocs[NewLoc->asU64()] = OldValue;
1358
1359 flushDbgValues(Pos, nullptr);
1360
1361 ActiveMLocIt->second.clear();
1362 }
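
The recovery step in clobberMloc is essentially a search over all tracked locations for another copy of the clobbered value. A minimal self-contained sketch of that search, using plain maps rather than the MLocTracker interface:

#include <map>

// Toy version of the recovery search: look up the clobbered location's old
// value in every other tracked location; if some other location still holds
// it, report that location so the affected variables can be re-homed there.
static bool findBackupLocation(const std::map<unsigned, int> &ValueAtLoc,
                               int OldValue, unsigned ClobberedLoc,
                               unsigned &BackupLoc) {
  for (const auto &LV : ValueAtLoc) {
    if (LV.first == ClobberedLoc || LV.second != OldValue)
      continue;
    BackupLoc = LV.first;
    return true;
  }
  return false;
}
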
1363
1364 /// Transfer variables based on \p Src to be based on \p Dst. This handles
1365 /// register copies as well as spills and restores. Creates DBG_VALUEs
1366 /// describing the movement.
1367 void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) {
1368 // Does Src still contain the value num we expect? If not, it's been
1369 // clobbered in the meantime, and our variable locations are stale.
1370 if (VarLocs[Src.asU64()] != MTracker->getNumAtPos(Src))
1371 return;
1372
1373 // assert(ActiveMLocs[Dst].size() == 0);
1374 //^^^ Legitimate scenario on account of un-clobbered slot being assigned to?
1375 ActiveMLocs[Dst] = ActiveMLocs[Src];
1376 VarLocs[Dst.asU64()] = VarLocs[Src.asU64()];
1377
1378 // For each variable based on Src; create a location at Dst.
1379 for (auto &Var : ActiveMLocs[Src]) {
1380 auto ActiveVLocIt = ActiveVLocs.find(Var);
1381 assert(ActiveVLocIt != ActiveVLocs.end());
1382 ActiveVLocIt->second.Loc = Dst;
1383
1384 assert(Dst != 0);
1385 MachineInstr *MI =
1386 MTracker->emitLoc(Dst, Var, ActiveVLocIt->second.Properties);
1387 PendingDbgValues.push_back(MI);
1388 }
1389 ActiveMLocs[Src].clear();
1390 flushDbgValues(Pos, nullptr);
1391
1392 // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data
1393 // about the old location.
1394 if (EmulateOldLDV)
1395 VarLocs[Src.asU64()] = ValueIDNum::EmptyValue;
1396 }
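
transferMlocs refuses to migrate variable locations when its cached value for Src has gone stale. A toy sketch of that guard, under the assumption that a simple integer stands in for a value number (ToyTransferState is illustrative, not an LLVM type):

#include <map>
#include <set>
#include <string>

// Only migrate the variables based on Src if the value we last recorded for
// Src still matches what the location tracker says is there now; otherwise
// the bookkeeping is stale and nothing is moved.
struct ToyTransferState {
  std::map<unsigned, int> CachedValueAtLoc;             // ~ VarLocs
  std::map<unsigned, int> CurrentValueAtLoc;            // ~ getNumAtPos
  std::map<unsigned, std::set<std::string>> VarsAtLoc;  // ~ ActiveMLocs

  bool transfer(unsigned Src, unsigned Dst) {
    if (CachedValueAtLoc[Src] != CurrentValueAtLoc[Src])
      return false; // clobbered in the meantime; do nothing
    VarsAtLoc[Dst] = VarsAtLoc[Src];
    CachedValueAtLoc[Dst] = CachedValueAtLoc[Src];
    VarsAtLoc[Src].clear();
    return true;
  }
};
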
1397
1398 MachineInstrBuilder emitMOLoc(const MachineOperand &MO,
1399 const DebugVariable &Var,
1400 const DbgValueProperties &Properties) {
1401 DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
1402 Var.getVariable()->getScope(),
1403 const_cast<DILocation *>(Var.getInlinedAt()));
1404 auto MIB = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE));
1405 MIB.add(MO);
1406 if (Properties.Indirect)
1407 MIB.addImm(0);
1408 else
1409 MIB.addReg(0);
1410 MIB.addMetadata(Var.getVariable());
1411 MIB.addMetadata(Properties.DIExpr);
1412 return MIB;
1413 }
1414};
1415
1416class InstrRefBasedLDV : public LDVImpl {
1417private:
1418 using FragmentInfo = DIExpression::FragmentInfo;
1419 using OptFragmentInfo = Optional<DIExpression::FragmentInfo>;
1420
1421 // Helper while building OverlapMap, a map of all fragments seen for a given
1422 // DILocalVariable.
1423 using VarToFragments =
1424 DenseMap<const DILocalVariable *, SmallSet<FragmentInfo, 4>>;
1425
1426 /// Machine location/value transfer function, a mapping of which locations
1427 /// are assigned which new values.
1428 using MLocTransferMap = std::map<LocIdx, ValueIDNum>;
1429
1430 /// Live in/out structure for the variable values: a per-block map of
1431 /// variables to their values. XXX, better name?
1432 using LiveIdxT =
1433 DenseMap<const MachineBasicBlock *, DenseMap<DebugVariable, DbgValue> *>;
1434
1435 using VarAndLoc = std::pair<DebugVariable, DbgValue>;
1436
1437 /// Type for a live-in value: the predecessor block, and its value.
1438 using InValueT = std::pair<MachineBasicBlock *, DbgValue *>;
1439
1440 /// Vector (per block) of a collection (inner smallvector) of live-ins.
1441 /// Used as the result type for the variable value dataflow problem.
1442 using LiveInsT = SmallVector<SmallVector<VarAndLoc, 8>, 8>;
1443
1444 const TargetRegisterInfo *TRI;
1445 const TargetInstrInfo *TII;
1446 const TargetFrameLowering *TFI;
1447 const MachineFrameInfo *MFI;
1448 BitVector CalleeSavedRegs;
1449 LexicalScopes LS;
1450 TargetPassConfig *TPC;
1451
1452 /// Object to track machine locations as we step through a block. Could
1453 /// probably be a field rather than a pointer, as it's always used.
1454 MLocTracker *MTracker;
1455
1456 /// Number of the current block LiveDebugValues is stepping through.
1457 unsigned CurBB;
1458
1459 /// Number of the current instruction LiveDebugValues is evaluating.
1460 unsigned CurInst;
1461
1462 /// Variable tracker -- listens to DBG_VALUEs occurring as InstrRefBasedImpl
1463 /// steps through a block. Reads the values at each location from the
1464 /// MLocTracker object.
1465 VLocTracker *VTracker;
1466
1467 /// Tracker for transfers, listens to DBG_VALUEs and transfers of values
1468 /// between locations during stepping, creates new DBG_VALUEs when values move
1469 /// location.
1470 TransferTracker *TTracker;
1471
1472 /// Blocks which are artificial, i.e. blocks which exclusively contain
1473 /// instructions without DebugLocs, or with line 0 locations.
1474 SmallPtrSet<const MachineBasicBlock *, 16> ArtificialBlocks;
1475
1476 // Mapping of blocks to and from their RPOT order.
1477 DenseMap<unsigned int, MachineBasicBlock *> OrderToBB;
1478 DenseMap<MachineBasicBlock *, unsigned int> BBToOrder;
1479 DenseMap<unsigned, unsigned> BBNumToRPO;
1480
1481 /// Pair of MachineInstr, and its 1-based offset into the containing block.
1482 using InstAndNum = std::pair<const MachineInstr *, unsigned>;
1483 /// Map from debug instruction number to the MachineInstr labelled with that
1484 /// number, and its location within the function. Used to transform
1485 /// instruction numbers in DBG_INSTR_REFs into machine value numbers.
1486 std::map<uint64_t, InstAndNum> DebugInstrNumToInstr;
1487
1488 /// Record of where we observed a DBG_PHI instruction.
1489 class DebugPHIRecord {
1490 public:
1491 uint64_t InstrNum; ///< Instruction number of this DBG_PHI.
1492 MachineBasicBlock *MBB; ///< Block where DBG_PHI occurred.
1493 ValueIDNum ValueRead; ///< The value number read by the DBG_PHI.
1494 LocIdx ReadLoc; ///< Register/Stack location the DBG_PHI reads.
1495
1496 operator unsigned() const { return InstrNum; }
1497 };
1498
1499 /// Map from instruction numbers defined by DBG_PHIs to a record of what that
1500 /// DBG_PHI read and where. Populated and edited during the machine value
1501 /// location problem -- we use LLVM's SSA Updater to fix up changes made by
1502 /// optimizations that destroy PHI instructions.
1503 SmallVector<DebugPHIRecord, 32> DebugPHINumToValue;
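
The implicit conversion of DebugPHIRecord to its instruction number is what lets a sorted vector of records be searched with std::lower_bound against a plain integer key, as done later in transferDebugInstrRef. A self-contained sketch of that lookup idiom with toy types:

#include <algorithm>
#include <cstdint>
#include <vector>

// Toy mirror of DebugPHIRecord: the conversion operator makes the record
// comparable against a bare instruction number during binary search.
struct ToyPHIRecord {
  uint64_t InstrNum;
  int Payload;
  operator uint64_t() const { return InstrNum; }
};

// Assumes Sorted is ordered by InstrNum; returns the matching record or null.
static const ToyPHIRecord *findPHI(const std::vector<ToyPHIRecord> &Sorted,
                                   uint64_t InstrNum) {
  auto It = std::lower_bound(Sorted.begin(), Sorted.end(), InstrNum);
  if (It != Sorted.end() && It->InstrNum == InstrNum)
    return &*It;
  return nullptr;
}
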
1504
1505 // Map of overlapping variable fragments.
1506 OverlapMap OverlapFragments;
1507 VarToFragments SeenFragments;
1508
1509 /// Tests whether this instruction is a spill to a stack slot.
1510 bool isSpillInstruction(const MachineInstr &MI, MachineFunction *MF);
1511
1512 /// Decide if @MI is a spill instruction and return true if it is. We use 2
1513 /// criteria to make this decision:
1514 /// - Is this instruction a store to a spill slot?
1515 /// - Is there a register operand that is both used and killed?
1516 /// TODO: Store optimization can fold spills into other stores (including
1517 /// other spills). We do not handle this yet (more than one memory operand).
1518 bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF,
1519 unsigned &Reg);
1520
1521 /// If a given instruction is identified as a restore, return the spill slot
1522 /// being restored from and set \p Reg to the restored register.
1523 Optional<SpillLoc> isRestoreInstruction(const MachineInstr &MI,
1524 MachineFunction *MF, unsigned &Reg);
1525
1526 /// Given a spill instruction, extract the register and offset used to
1527 /// address the spill slot in a target independent way.
1528 SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);
1529
1530 /// Observe a single instruction while stepping through a block.
1531 void process(MachineInstr &MI, ValueIDNum **MLiveOuts = nullptr,
1532 ValueIDNum **MLiveIns = nullptr);
1533
1534 /// Examines whether \p MI is a DBG_VALUE and notifies trackers.
1535 /// \returns true if MI was recognized and processed.
1536 bool transferDebugValue(const MachineInstr &MI);
1537
1538 /// Examines whether \p MI is a DBG_INSTR_REF and notifies trackers.
1539 /// \returns true if MI was recognized and processed.
1540 bool transferDebugInstrRef(MachineInstr &MI, ValueIDNum **MLiveOuts,
1541 ValueIDNum **MLiveIns);
1542
1543 /// Stores value-information about where this PHI occurred, and what
1544 /// instruction number is associated with it.
1545 /// \returns true if MI was recognized and processed.
1546 bool transferDebugPHI(MachineInstr &MI);
1547
1548 /// Examines whether \p MI is a copy instruction, and notifies trackers.
1549 /// \returns true if MI was recognized and processed.
1550 bool transferRegisterCopy(MachineInstr &MI);
1551
1552 /// Examines whether \p MI is a stack spill or restore instruction, and
1553 /// notifies trackers. \returns true if MI was recognized and processed.
1554 bool transferSpillOrRestoreInst(MachineInstr &MI);
1555
1556 /// Examines \p MI for any registers that it defines, and notifies trackers.
1557 void transferRegisterDef(MachineInstr &MI);
1558
1559 /// Copy one location to the other, accounting for movement of subregisters
1560 /// too.
1561 void performCopy(Register Src, Register Dst);
1562
1563 void accumulateFragmentMap(MachineInstr &MI);
1564
1565 /// Determine the machine value number referred to by (potentially several)
1566 /// DBG_PHI instructions. Block duplication and tail folding can duplicate
1567 /// DBG_PHIs, shifting the position where values in registers merge, and
1568 /// forming another mini-ssa problem to solve.
1569 /// \p Here the position of a DBG_INSTR_REF seeking a machine value number
1570 /// \p InstrNum Debug instruction number defined by DBG_PHI instructions.
1571 /// \returns The machine value number at position Here, or None.
1572 Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
1573 ValueIDNum **MLiveOuts,
1574 ValueIDNum **MLiveIns, MachineInstr &Here,
1575 uint64_t InstrNum);
1576
1577 /// Step through the function, recording register definitions and movements
1578 /// in an MLocTracker. Convert the observations into a per-block transfer
1579 /// function in \p MLocTransfer, suitable for using with the machine value
1580 /// location dataflow problem.
1581 void
1582 produceMLocTransferFunction(MachineFunction &MF,
1583 SmallVectorImpl<MLocTransferMap> &MLocTransfer,
1584 unsigned MaxNumBlocks);
1585
1586 /// Solve the machine value location dataflow problem. Takes as input the
1587 /// transfer functions in \p MLocTransfer. Writes the output live-in and
1588 /// live-out arrays to the (initialized to zero) multidimensional arrays in
1589 /// \p MInLocs and \p MOutLocs. The outer dimension is indexed by block
1590 /// number, the inner by LocIdx.
1591 void mlocDataflow(ValueIDNum **MInLocs, ValueIDNum **MOutLocs,
1592 SmallVectorImpl<MLocTransferMap> &MLocTransfer);
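
The doc comment above describes the live-in and live-out tables as two-dimensional arrays indexed first by block number and then by LocIdx. A small self-contained sketch of allocating and indexing a table with that shape (toy value type, not ValueIDNum):

#include <cstdint>
#include <vector>

using ToyValue = uint64_t;

// Toy [NumBlocks][NumLocs] table: outer index is the block number, inner
// index is the location index, zero-initialized as the comment above assumes.
static std::vector<std::vector<ToyValue>> makeTable(unsigned NumBlocks,
                                                    unsigned NumLocs) {
  return std::vector<std::vector<ToyValue>>(NumBlocks,
                                            std::vector<ToyValue>(NumLocs, 0));
}

static ToyValue readLiveIn(const std::vector<std::vector<ToyValue>> &InLocs,
                           unsigned Block, unsigned Loc) {
  return InLocs[Block][Loc];
}
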
1593
1594 /// Perform a control flow join (lattice value meet) of the values in machine
1595 /// locations at \p MBB. Follows the algorithm described in the file-comment,
1596 /// reading live-outs of predecessors from \p OutLocs, the current live ins
1597 /// from \p InLocs, and assigning the newly computed live ins back into
1598 /// \p InLocs. \returns two bools -- the first indicates whether a change
1599 /// was made, the second whether a lattice downgrade occurred. If the latter
1600 /// is true, revisiting this block is necessary.
1601 std::tuple<bool, bool>
1602 mlocJoin(MachineBasicBlock &MBB,
1603 SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
1604 ValueIDNum **OutLocs, ValueIDNum *InLocs);
1605
1606 /// Solve the variable value dataflow problem, for a single lexical scope.
1607 /// Uses the algorithm from the file comment to resolve control flow joins,
1608 /// although there are extra hacks, see vlocJoin. Reads the
1609 /// locations of values from the \p MInLocs and \p MOutLocs arrays (see
1610 /// mlocDataflow) and reads the variable values transfer function from
1611 /// \p AllTheVlocs. Live-in and Live-out variable values are stored locally,
1612 /// with the live-ins permanently stored to \p Output once the fixedpoint is
1613 /// reached.
1614 /// \p VarsWeCareAbout contains a collection of the variables in \p Scope
1615 /// that we should be tracking.
1616 /// \p AssignBlocks contains the set of blocks that aren't in \p Scope, but
1617 /// which do contain DBG_VALUEs, which VarLocBasedImpl tracks locations
1618 /// through.
1619 void vlocDataflow(const LexicalScope *Scope, const DILocation *DILoc,
1620 const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
1621 SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks,
1622 LiveInsT &Output, ValueIDNum **MOutLocs,
1623 ValueIDNum **MInLocs,
1624 SmallVectorImpl<VLocTracker> &AllTheVLocs);
1625
1626 /// Compute the live-ins to a block, considering control flow merges according
1627 /// to the method in the file comment. Live out and live in variable values
1628 /// are stored in \p VLOCOutLocs and \p VLOCInLocs. The live-ins for \p MBB
1629 /// are computed and stored into \p VLOCInLocs. \returns true if the live-ins
1630 /// are modified.
1631 /// \p InLocsT Output argument, storage for calculated live-ins.
1632 /// \returns two bools -- the first indicates whether a change
1633 /// was made, the second whether a lattice downgrade occurred. If the latter
1634 /// is true, revisiting this block is necessary.
1635 std::tuple<bool, bool>
1636 vlocJoin(MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, LiveIdxT &VLOCInLocs,
1637 SmallPtrSet<const MachineBasicBlock *, 16> *VLOCVisited,
1638 unsigned BBNum, const SmallSet<DebugVariable, 4> &AllVars,
1639 ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
1640 SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks,
1641 SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
1642 DenseMap<DebugVariable, DbgValue> &InLocsT);
1643
1644 /// Continue exploration of the variable-value lattice, as explained in the
1645 /// file-level comment. \p OldLiveInLocation contains the current
1646 /// exploration position, from which we need to descend further. \p Values
1647 /// contains the set of live-in values, \p CurBlockRPONum the RPO number of
1648 /// the current block, and \p CandidateLocations a set of locations that
1649 /// should be considered as PHI locations, if we reach the bottom of the
1650 /// lattice. \returns true if we should downgrade; the value is the agreeing
1651 /// value number in a non-backedge predecessor.
1652 bool vlocDowngradeLattice(const MachineBasicBlock &MBB,
1653 const DbgValue &OldLiveInLocation,
1654 const SmallVectorImpl<InValueT> &Values,
1655 unsigned CurBlockRPONum);
1656
1657 /// For the given block and live-outs feeding into it, try to find a
1658 /// machine location where they all join. If a solution for all predecessors
1659 /// can't be found, a location where all non-backedge-predecessors join
1660 /// will be returned instead. While this method finds a join location, this
1661 /// says nothing as to whether it should be used.
1662 /// \returns Pair of value ID if found, and true when the correct value
1663 /// is available on all predecessor edges, or false if it's only available
1664 /// for non-backedge predecessors.
1665 std::tuple<Optional<ValueIDNum>, bool>
1666 pickVPHILoc(MachineBasicBlock &MBB, const DebugVariable &Var,
1667 const LiveIdxT &LiveOuts, ValueIDNum **MOutLocs,
1668 ValueIDNum **MInLocs,
1669 const SmallVectorImpl<MachineBasicBlock *> &BlockOrders);
1670
1671 /// Given the solutions to the two dataflow problems, machine value locations
1672 /// in \p MInLocs and live-in variable values in \p SavedLiveIns, runs the
1673 /// TransferTracker class over the function to produce live-in and transfer
1674 /// DBG_VALUEs, then inserts them. Groups of DBG_VALUEs are inserted in the
1675 /// order given by AllVarsNumbering -- this could be any stable order, but
1676 /// right now "order of appearance in function, when explored in RPO", so
1677 /// that we can compare explicitly against VarLocBasedImpl.
1678 void emitLocations(MachineFunction &MF, LiveInsT SavedLiveIns,
1679 ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
1680 DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
1681 const TargetPassConfig &TPC);
1682
1683 /// Boilerplate computation of some initial sets, artificial blocks and
1684 /// RPOT block ordering.
1685 void initialSetup(MachineFunction &MF);
1686
1687 bool ExtendRanges(MachineFunction &MF, TargetPassConfig *TPC) override;
1688
1689public:
1690 /// Default construct and initialize the pass.
1691 InstrRefBasedLDV();
1692
1693 LLVM_DUMP_METHOD
1694 void dump_mloc_transfer(const MLocTransferMap &mloc_transfer) const;
1695
1696 bool isCalleeSaved(LocIdx L) {
1697 unsigned Reg = MTracker->LocIdxToLocID[L];
1698 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
1699 if (CalleeSavedRegs.test(*RAI))
1700 return true;
1701 return false;
1702 }
1703};
1704
1705} // end anonymous namespace
1706
1707//===----------------------------------------------------------------------===//
1708// Implementation
1709//===----------------------------------------------------------------------===//
1710
1711ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX, UINT_MAX, UINT_MAX};
1712
1713/// Default construct and initialize the pass.
1714InstrRefBasedLDV::InstrRefBasedLDV() {}
1715
1716//===----------------------------------------------------------------------===//
1717// Debug Range Extension Implementation
1718//===----------------------------------------------------------------------===//
1719
1720#ifndef NDEBUG
1721// Something to restore in the future.
1722// void InstrRefBasedLDV::printVarLocInMBB(..)
1723#endif
1724
1725SpillLoc
1726InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
1727 assert(MI.hasOneMemOperand() &&
1728        "Spill instruction does not have exactly one memory operand?");
1729 auto MMOI = MI.memoperands_begin();
1730 const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
1731 assert(PVal->kind() == PseudoSourceValue::FixedStack &&
1732        "Inconsistent memory operand in spill instruction");
1733 int FI = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex();
1734 const MachineBasicBlock *MBB = MI.getParent();
1735 Register Reg;
1736 StackOffset Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg);
1737 return {Reg, Offset};
1738}
1739
1740/// End all previous ranges related to @MI and start a new range from @MI
1741/// if it is a DBG_VALUE instr.
1742bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) {
1743 if (!MI.isDebugValue())
55
Calling 'MachineInstr::isDebugValue'
57
Returning from 'MachineInstr::isDebugValue'
58
Taking true branch
1744 return false;
59
Returning zero, which participates in a condition later
1745
1746 const DILocalVariable *Var = MI.getDebugVariable();
1747 const DIExpression *Expr = MI.getDebugExpression();
1748 const DILocation *DebugLoc = MI.getDebugLoc();
1749 const DILocation *InlinedAt = DebugLoc->getInlinedAt();
1750 assert(Var->isValidLocationForIntrinsic(DebugLoc) &&
1751        "Expected inlined-at fields to agree");
1752
1753 DebugVariable V(Var, Expr, InlinedAt);
1754 DbgValueProperties Properties(MI);
1755
1756 // If there are no instructions in this lexical scope, do no location tracking
1757 // at all; this variable shouldn't get a legitimate location range.
1758 auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get());
1759 if (Scope == nullptr)
1760 return true; // handled it; by doing nothing
1761
1762 const MachineOperand &MO = MI.getOperand(0);
1763
1764 // MLocTracker needs to know that this register is read, even if it's only
1765 // read by a debug inst.
1766 if (MO.isReg() && MO.getReg() != 0)
1767 (void)MTracker->readReg(MO.getReg());
1768
1769 // If we're preparing for the second analysis (variables), the machine value
1770 // locations are already solved, and we report this DBG_VALUE and the value
1771 // it refers to, to VLocTracker.
1772 if (VTracker) {
1773 if (MO.isReg()) {
1774 // Feed defVar the new variable location, or if this is a
1775 // DBG_VALUE $noreg, feed defVar None.
1776 if (MO.getReg())
1777 VTracker->defVar(MI, Properties, MTracker->readReg(MO.getReg()));
1778 else
1779 VTracker->defVar(MI, Properties, None);
1780 } else if (MI.getOperand(0).isImm() || MI.getOperand(0).isFPImm() ||
1781 MI.getOperand(0).isCImm()) {
1782 VTracker->defVar(MI, MI.getOperand(0));
1783 }
1784 }
1785
1786 // If performing final tracking of transfers, report this variable definition
1787 // to the TransferTracker too.
1788 if (TTracker)
1789 TTracker->redefVar(MI);
1790 return true;
1791}
1792
1793bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
1794 ValueIDNum **MLiveOuts,
1795 ValueIDNum **MLiveIns) {
1796 if (!MI.isDebugRef())
12
Taking true branch
64
Calling 'MachineInstr::isDebugRef'
67
Returning from 'MachineInstr::isDebugRef'
68
Taking false branch
1797 return false;
13
Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
1798
1799 // Only handle this instruction when we are building the variable value
1800 // transfer function.
1801 if (!VTracker)
69
Assuming field 'VTracker' is non-null
70
Taking false branch
1802 return false;
1803
1804 unsigned InstNo = MI.getOperand(0).getImm();
1805 unsigned OpNo = MI.getOperand(1).getImm();
1806
1807 const DILocalVariable *Var = MI.getDebugVariable();
1808 const DIExpression *Expr = MI.getDebugExpression();
1809 const DILocation *DebugLoc = MI.getDebugLoc();
1810 const DILocation *InlinedAt = DebugLoc->getInlinedAt();
1811 assert(Var->isValidLocationForIntrinsic(DebugLoc) &&
1812        "Expected inlined-at fields to agree");
71
'?' condition is true
1813
1814 DebugVariable V(Var, Expr, InlinedAt);
1815
1816 auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get());
1817 if (Scope == nullptr)
72
Assuming the condition is false
73
Taking false branch
1818 return true; // Handled by doing nothing. This variable is never in scope.
1819
1820 const MachineFunction &MF = *MI.getParent()->getParent();
1821
1822 // Various optimizations may have happened to the value during codegen,
1823 // recorded in the value substitution table. Apply any substitutions to
1824 // the instruction / operand number in this DBG_INSTR_REF, and collect
1825 // any subregister extractions performed during optimization.
1826
1827 // Create dummy substitution with Src set, for lookup.
1828 auto SoughtSub =
1829 MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0);
1830
1831 SmallVector<unsigned, 4> SeenSubregs;
1832 auto LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub);
1833 while (LowerBoundIt != MF.DebugValueSubstitutions.end() &&
74
Assuming the condition is false
1834 LowerBoundIt->Src == SoughtSub.Src) {
1835 std::tie(InstNo, OpNo) = LowerBoundIt->Dest;
1836 SoughtSub.Src = LowerBoundIt->Dest;
1837 if (unsigned Subreg = LowerBoundIt->Subreg)
1838 SeenSubregs.push_back(Subreg);
1839 LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub);
1840 }
1841
1842 // Default machine value number is <None> -- if no instruction defines
1843 // the corresponding value, it must have been optimized out.
1844 Optional<ValueIDNum> NewID = None;
1845
1846 // Try to lookup the instruction number, and find the machine value number
1847 // that it defines. It could be an instruction, or a PHI.
1848 auto InstrIt = DebugInstrNumToInstr.find(InstNo);
1849 auto PHIIt = std::lower_bound(DebugPHINumToValue.begin(),
1850 DebugPHINumToValue.end(), InstNo);
1851 if (InstrIt != DebugInstrNumToInstr.end()) {
75
Calling 'operator!='
78
Returning from 'operator!='
79
Taking false branch
1852 const MachineInstr &TargetInstr = *InstrIt->second.first;
1853 uint64_t BlockNo = TargetInstr.getParent()->getNumber();
1854
1855 // Pick out the designated operand.
1856 assert(OpNo < TargetInstr.getNumOperands());
1857 const MachineOperand &MO = TargetInstr.getOperand(OpNo);
1858
1859 // Today, this can only be a register.
1860 assert(MO.isReg() && MO.isDef());
1861
1862 unsigned LocID = MTracker->getLocID(MO.getReg(), false);
1863 LocIdx L = MTracker->LocIDToLocIdx[LocID];
1864 NewID = ValueIDNum(BlockNo, InstrIt->second.second, L);
1865 } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) {
80
Assuming the condition is true
81
Assuming 'InstNo' is equal to field 'InstrNum'
82
Taking true branch
1866 // It's actually a PHI value. Which value it is might not be obvious, use
1867 // the resolver helper to find out.
1868 NewID = resolveDbgPHIs(*MI.getParent()->getParent(), MLiveOuts, MLiveIns,
83
Passing null pointer value via 3rd parameter 'MLiveIns'
84
Calling 'InstrRefBasedLDV::resolveDbgPHIs'
1869 MI, InstNo);
1870 }
1871
1872 // Apply any subregister extractions, in reverse. We might have seen code
1873 // like this:
1874 // CALL64 @foo, implicit-def $rax
1875 // %0:gr64 = COPY $rax
1876 // %1:gr32 = COPY %0.sub_32bit
1877 // %2:gr16 = COPY %1.sub_16bit
1878 // %3:gr8 = COPY %2.sub_8bit
1879 // In which case each copy would have been recorded as a substitution with
1880 // a subregister qualifier. Apply those qualifiers now.
1881 if (NewID && !SeenSubregs.empty()) {
1882 unsigned Offset = 0;
1883 unsigned Size = 0;
1884
1885 // Look at each subregister that we passed through, and progressively
1886 // narrow in, accumulating any offsets that occur. Substitutions should
1887 // only ever be the same or narrower width than what they read from;
1888 // iterate in reverse order so that we go from wide to small.
1889 for (unsigned Subreg : reverse(SeenSubregs)) {
1890 unsigned ThisSize = TRI->getSubRegIdxSize(Subreg);
1891 unsigned ThisOffset = TRI->getSubRegIdxOffset(Subreg);
1892 Offset += ThisOffset;
1893 Size = (Size == 0) ? ThisSize : std::min(Size, ThisSize);
1894 }
1895
1896 // If that worked, look for an appropriate subregister with the register
1897 // where the define happens. Don't look at values that were defined during
1898 // a stack write: we can't currently express register locations within
1899 // spills.
1900 LocIdx L = NewID->getLoc();
1901 if (NewID && !MTracker->isSpill(L)) {
1902 // Find the register class for the register where this def happened.
1903 // FIXME: no index for this?
1904 Register Reg = MTracker->LocIdxToLocID[L];
1905 const TargetRegisterClass *TRC = nullptr;
1906 for (auto *TRCI : TRI->regclasses())
1907 if (TRCI->contains(Reg))
1908 TRC = TRCI;
1909 assert(TRC && "Couldn't find target register class?");
1910
1911 // If the register we have isn't the right size or in the right place,
1912 // Try to find a subregister inside it.
1913 unsigned MainRegSize = TRI->getRegSizeInBits(*TRC);
1914 if (Size != MainRegSize || Offset) {
1915 // Enumerate all subregisters, searching.
1916 Register NewReg = 0;
1917 for (MCSubRegIterator SRI(Reg, TRI, false); SRI.isValid(); ++SRI) {
1918 unsigned Subreg = TRI->getSubRegIndex(Reg, *SRI);
1919 unsigned SubregSize = TRI->getSubRegIdxSize(Subreg);
1920 unsigned SubregOffset = TRI->getSubRegIdxOffset(Subreg);
1921 if (SubregSize == Size && SubregOffset == Offset) {
1922 NewReg = *SRI;
1923 break;
1924 }
1925 }
1926
1927 // If we didn't find anything: there's no way to express our value.
1928 if (!NewReg) {
1929 NewID = None;
1930 } else {
1931 // Re-state the value as being defined within the subregister
1932 // that we found.
1933 LocIdx NewLoc = MTracker->lookupOrTrackRegister(NewReg);
1934 NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc);
1935 }
1936 }
1937 } else {
1938 // If we can't handle subregisters, unset the new value.
1939 NewID = None;
1940 }
1941 }
1942
1943 // Now we have a value number or None. Tell the variable value tracker about
1944 // it. The rest of this LiveDebugValues implementation acts exactly the same
1945 // for DBG_INSTR_REFs as DBG_VALUEs (just, the former can refer to values that
1946 // aren't immediately available).
1947 DbgValueProperties Properties(Expr, false);
1948 VTracker->defVar(MI, Properties, NewID);
1949
1950 // If we're on the final pass through the function, decompose this INSTR_REF
1951 // into a plain DBG_VALUE.
1952 if (!TTracker)
1953 return true;
1954
1955 // Pick a location for the machine value number, if such a location exists.
1956 // (This information could be stored in TransferTracker to make it faster).
1957 Optional<LocIdx> FoundLoc = None;
1958 for (auto Location : MTracker->locations()) {
1959 LocIdx CurL = Location.Idx;
1960 ValueIDNum ID = MTracker->LocIdxToIDNum[CurL];
1961 if (NewID && ID == NewID) {
1962 // If this is the first location with that value, pick it. Otherwise,
1963 // consider whether it's a "longer term" location.
1964 if (!FoundLoc) {
1965 FoundLoc = CurL;
1966 continue;
1967 }
1968
1969 if (MTracker->isSpill(CurL))
1970 FoundLoc = CurL; // Spills are a longer term location.
1971 else if (!MTracker->isSpill(*FoundLoc) &&
1972 !MTracker->isSpill(CurL) &&
1973 !isCalleeSaved(*FoundLoc) &&
1974 isCalleeSaved(CurL))
1975 FoundLoc = CurL; // Callee saved regs are longer term than normal.
1976 }
1977 }
1978
1979 // Tell transfer tracker that the variable value has changed.
1980 TTracker->redefVar(MI, Properties, FoundLoc);
1981
1982 // If there was a value with no location, but the value is defined in a
1983 // later instruction in this block, this is a block-local use-before-def.
1984 if (!FoundLoc && NewID && NewID->getBlock() == CurBB &&
1985 NewID->getInst() > CurInst)
1986 TTracker->addUseBeforeDef(V, {MI.getDebugExpression(), false}, *NewID);
1987
1988 // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant.
1989 // This DBG_VALUE is potentially a $noreg / undefined location, if
1990 // FoundLoc is None.
1991 // (XXX -- could morph the DBG_INSTR_REF in the future).
1992 MachineInstr *DbgMI = MTracker->emitLoc(FoundLoc, V, Properties);
1993 TTracker->PendingDbgValues.push_back(DbgMI);
1994 TTracker->flushDbgValues(MI.getIterator(), nullptr);
1995 return true;
1996}
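
The path notes above show MLiveIns arriving here as a null pointer (it defaults to nullptr in process()) and being forwarded to resolveDbgPHIs when the reference resolves to a DBG_PHI. A minimal self-contained sketch of that hazard shape, with an explicit precondition check added purely as an illustration; this is not the actual fix applied to LLVM:

#include <cassert>
#include <cstdint>

using ToyValue = uint64_t;

// Toy stand-in for a callee that indexes a per-block live-in table. If the
// caller's table pointer was defaulted to nullptr, the indexing below would be
// a null dereference; the assertion simply makes the precondition visible.
static ToyValue resolveFromLiveIns(ToyValue **LiveIns, unsigned Block,
                                   unsigned Loc) {
  assert(LiveIns && "live-in table required when resolving DBG_PHI values");
  return LiveIns[Block][Loc];
}
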
1997
1998bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) {
1999 if (!MI.isDebugPHI())
17
Taking true branch
2000 return false;
18
Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
2001
2002 // Analyse these only when solving the machine value location problem.
2003 if (VTracker || TTracker)
2004 return true;
2005
2006 // First operand is the value location, either a stack slot or register.
2007 // Second is the debug instruction number of the original PHI.
2008 const MachineOperand &MO = MI.getOperand(0);
2009 unsigned InstrNum = MI.getOperand(1).getImm();
2010
2011 if (MO.isReg()) {
2012 // The value is whatever's currently in the register. Read and record it,
2013 // to be analysed later.
2014 Register Reg = MO.getReg();
2015 ValueIDNum Num = MTracker->readReg(Reg);
2016 auto PHIRec = DebugPHIRecord(
2017 {InstrNum, MI.getParent(), Num, MTracker->lookupOrTrackRegister(Reg)});
2018 DebugPHINumToValue.push_back(PHIRec);
2019 } else {
2020 // The value is whatever's in this stack slot.
2021 assert(MO.isFI());
2022 unsigned FI = MO.getIndex();
2023
2024 // If the stack slot is dead, then this was optimized away.
2025 // FIXME: stack slot colouring should account for slots that get merged.
2026 if (MFI->isDeadObjectIndex(FI))
2027 return true;
2028
2029 // Identify this spill slot.
2030 Register Base;
2031 StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base);
2032 SpillLoc SL = {Base, Offs};
2033 Optional<ValueIDNum> Num = MTracker->readSpill(SL);
2034
2035 if (!Num)
2036 // Nothing ever writes to this slot. Curious, but nothing we can do.
2037 return true;
2038
2039 // Record this DBG_PHI for later analysis.
2040 auto DbgPHI = DebugPHIRecord(
2041 {InstrNum, MI.getParent(), *Num, *MTracker->getSpillMLoc(SL)});
2042 DebugPHINumToValue.push_back(DbgPHI);
2043 }
2044
2045 return true;
2046}
2047
2048void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
2049 // Meta Instructions do not affect the debug liveness of any register they
2050 // define.
2051 if (MI.isImplicitDef()) {
39
Taking false branch
2052 // Except when there's an implicit def, and the location it's defining has
2053 // no value number. The whole point of an implicit def is to announce that
2054 // the register is live, without being specific about its value. So define
2055 // a value if there isn't one already.
2056 ValueIDNum Num = MTracker->readReg(MI.getOperand(0).getReg());
2057 // Has a legitimate value -> ignore the implicit def.
2058 if (Num.getLoc() != 0)
2059 return;
2060 // Otherwise, def it here.
2061 } else if (MI.isMetaInstruction())
40
Taking false branch
2062 return;
2063
2064 MachineFunction *MF = MI.getMF();
2065 const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
2066 Register SP = TLI->getStackPointerRegisterToSaveRestore();
2067
2068 // Find the regs killed by MI, and find regmasks of preserved regs.
2069 // Max out the number of statically allocated elements in `DeadRegs`, as this
2070 // prevents fallback to std::set::count() operations.
2071 SmallSet<uint32_t, 32> DeadRegs;
2072 SmallVector<const uint32_t *, 4> RegMasks;
2073 SmallVector<const MachineOperand *, 4> RegMaskPtrs;
2074 for (const MachineOperand &MO : MI.operands()) {
41
Assuming '__begin1' is equal to '__end1'
2075 // Determine whether the operand is a register def.
2076 if (MO.isReg() && MO.isDef() && MO.getReg() &&
2077 Register::isPhysicalRegister(MO.getReg()) &&
2078 !(MI.isCall() && MO.getReg() == SP)) {
2079 // Remove ranges of all aliased registers.
2080 for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
2081 // FIXME: Can we break out of this loop early if no insertion occurs?
2082 DeadRegs.insert(*RAI);
2083 } else if (MO.isRegMask()) {
2084 RegMasks.push_back(MO.getRegMask());
2085 RegMaskPtrs.push_back(&MO);
2086 }
2087 }
2088
2089 // Tell MLocTracker about all definitions, of regmasks and otherwise.
2090 for (uint32_t DeadReg : DeadRegs)
2091 MTracker->defReg(DeadReg, CurBB, CurInst);
2092
2093 for (auto *MO : RegMaskPtrs)
42
Assuming '__begin1' is equal to '__end1'
2094 MTracker->writeRegMask(MO, CurBB, CurInst);
2095
2096 if (!TTracker)
43
Assuming field 'TTracker' is null
44
Taking true branch
2097 return;
45
Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
2098
2099 // When committing variable values to locations: tell transfer tracker that
2100 // we've clobbered things. It may be able to recover the variable from a
2101 // different location.
2102
2103 // Inform TTracker about any direct clobbers.
2104 for (uint32_t DeadReg : DeadRegs) {
2105 LocIdx Loc = MTracker->lookupOrTrackRegister(DeadReg);
2106 TTracker->clobberMloc(Loc, MI.getIterator(), false);
2107 }
2108
2109 // Look for any clobbers performed by a register mask. Only test locations
2110 // that are actually being tracked.
2111 for (auto L : MTracker->locations()) {
2112 // Stack locations can't be clobbered by regmasks.
2113 if (MTracker->isSpill(L.Idx))
2114 continue;
2115
2116 Register Reg = MTracker->LocIdxToLocID[L.Idx];
2117 for (auto *MO : RegMaskPtrs)
2118 if (MO->clobbersPhysReg(Reg))
2119 TTracker->clobberMloc(L.Idx, MI.getIterator(), false);
2120 }
2121}
2122
2123void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) {
2124 ValueIDNum SrcValue = MTracker->readReg(SrcRegNum);
2125
2126 MTracker->setReg(DstRegNum, SrcValue);
2127
2128 // In all circumstances, re-def the super registers. It's definitely a new
2129 // value now. This doesn't uniquely identify the composition of subregs, for
2130 // example, two identical values in subregisters composed in different
2131 // places would not get equal value numbers.
2132 for (MCSuperRegIterator SRI(DstRegNum, TRI); SRI.isValid(); ++SRI)
2133 MTracker->defReg(*SRI, CurBB, CurInst);
2134
2135 // If we're emulating VarLocBasedImpl, just define all the subregisters.
2136 // DBG_VALUEs of them will expect to be tracked from the DBG_VALUE, not
2137 // through prior copies.
2138 if (EmulateOldLDV) {
2139 for (MCSubRegIndexIterator DRI(DstRegNum, TRI); DRI.isValid(); ++DRI)
2140 MTracker->defReg(DRI.getSubReg(), CurBB, CurInst);
2141 return;
2142 }
2143
2144 // Otherwise, actually copy subregisters from one location to another.
2145 // XXX: in addition, any subregisters of DstRegNum that don't line up with
2146 // the source register should be def'd.
2147 for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) {
2148 unsigned SrcSubReg = SRI.getSubReg();
2149 unsigned SubRegIdx = SRI.getSubRegIndex();
2150 unsigned DstSubReg = TRI->getSubReg(DstRegNum, SubRegIdx);
2151 if (!DstSubReg)
2152 continue;
2153
2154 // Do the copy. There are two matching subregisters; the source value should
2155 // have been def'd when the super-reg was, but the latter might not be tracked
2156 // yet.
2157 // This will force SrcSubReg to be tracked, if it isn't yet.
2158 (void)MTracker->readReg(SrcSubReg);
2159 LocIdx SrcL = MTracker->getRegMLoc(SrcSubReg);
2160 assert(SrcL.asU64());
2161 (void)MTracker->readReg(DstSubReg);
2162 LocIdx DstL = MTracker->getRegMLoc(DstSubReg);
2163 assert(DstL.asU64());
2164 (void)DstL;
2165 ValueIDNum CpyValue = {SrcValue.getBlock(), SrcValue.getInst(), SrcL};
2166
2167 MTracker->setReg(DstSubReg, CpyValue);
2168 }
2169}
2170
2171bool InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI,
2172 MachineFunction *MF) {
2173 // TODO: Handle multiple stores folded into one.
2174 if (!MI.hasOneMemOperand())
2175 return false;
2176
2177 if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII))
2178 return false; // This is not a spill instruction, since no valid size was
2179 // returned from either function.
2180
2181 return true;
2182}
2183
2184bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI,
2185 MachineFunction *MF, unsigned &Reg) {
2186 if (!isSpillInstruction(MI, MF))
2187 return false;
2188
2189 int FI;
2190 Reg = TII->isStoreToStackSlotPostFE(MI, FI);
2191 return Reg != 0;
2192}
2193
2194Optional<SpillLoc>
2195InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI,
2196 MachineFunction *MF, unsigned &Reg) {
2197 if (!MI.hasOneMemOperand())
2198 return None;
2199
2200 // FIXME: Handle folded restore instructions with more than one memory
2201 // operand.
2202 if (MI.getRestoreSize(TII)) {
2203 Reg = MI.getOperand(0).getReg();
2204 return extractSpillBaseRegAndOffset(MI);
2205 }
2206 return None;
2207}
2208
2209bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
2210 // XXX -- it's too difficult to implement VarLocBasedImpl's stack location
2211 // limitations under the new model. Therefore, when comparing them, compare
2212 // versions that don't attempt spills or restores at all.
2213 if (EmulateOldLDV)
28
Assuming the condition is false
29
Taking false branch
2214 return false;
2215
2216 MachineFunction *MF = MI.getMF();
2217 unsigned Reg;
2218 Optional<SpillLoc> Loc;
2219
2220 LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
30
Assuming 'DebugFlag' is false
31
Loop condition is false. Exiting loop
2221
2222 // First, if there are any DBG_VALUEs pointing at a spill slot that is
2223 // written to, terminate that variable location. The value in memory
2224 // will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
2225 if (isSpillInstruction(MI, MF)) {
32
Taking false branch
2226 Loc = extractSpillBaseRegAndOffset(MI);
2227
2228 if (TTracker) {
2229 Optional<LocIdx> MLoc = MTracker->getSpillMLoc(*Loc);
2230 if (MLoc) {
2231 // Un-set this location before clobbering, so that we don't salvage
2232 // the variable location back to the same place.
2233 MTracker->setMLoc(*MLoc, ValueIDNum::EmptyValue);
2234 TTracker->clobberMloc(*MLoc, MI.getIterator());
2235 }
2236 }
2237 }
2238
2239 // Try to recognise spill and restore instructions that may transfer a value.
2240 if (isLocationSpill(MI, MF, Reg)) {
33
Taking false branch
2241 Loc = extractSpillBaseRegAndOffset(MI);
2242 auto ValueID = MTracker->readReg(Reg);
2243
2244 // If the location is empty, produce a phi, signify it's the live-in value.
2245 if (ValueID.getLoc() == 0)
2246 ValueID = {CurBB, 0, MTracker->getRegMLoc(Reg)};
2247
2248 MTracker->setSpill(*Loc, ValueID);
2249 auto OptSpillLocIdx = MTracker->getSpillMLoc(*Loc);
2250 assert(OptSpillLocIdx && "Spill slot set but has no LocIdx?");
2251 LocIdx SpillLocIdx = *OptSpillLocIdx;
2252
2253 // Tell TransferTracker about this spill, produce DBG_VALUEs for it.
2254 if (TTracker)
2255 TTracker->transferMlocs(MTracker->getRegMLoc(Reg), SpillLocIdx,
2256 MI.getIterator());
2257 } else {
2258 if (!(Loc = isRestoreInstruction(MI, MF, Reg)))
34
Taking true branch
2259 return false;
35
Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
2260
2261 // Is there a value to be restored?
2262 auto OptValueID = MTracker->readSpill(*Loc);
2263 if (OptValueID) {
2264 ValueIDNum ValueID = *OptValueID;
2265 LocIdx SpillLocIdx = *MTracker->getSpillMLoc(*Loc);
2266 // XXX -- can we recover sub-registers of this value? Until we can, first
2267 // overwrite all defs of the register being restored to.
2268 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
2269 MTracker->defReg(*RAI, CurBB, CurInst);
2270
2271 // Now override the reg we're restoring to.
2272 MTracker->setReg(Reg, ValueID);
2273
2274 // Report this restore to the transfer tracker too.
2275 if (TTracker)
2276 TTracker->transferMlocs(SpillLocIdx, MTracker->getRegMLoc(Reg),
2277 MI.getIterator());
2278 } else {
2279 // There isn't anything in the location; not clear if this is a code path
2280 // that still runs. Def this register anyway just in case.
2281 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
2282 MTracker->defReg(*RAI, CurBB, CurInst);
2283
2284 // Force the spill slot to be tracked.
2285 LocIdx L = MTracker->getOrTrackSpillLoc(*Loc);
2286
2287 // Set the restored value to be a machine phi number, signifying that it's
2288 // whatever the spill's live-in value is in this block. Definitely has
2289 // a LocIdx due to the setSpill above.
2290 ValueIDNum ValueID = {CurBB, 0, L};
2291 MTracker->setReg(Reg, ValueID);
2292 MTracker->setSpill(*Loc, ValueID);
2293 }
2294 }
2295 return true;
2296}
2297
2298bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) {
2299 auto DestSrc = TII->isCopyInstr(MI);
2300 if (!DestSrc)
22
Assuming the condition is true
23
Taking true branch
2301 return false;
24
Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
2302
2303 const MachineOperand *DestRegOp = DestSrc->Destination;
2304 const MachineOperand *SrcRegOp = DestSrc->Source;
2305
2306 auto isCalleeSavedReg = [&](unsigned Reg) {
2307 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
2308 if (CalleeSavedRegs.test(*RAI))
2309 return true;
2310 return false;
2311 };
2312
2313 Register SrcReg = SrcRegOp->getReg();
2314 Register DestReg = DestRegOp->getReg();
2315
2316 // Ignore identity copies. Yep, these make it as far as LiveDebugValues.
2317 if (SrcReg == DestReg)
2318 return true;
2319
2320 // For emulating VarLocBasedImpl:
2321 // We want to recognize instructions where the destination register is a
2322 // callee-saved register. If a register that could be clobbered by the call
2323 // were included, there would be a high chance that it is going to be
2324 // clobbered soon. It is more likely that the previous register, which is
2325 // callee saved, will stay unclobbered for longer, even if it is killed.
2326 //
2327 // For InstrRefBasedImpl, we can track multiple locations per value, so
2328 // ignore this condition.
2329 if (EmulateOldLDV && !isCalleeSavedReg(DestReg))
2330 return false;
2331
2332 // VarLocBasedImpl only followed killing copies.
2333 if (EmulateOldLDV && !SrcRegOp->isKill())
2334 return false;
2335
2336 // Copy MTracker info, including subregs if available.
2337 InstrRefBasedLDV::performCopy(SrcReg, DestReg);
2338
2339 // Only produce a transfer of DBG_VALUE within a block where old LDV
2340 // would have. We might make use of the additional value tracking in some
2341 // other way, later.
2342 if (TTracker && isCalleeSavedReg(DestReg) && SrcRegOp->isKill())
2343 TTracker->transferMlocs(MTracker->getRegMLoc(SrcReg),
2344 MTracker->getRegMLoc(DestReg), MI.getIterator());
2345
2346 // VarLocBasedImpl would quit tracking the old location after copying.
2347 if (EmulateOldLDV && SrcReg != DestReg)
2348 MTracker->defReg(SrcReg, CurBB, CurInst);
2349
2350 // Finally, the copy might have clobbered variables based on the destination
2351 // register. Tell TTracker about it, in case a backup location exists.
2352 if (TTracker) {
2353 for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) {
2354 LocIdx ClobberedLoc = MTracker->getRegMLoc(*RAI);
2355 TTracker->clobberMloc(ClobberedLoc, MI.getIterator(), false);
2356 }
2357 }
2358
2359 return true;
2360}
2361
2362/// Accumulate a mapping between each DILocalVariable fragment and other
2363/// fragments of that DILocalVariable which overlap. This reduces work during
2364/// the data-flow stage from "Find any overlapping fragments" to "Check if the
2365/// known-to-overlap fragments are present".
2366/// \param MI A previously unprocessed DEBUG_VALUE instruction to analyze for
2367/// fragment usage.
2368void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) {
2369 DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(),
2370 MI.getDebugLoc()->getInlinedAt());
2371 FragmentInfo ThisFragment = MIVar.getFragmentOrDefault();
2372
2373 // If this is the first sighting of this variable, then we are guaranteed
2374 // there are currently no overlapping fragments either. Initialize the set
2375 // of seen fragments, record no overlaps for the current one, and return.
2376 auto SeenIt = SeenFragments.find(MIVar.getVariable());
2377 if (SeenIt == SeenFragments.end()) {
2378 SmallSet<FragmentInfo, 4> OneFragment;
2379 OneFragment.insert(ThisFragment);
2380 SeenFragments.insert({MIVar.getVariable(), OneFragment});
2381
2382 OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
2383 return;
2384 }
2385
2386 // If this particular Variable/Fragment pair already exists in the overlap
2387 // map, it has already been accounted for.
2388 auto IsInOLapMap =
2389 OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
2390 if (!IsInOLapMap.second)
2391 return;
2392
2393 auto &ThisFragmentsOverlaps = IsInOLapMap.first->second;
2394 auto &AllSeenFragments = SeenIt->second;
2395
2396 // Otherwise, examine all other seen fragments for this variable, with "this"
2397 // fragment being a previously unseen fragment. Record any pair of
2398 // overlapping fragments.
2399 for (auto &ASeenFragment : AllSeenFragments) {
2400 // Does this previously seen fragment overlap?
2401 if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) {
2402 // Yes: Mark the current fragment as being overlapped.
2403 ThisFragmentsOverlaps.push_back(ASeenFragment);
2404 // Mark the previously seen fragment as being overlapped by the current
2405 // one.
2406 auto ASeenFragmentsOverlaps =
2407 OverlapFragments.find({MIVar.getVariable(), ASeenFragment});
2408 assert(ASeenFragmentsOverlaps != OverlapFragments.end() &&
2409 "Previously seen var fragment has no vector of overlaps");
2410 ASeenFragmentsOverlaps->second.push_back(ThisFragment);
2411 }
2412 }
2413
2414 AllSeenFragments.insert(ThisFragment);
2415}
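
The overlap-map accumulation above can be illustrated with a minimal standalone sketch, using a simplified fragment type and plain standard containers rather than the LLVM API:

  #include <map>
  #include <utility>
  #include <vector>

  // A fragment of a variable: a bit offset and size within it.
  struct Fragment { unsigned Offset, Size; };

  static bool fragmentsOverlap(const Fragment &A, const Fragment &B) {
    return A.Offset < B.Offset + B.Size && B.Offset < A.Offset + A.Size;
  }

  // Per-variable state: fragments seen so far, plus, for each fragment, the
  // other fragments of the same variable known to overlap it.
  struct VarFragments {
    std::vector<Fragment> Seen;
    std::map<std::pair<unsigned, unsigned>, std::vector<Fragment>> Overlaps;
  };

  static void accumulate(VarFragments &V, Fragment F) {
    auto Key = std::make_pair(F.Offset, F.Size);
    if (!V.Overlaps.insert({Key, {}}).second)
      return; // This fragment has already been accounted for.
    auto &MyOverlaps = V.Overlaps[Key];
    for (const Fragment &Other : V.Seen)
      if (fragmentsOverlap(F, Other)) {
        MyOverlaps.push_back(Other);                         // F overlaps Other,
        V.Overlaps[{Other.Offset, Other.Size}].push_back(F); // and vice versa.
      }
    V.Seen.push_back(F);
  }
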
2416
2417void InstrRefBasedLDV::process(MachineInstr &MI, ValueIDNum **MLiveOuts,
2418 ValueIDNum **MLiveIns) {
2419 // Try to interpret an MI as a debug or transfer instruction. Only if it's
2421 // none of these should we interpret its register defs as new value
2421 // definitions.
2422 if (transferDebugValue(MI))
10. Taking false branch
54. Calling 'InstrRefBasedLDV::transferDebugValue'
60. Returning from 'InstrRefBasedLDV::transferDebugValue'
61. Taking false branch
2423 return;
2424 if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns))
11. Calling 'InstrRefBasedLDV::transferDebugInstrRef'
14. Returning from 'InstrRefBasedLDV::transferDebugInstrRef'
15. Taking false branch
62. Passing null pointer value via 3rd parameter 'MLiveIns'
63. Calling 'InstrRefBasedLDV::transferDebugInstrRef'
2425 return;
2426 if (transferDebugPHI(MI))
16. Calling 'InstrRefBasedLDV::transferDebugPHI'
19. Returning from 'InstrRefBasedLDV::transferDebugPHI'
20. Taking false branch
2427 return;
2428 if (transferRegisterCopy(MI))
21. Calling 'InstrRefBasedLDV::transferRegisterCopy'
25. Returning from 'InstrRefBasedLDV::transferRegisterCopy'
26. Taking false branch
2429 return;
2430 if (transferSpillOrRestoreInst(MI))
27. Calling 'InstrRefBasedLDV::transferSpillOrRestoreInst'
36. Returning from 'InstrRefBasedLDV::transferSpillOrRestoreInst'
37. Taking false branch
2431 return;
2432 transferRegisterDef(MI);
38. Calling 'InstrRefBasedLDV::transferRegisterDef'
46. Returning from 'InstrRefBasedLDV::transferRegisterDef'
2433}
47. Returning without writing to 'MI.DebugInstrNum', which participates in a condition later
2434
2435void InstrRefBasedLDV::produceMLocTransferFunction(
2436 MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer,
2437 unsigned MaxNumBlocks) {
2438 // Because we try to optimize around register mask operands by ignoring regs
2439 // that aren't currently tracked, we set up something ugly for later: RegMask
2440 // operands that are seen earlier than the first use of a register still need
2441 // to clobber that register in the transfer function. But this information
2442 // isn't actively recorded. Instead, we track each RegMask used in each block,
2443 // and accumulate the clobbered but untracked registers in each block into
2444 // the following bitvector. Later, if new values are tracked, we can add
2445 // appropriate clobbers.
2446 SmallVector<BitVector, 32> BlockMasks;
2447 BlockMasks.resize(MaxNumBlocks);
2448
2449 // Reserve one bit per register for the masks described above.
2450 unsigned BVWords = MachineOperand::getRegMaskSize(TRI->getNumRegs());
2451 for (auto &BV : BlockMasks)
8. Assuming '__begin1' is equal to '__end1'
2452 BV.resize(TRI->getNumRegs(), true);
2453
2454 // Step through all instructions and inhale the transfer function.
2455 for (auto &MBB : MF) {
2456 // Object fields that are read by trackers to know where we are in the
2457 // function.
2458 CurBB = MBB.getNumber();
2459 CurInst = 1;
2460
2461 // Set all machine locations to a PHI value. For transfer function
2462 // production only, this signifies the live-in value to the block.
2463 MTracker->reset();
2464 MTracker->setMPhis(CurBB);
2465
2466 // Step through each instruction in this block.
2467 for (auto &MI : MBB) {
2468 process(MI);
9. Calling 'InstrRefBasedLDV::process'
48. Returning from 'InstrRefBasedLDV::process'
52. Passing null pointer value via 3rd parameter 'MLiveIns'
53. Calling 'InstrRefBasedLDV::process'
2469 // Also accumulate fragment map.
2470 if (MI.isDebugValue())
49. Taking false branch
2471 accumulateFragmentMap(MI);
2472
2473 // Create a map from the instruction number (if present) to the
2474 // MachineInstr and its position.
2475 if (uint64_t InstrNo = MI.peekDebugInstrNum()) {
50. Assuming 'InstrNo' is 0
51. Taking false branch
2476 auto InstrAndPos = std::make_pair(&MI, CurInst);
2477 auto InsertResult =
2478 DebugInstrNumToInstr.insert(std::make_pair(InstrNo, InstrAndPos));
2479
2480 // There should never be duplicate instruction numbers.
2481 assert(InsertResult.second);
2482 (void)InsertResult;
2483 }
2484
2485 ++CurInst;
2486 }
2487
2488 // Produce the transfer function, a map of machine location to new value. If
2489 // any machine location has the live-in phi value from the start of the
2490 // block, it's live-through and doesn't need recording in the transfer
2491 // function.
2492 for (auto Location : MTracker->locations()) {
2493 LocIdx Idx = Location.Idx;
2494 ValueIDNum &P = Location.Value;
2495 if (P.isPHI() && P.getLoc() == Idx.asU64())
2496 continue;
2497
2498 // Insert-or-update.
2499 auto &TransferMap = MLocTransfer[CurBB];
2500 auto Result = TransferMap.insert(std::make_pair(Idx.asU64(), P));
2501 if (!Result.second)
2502 Result.first->second = P;
2503 }
2504
2505 // Accumulate any bitmask operands into the clobbered reg mask for this
2506 // block.
2507 for (auto &P : MTracker->Masks) {
2508 BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);
2509 }
2510 }
2511
2512 // Compute a bitvector of all the registers that are tracked in this block.
2513 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2514 Register SP = TLI->getStackPointerRegisterToSaveRestore();
2515 BitVector UsedRegs(TRI->getNumRegs());
2516 for (auto Location : MTracker->locations()) {
2517 unsigned ID = MTracker->LocIdxToLocID[Location.Idx];
2518 if (ID >= TRI->getNumRegs() || ID == SP)
2519 continue;
2520 UsedRegs.set(ID);
2521 }
2522
2523 // Check that any regmask-clobber of a register that gets tracked is not
2524 // live-through in the transfer function. It needs to be clobbered at the
2525 // very least.
2526 for (unsigned int I = 0; I < MaxNumBlocks; ++I) {
2527 BitVector &BV = BlockMasks[I];
2528 BV.flip();
2529 BV &= UsedRegs;
2530 // This produces all the bits that we clobber, but also use. Check that
2531 // they're all clobbered or at least set in the designated transfer
2532 // elem.
2533 for (unsigned Bit : BV.set_bits()) {
2534 unsigned ID = MTracker->getLocID(Bit, false);
2535 LocIdx Idx = MTracker->LocIDToLocIdx[ID];
2536 auto &TransferMap = MLocTransfer[I];
2537
2538 // Install a value representing the fact that this location is effectively
2539 // written to in this block. As there's no reserved value, instead use
2540 // a value number that is never generated. Pick the value number for the
2541 // first instruction in the block, def'ing this location, which we know
2542 // this block never used anyway.
2543 ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx);
2544 auto Result =
2545 TransferMap.insert(std::make_pair(Idx.asU64(), NotGeneratedNum));
2546 if (!Result.second) {
2547 ValueIDNum &ValueID = Result.first->second;
2548 if (ValueID.getBlock() == I && ValueID.isPHI())
2549 // It was left as live-through. Set it to clobbered.
2550 ValueID = NotGeneratedNum;
2551 }
2552 }
2553 }
2554}
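
A rough sketch of the regmask bookkeeping above, with std::bitset standing in for BitVector and for register masks; the register count and the bit convention (set bit means "preserved") are assumptions of the sketch:

  #include <bitset>
  #include <vector>

  constexpr unsigned NumRegs = 64;      // assumed register count for the sketch
  using RegMask = std::bitset<NumRegs>; // set bit => register preserved

  // AND together every regmask seen in a block: a register stays "preserved"
  // only if every call in the block preserves it.
  RegMask accumulateBlockMask(const std::vector<RegMask> &MasksInBlock) {
    RegMask Acc;
    Acc.set(); // start from "everything preserved"
    for (const RegMask &M : MasksInBlock)
      Acc &= M;
    return Acc;
  }

  // Registers clobbered by some mask in the block *and* tracked by the pass:
  // these locations must not be left live-through in that block's transfer
  // function.
  RegMask clobberedAndTracked(const RegMask &BlockMask, const RegMask &UsedRegs) {
    return ~BlockMask & UsedRegs;
  }
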
2555
2556std::tuple<bool, bool>
2557InstrRefBasedLDV::mlocJoin(MachineBasicBlock &MBB,
2558 SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
2559 ValueIDNum **OutLocs, ValueIDNum *InLocs) {
2560 LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
2561 bool Changed = false;
2562 bool DowngradeOccurred = false;
2563
2564 // Collect predecessors that have been visited. Anything that hasn't been
2565 // visited yet is a backedge on the first iteration, and the meet of its
2566 // lattice value for all locations will be unaffected.
2567 SmallVector<const MachineBasicBlock *, 8> BlockOrders;
2568 for (auto Pred : MBB.predecessors()) {
2569 if (Visited.count(Pred)) {
2570 BlockOrders.push_back(Pred);
2571 }
2572 }
2573
2574 // Visit predecessors in RPOT order.
2575 auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) {
2576 return BBToOrder.find(A)->second < BBToOrder.find(B)->second;
2577 };
2578 llvm::sort(BlockOrders, Cmp);
2579
2580 // Skip entry block.
2581 if (BlockOrders.size() == 0)
2582 return std::tuple<bool, bool>(false, false);
2583
2584 // Step through all machine locations, then look at each predecessor and
2585 // detect disagreements.
2586 unsigned ThisBlockRPO = BBToOrder.find(&MBB)->second;
2587 for (auto Location : MTracker->locations()) {
2588 LocIdx Idx = Location.Idx;
2589 // Pick out the first predecessor's live-out value for this location. It's
2590 // guaranteed not to be a backedge, as we order by RPO.
2591 ValueIDNum BaseVal = OutLocs[BlockOrders[0]->getNumber()][Idx.asU64()];
2592
2593 // Some flags for whether there's a disagreement, and whether it's a
2594 // disagreement with a backedge or not.
2595 bool Disagree = false;
2596 bool NonBackEdgeDisagree = false;
2597
2598 // Loop around everything that wasn't 'base'.
2599 for (unsigned int I = 1; I < BlockOrders.size(); ++I) {
2600 auto *MBB = BlockOrders[I];
2601 if (BaseVal != OutLocs[MBB->getNumber()][Idx.asU64()]) {
2602 // Live-out of a predecessor disagrees with the first predecessor.
2603 Disagree = true;
2604
2605 // Test whether it's a disagreement in the backedges or not.
2606 if (BBToOrder.find(MBB)->second < ThisBlockRPO) // might be self b/e
2607 NonBackEdgeDisagree = true;
2608 }
2609 }
2610
2611 bool OverRide = false;
2612 if (Disagree && !NonBackEdgeDisagree) {
2613 // Only the backedges disagree. Consider demoting the livein
2614 // lattice value, as per the file level comment. The value we consider
2615 // demoting to is the value that the non-backedge predecessors agree on.
2616 // The order of values is that non-PHIs are \top, a PHI at this block
2617 // \bot, and phis between the two are ordered by their RPO number.
2618 // If there's no agreement, or we've already demoted to this PHI value
2619 // before, replace with a PHI value at this block.
2620
2621 // Calculate order numbers: zero means normal def, nonzero means RPO
2622 // number.
2623 unsigned BaseBlockRPONum = BBNumToRPO[BaseVal.getBlock()] + 1;
2624 if (!BaseVal.isPHI())
2625 BaseBlockRPONum = 0;
2626
2627 ValueIDNum &InLocID = InLocs[Idx.asU64()];
2628 unsigned InLocRPONum = BBNumToRPO[InLocID.getBlock()] + 1;
2629 if (!InLocID.isPHI())
2630 InLocRPONum = 0;
2631
2632 // Should we ignore the disagreeing backedges, and override with the
2633 // value the other predecessors agree on (in "base")?
2634 unsigned ThisBlockRPONum = BBNumToRPO[MBB.getNumber()] + 1;
2635 if (BaseBlockRPONum > InLocRPONum && BaseBlockRPONum < ThisBlockRPONum) {
2636 // Override.
2637 OverRide = true;
2638 DowngradeOccurred = true;
2639 }
2640 }
2641 // else: if we disagree in the non-backedges, then this is definitely
2642 // a control flow merge where different values merge. Make it a PHI.
2643
2644 // Generate a phi...
2645 ValueIDNum PHI = {(uint64_t)MBB.getNumber(), 0, Idx};
2646 ValueIDNum NewVal = (Disagree && !OverRide) ? PHI : BaseVal;
2647 if (InLocs[Idx.asU64()] != NewVal) {
2648 Changed |= true;
2649 InLocs[Idx.asU64()] = NewVal;
2650 }
2651 }
2652
2653 // TODO: Reimplement NumInserted and NumRemoved.
2654 return std::tuple<bool, bool>(Changed, DowngradeOccurred);
2655}
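
The per-location join rule above reduces to a small decision, sketched here with a plain struct standing in for ValueIDNum (instruction number 0 marks a PHI / live-in value):

  struct MValue {
    unsigned Block, Inst, Loc;
    bool operator==(const MValue &O) const {
      return Block == O.Block && Inst == O.Inst && Loc == O.Loc;
    }
  };

  // Base is the live-out of the first (non-backedge) predecessor. If all
  // predecessors agree, or only backedges disagree and the downgrade rule
  // permits keeping the agreed value, propagate Base; otherwise the live-in
  // becomes a PHI at this block.
  MValue joinOneLoc(const MValue &Base, bool Disagree, bool NonBackEdgeDisagree,
                    bool DowngradeAllowed, unsigned ThisBlock, unsigned Loc) {
    if (!Disagree)
      return Base;
    if (!NonBackEdgeDisagree && DowngradeAllowed)
      return Base;
    return MValue{ThisBlock, 0, Loc};
  }
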
2656
2657void InstrRefBasedLDV::mlocDataflow(
2658 ValueIDNum **MInLocs, ValueIDNum **MOutLocs,
2659 SmallVectorImpl<MLocTransferMap> &MLocTransfer) {
2660 std::priority_queue<unsigned int, std::vector<unsigned int>,
2661 std::greater<unsigned int>>
2662 Worklist, Pending;
2663
2664 // We track what is on the current and pending worklist to avoid inserting
2665 // the same thing twice. We could avoid this with a custom priority queue,
2666 // but this is probably not worth it.
2667 SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist;
2668
2669 // Initialize worklist with every block to be visited.
2670 for (unsigned int I = 0; I < BBToOrder.size(); ++I) {
2671 Worklist.push(I);
2672 OnWorklist.insert(OrderToBB[I]);
2673 }
2674
2675 MTracker->reset();
2676
2677 // Set inlocs for entry block -- each as a PHI at the entry block. Represents
2678 // the incoming value to the function.
2679 MTracker->setMPhis(0);
2680 for (auto Location : MTracker->locations())
2681 MInLocs[0][Location.Idx.asU64()] = Location.Value;
2682
2683 SmallPtrSet<const MachineBasicBlock *, 16> Visited;
2684 while (!Worklist.empty() || !Pending.empty()) {
2685 // Vector for storing the evaluated block transfer function.
2686 SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap;
2687
2688 while (!Worklist.empty()) {
2689 MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
2690 CurBB = MBB->getNumber();
2691 Worklist.pop();
2692
2693 // Join the values in all predecessor blocks.
2694 bool InLocsChanged, DowngradeOccurred;
2695 std::tie(InLocsChanged, DowngradeOccurred) =
2696 mlocJoin(*MBB, Visited, MOutLocs, MInLocs[CurBB]);
2697 InLocsChanged |= Visited.insert(MBB).second;
2698
2699 // If a downgrade occurred, book us in for re-examination on the next
2700 // iteration.
2701 if (DowngradeOccurred && OnPending.insert(MBB).second)
2702 Pending.push(BBToOrder[MBB]);
2703
2704 // Don't examine transfer function if we've visited this loc at least
2705 // once, and inlocs haven't changed.
2706 if (!InLocsChanged)
2707 continue;
2708
2709 // Load the current set of live-ins into MLocTracker.
2710 MTracker->loadFromArray(MInLocs[CurBB], CurBB);
2711
2712 // Each element of the transfer function can be a new def, or a read of
2713 // a live-in value. Evaluate each element, and store to "ToRemap".
2714 ToRemap.clear();
2715 for (auto &P : MLocTransfer[CurBB]) {
2716 if (P.second.getBlock() == CurBB && P.second.isPHI()) {
2717 // This is a movement of whatever was live in. Read it.
2718 ValueIDNum NewID = MTracker->getNumAtPos(P.second.getLoc());
2719 ToRemap.push_back(std::make_pair(P.first, NewID));
2720 } else {
2721 // It's a def. Just set it.
2722 assert(P.second.getBlock() == CurBB);
2723 ToRemap.push_back(std::make_pair(P.first, P.second));
2724 }
2725 }
2726
2727 // Commit the transfer function changes into mloc tracker, which
2728 // transforms the contents of the MLocTracker into the live-outs.
2729 for (auto &P : ToRemap)
2730 MTracker->setMLoc(P.first, P.second);
2731
2732 // Now copy out-locs from mloc tracker into out-loc vector, checking
2733 // whether changes have occurred. These changes can have come from both
2734 // the transfer function, and mlocJoin.
2735 bool OLChanged = false;
2736 for (auto Location : MTracker->locations()) {
2737 OLChanged |= MOutLocs[CurBB][Location.Idx.asU64()] != Location.Value;
2738 MOutLocs[CurBB][Location.Idx.asU64()] = Location.Value;
2739 }
2740
2741 MTracker->reset();
2742
2743 // No need to examine successors again if out-locs didn't change.
2744 if (!OLChanged)
2745 continue;
2746
2747 // All successors should be visited: put any back-edges on the pending
2748 // list for the next dataflow iteration, and any other successors to be
2749 // visited this iteration, if they're not going to be already.
2750 for (auto s : MBB->successors()) {
2751 // Does branching to this successor represent a back-edge?
2752 if (BBToOrder[s] > BBToOrder[MBB]) {
2753 // No: visit it during this dataflow iteration.
2754 if (OnWorklist.insert(s).second)
2755 Worklist.push(BBToOrder[s]);
2756 } else {
2757 // Yes: visit it on the next iteration.
2758 if (OnPending.insert(s).second)
2759 Pending.push(BBToOrder[s]);
2760 }
2761 }
2762 }
2763
2764 Worklist.swap(Pending);
2765 std::swap(OnPending, OnWorklist);
2766 OnPending.clear();
2767 // At this point, pending must be empty, since it was just the empty
2768 // worklist.
2769 assert(Pending.empty() && "Pending should be empty");
2770 }
2771
2772 // Once no live-ins change on mlocJoin(), we've reached a
2773 // fixed point.
2774}
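
The two-level worklist scheme used above generalises to the following standalone sketch. Blocks are identified by their RPO number (a simplifying assumption), and JoinAndTransfer stands in for the join plus transfer-function evaluation, returning true when the block's out-state changed:

  #include <functional>
  #include <queue>
  #include <set>
  #include <vector>

  void runDataflow(unsigned NumBlocks,
                   const std::vector<std::vector<unsigned>> &Succs,
                   const std::function<bool(unsigned)> &JoinAndTransfer) {
    std::priority_queue<unsigned, std::vector<unsigned>, std::greater<unsigned>>
        Worklist, Pending;
    std::set<unsigned> OnWorklist, OnPending;

    for (unsigned B = 0; B < NumBlocks; ++B) {
      Worklist.push(B);
      OnWorklist.insert(B);
    }

    while (!Worklist.empty() || !Pending.empty()) {
      while (!Worklist.empty()) {
        unsigned B = Worklist.top();
        Worklist.pop();
        if (!JoinAndTransfer(B))
          continue;                          // out-state unchanged: stop here
        for (unsigned S : Succs[B]) {
          if (S > B) {                       // forward edge: revisit this pass
            if (OnWorklist.insert(S).second)
              Worklist.push(S);
          } else if (OnPending.insert(S).second) {
            Pending.push(S);                 // back edge: revisit next pass
          }
        }
      }
      std::swap(Worklist, Pending);
      std::swap(OnWorklist, OnPending);
      OnPending.clear();
    }
  }
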
2775
2776bool InstrRefBasedLDV::vlocDowngradeLattice(
2777 const MachineBasicBlock &MBB, const DbgValue &OldLiveInLocation,
2778 const SmallVectorImpl<InValueT> &Values, unsigned CurBlockRPONum) {
2779 // Ranking value preference: see file level comment, the highest rank is
2780 // a plain def, followed by PHI values in reverse post-order. Numerically,
2781 // we assign all defs the rank '0', all PHIs their blocks RPO number plus
2782 // one, and consider the lowest value the highest ranked.
2783 int OldLiveInRank = BBNumToRPO[OldLiveInLocation.ID.getBlock()] + 1;
2784 if (!OldLiveInLocation.ID.isPHI())
2785 OldLiveInRank = 0;
2786
2787 // Allow any unresolvable conflict to be over-ridden.
2788 if (OldLiveInLocation.Kind == DbgValue::NoVal) {
2789 // Although if it was an unresolvable conflict from _this_ block, then
2790 // all other seeking of downgrades and PHIs must have failed beforehand.
2791 if (OldLiveInLocation.BlockNo == (unsigned)MBB.getNumber())
2792 return false;
2793 OldLiveInRank = INT_MIN;
2794 }
2795
2796 auto &InValue = *Values[0].second;
2797
2798 if (InValue.Kind == DbgValue::Const || InValue.Kind == DbgValue::NoVal)
2799 return false;
2800
2801 unsigned ThisRPO = BBNumToRPO[InValue.ID.getBlock()];
2802 int ThisRank = ThisRPO + 1;
2803 if (!InValue.ID.isPHI())
2804 ThisRank = 0;
2805
2806 // Too far down the lattice?
2807 if (ThisRPO >= CurBlockRPONum)
2808 return false;
2809
2810 // Higher in the lattice than what we've already explored?
2811 if (ThisRank <= OldLiveInRank)
2812 return false;
2813
2814 return true;
2815}
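
The ranking behind the downgrade decisions (here and in mlocJoin) can be sketched as follows; this is an approximation with a simplified value type, not the pass's own types:

  #include <climits>

  // A value is either a plain def, a PHI created in some block, or "no value".
  struct LatticeValue {
    bool IsPHI;
    bool IsNoVal;
    unsigned BlockRPO; // RPO number of the defining block, used for PHIs
  };

  // Plain defs rank 0 (highest), PHIs rank their block's RPO number plus one,
  // and an unresolvable NoVal ranks lowest of all so anything may override it.
  int rank(const LatticeValue &V) {
    if (V.IsNoVal)
      return INT_MIN;
    return V.IsPHI ? static_cast<int>(V.BlockRPO) + 1 : 0;
  }

  // A downgrade from the old live-in to the proposed value is allowed only if
  // the proposed value sits strictly between the old value and a PHI at the
  // current block in this ordering.
  bool mayDowngrade(const LatticeValue &OldLiveIn, const LatticeValue &Proposed,
                    unsigned CurBlockRPO) {
    return rank(Proposed) > rank(OldLiveIn) &&
           rank(Proposed) < static_cast<int>(CurBlockRPO) + 1;
  }
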
2816
2817std::tuple<Optional<ValueIDNum>, bool> InstrRefBasedLDV::pickVPHILoc(
2818 MachineBasicBlock &MBB, const DebugVariable &Var, const LiveIdxT &LiveOuts,
2819 ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
2820 const SmallVectorImpl<MachineBasicBlock *> &BlockOrders) {
2821 // For each predecessor, collect the set of locations in which its live-out
2822 // value can be found.
2823 SmallVector<SmallVector<LocIdx, 4>, 8> Locs;
2824 unsigned NumLocs = MTracker->getNumLocs();
2825 unsigned BackEdgesStart = 0;
2826
2827 for (auto p : BlockOrders) {
2828 // Pick out where backedges start in the list of predecessors. Relies on
2829 // BlockOrders being sorted by RPO.
2830 if (BBToOrder[p] < BBToOrder[&MBB])
2831 ++BackEdgesStart;
2832
2833 // For each predecessor, create a new set of locations.
2834 Locs.resize(Locs.size() + 1);
2835 unsigned ThisBBNum = p->getNumber();
2836 auto LiveOutMap = LiveOuts.find(p);
2837 if (LiveOutMap == LiveOuts.end())
2838 // This predecessor isn't in scope; it must have no live-in/live-out
2839 // locations.
2840 continue;
2841
2842 auto It = LiveOutMap->second->find(Var);
2843 if (It == LiveOutMap->second->end())
2844 // There's no value recorded for this variable in this predecessor,
2845 // leave an empty set of locations.
2846 continue;
2847
2848 const DbgValue &OutVal = It->second;
2849
2850 if (OutVal.Kind == DbgValue::Const || OutVal.Kind == DbgValue::NoVal)
2851 // Consts and no-values cannot have locations we can join on.
2852 continue;
2853
2854 assert(OutVal.Kind == DbgValue::Proposed || OutVal.Kind == DbgValue::Def);
2855 ValueIDNum ValToLookFor = OutVal.ID;
2856
2857 // Search the live-outs of the predecessor for the specified value.
2858 for (unsigned int I = 0; I < NumLocs; ++I) {
2859 if (MOutLocs[ThisBBNum][I] == ValToLookFor)
2860 Locs.back().push_back(LocIdx(I));
2861 }
2862 }
2863
2864 // If there were no locations at all, return an empty result.
2865 if (Locs.empty())
2866 return std::tuple<Optional<ValueIDNum>, bool>(None, false);
2867
2868 // Lambda for seeking a common location within a range of location-sets.
2869 using LocsIt = SmallVector<SmallVector<LocIdx, 4>, 8>::iterator;
2870 auto SeekLocation =
2871 [&Locs](llvm::iterator_range<LocsIt> SearchRange) -> Optional<LocIdx> {
2872 // Starting with the first set of locations, take the intersection with
2873 // subsequent sets.
2874 SmallVector<LocIdx, 4> base = Locs[0];
2875 for (auto &S : SearchRange) {
2876 SmallVector<LocIdx, 4> new_base;
2877 std::set_intersection(base.begin(), base.end(), S.begin(), S.end(),
2878 std::inserter(new_base, new_base.begin()));
2879 base = new_base;
2880 }
2881 if (base.empty())
2882 return None;
2883
2884 // We now have a set of LocIdxes that contain the right output value in
2885 // each of the predecessors. Pick the lowest; if there's a register loc,
2886 // that'll be it.
2887 return *base.begin();
2888 };
2889
2890 // Search for a common location for all predecessors. If we can't, then fall
2891 // back to only finding a common location between non-backedge predecessors.
2892 bool ValidForAllLocs = true;
2893 auto TheLoc = SeekLocation(Locs);
2894 if (!TheLoc) {
2895 ValidForAllLocs = false;
2896 TheLoc =
2897 SeekLocation(make_range(Locs.begin(), Locs.begin() + BackEdgesStart));
2898 }
2899
2900 if (!TheLoc)
2901 return std::tuple<Optional<ValueIDNum>, bool>(None, false);
2902
2903 // Return a PHI-value-number for the found location.
2904 LocIdx L = *TheLoc;
2905 ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L};
2906 return std::tuple<Optional<ValueIDNum>, bool>(PHIVal, ValidForAllLocs);
2907}
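
The location-intersection step above amounts to intersecting one sorted vector of candidate locations per predecessor and taking the lowest survivor (register locations have the lowest indexes). A minimal sketch:

  #include <algorithm>
  #include <iterator>
  #include <vector>

  // Each inner vector holds, sorted, the location indexes in which one
  // predecessor's live-out carries the wanted value. Returns -1 if no common
  // location exists.
  int pickCommonLoc(const std::vector<std::vector<unsigned>> &LocsPerPred) {
    if (LocsPerPred.empty())
      return -1;
    std::vector<unsigned> Base = LocsPerPred.front();
    for (unsigned I = 1; I < LocsPerPred.size(); ++I) {
      std::vector<unsigned> Next;
      std::set_intersection(Base.begin(), Base.end(), LocsPerPred[I].begin(),
                            LocsPerPred[I].end(), std::back_inserter(Next));
      Base = std::move(Next);
    }
    if (Base.empty())
      return -1;
    return static_cast<int>(Base.front()); // lowest index: registers first
  }
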
2908
2909std::tuple<bool, bool> InstrRefBasedLDV::vlocJoin(
2910 MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, LiveIdxT &VLOCInLocs,
2911 SmallPtrSet<const MachineBasicBlock *, 16> *VLOCVisited, unsigned BBNum,
2912 const SmallSet<DebugVariable, 4> &AllVars, ValueIDNum **MOutLocs,
2913 ValueIDNum **MInLocs,
2914 SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks,
2915 SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
2916 DenseMap<DebugVariable, DbgValue> &InLocsT) {
2917 bool DowngradeOccurred = false;
2918
2919 // To emulate VarLocBasedImpl, process this block if it's not in scope but
2920 // _does_ assign a variable value. No live-ins for this scope are transferred
2921 // in though, so we can return immediately.
2922 if (InScopeBlocks.count(&MBB) == 0 && !ArtificialBlocks.count(&MBB)) {
2923 if (VLOCVisited)
2924 return std::tuple<bool, bool>(true, false);
2925 return std::tuple<bool, bool>(false, false);
2926 }
2927
2928 LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
2929 bool Changed = false;
2930
2931 // Find any live-ins computed in a prior iteration.
2932 auto ILSIt = VLOCInLocs.find(&MBB);
2933 assert(ILSIt != VLOCInLocs.end());
2934 auto &ILS = *ILSIt->second;
2935
2936 // Order predecessors by RPOT order, for exploring them in that order.
2937 SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors());
2938
2939 auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
2940 return BBToOrder[A] < BBToOrder[B];
2941 };
2942
2943 llvm::sort(BlockOrders, Cmp);
2944
2945 unsigned CurBlockRPONum = BBToOrder[&MBB];
2946
2947 // Force a re-visit to loop heads in the first dataflow iteration.
2948 // FIXME: if we could "propose" Const values this wouldn't be needed,
2949 // because they'd need to be confirmed before being emitted.
2950 if (!BlockOrders.empty() &&
2951 BBToOrder[BlockOrders[BlockOrders.size() - 1]] >= CurBlockRPONum &&
2952 VLOCVisited)
2953 DowngradeOccurred = true;
2954
2955 auto ConfirmValue = [&InLocsT](const DebugVariable &DV, DbgValue VR) {
2956 auto Result = InLocsT.insert(std::make_pair(DV, VR));
2957 (void)Result;
2958 assert(Result.second);
2959 };
2960
2961 auto ConfirmNoVal = [&ConfirmValue, &MBB](const DebugVariable &Var, const DbgValueProperties &Properties) {
2962 DbgValue NoLocPHIVal(MBB.getNumber(), Properties, DbgValue::NoVal);
2963
2964 ConfirmValue(Var, NoLocPHIVal);
2965 };
2966
2967 // Attempt to join the values for each variable.
2968 for (auto &Var : AllVars) {
2969 // Collect all the DbgValues for this variable.
2970 SmallVector<InValueT, 8> Values;
2971 bool Bail = false;
2972 unsigned BackEdgesStart = 0;
2973 for (auto p : BlockOrders) {
2974 // If the predecessor isn't in scope / to be explored, we'll never be
2975 // able to join any locations.
2976 if (!BlocksToExplore.contains(p)) {
2977 Bail = true;
2978 break;
2979 }
2980
2981 // Don't attempt to handle unvisited predecessors: they're implicitly
2982 // "unknown"s in the lattice.
2983 if (VLOCVisited && !VLOCVisited->count(p))
2984 continue;
2985
2986 // If the predecessor's OutLocs is absent, there's not much we can do.
2987 auto OL = VLOCOutLocs.find(p);
2988 if (OL == VLOCOutLocs.end()) {
2989 Bail = true;
2990 break;
2991 }
2992
2993 // No live-out value for this predecessor also means we can't produce
2994 // a joined value.
2995 auto VIt = OL->second->find(Var);
2996 if (VIt == OL->second->end()) {
2997 Bail = true;
2998 break;
2999 }
3000
3001 // Keep track of where back-edges begin in the Values vector. Relies on
3002 // BlockOrders being sorted by RPO.
3003 unsigned ThisBBRPONum = BBToOrder[p];
3004 if (ThisBBRPONum < CurBlockRPONum)
3005 ++BackEdgesStart;
3006
3007 Values.push_back(std::make_pair(p, &VIt->second));
3008 }
3009
3010 // If there were no values, or one of the predecessors couldn't have a
3011 // value, then give up immediately. It's not safe to produce a live-in
3012 // value.
3013 if (Bail || Values.size() == 0)
3014 continue;
3015
3016 // Enumeration identifying the current state of the predecessors values.
3017 enum {
3018 Unset = 0,
3019 Agreed, // All preds agree on the variable value.
3020 PropDisagree, // All preds agree, but the value kind is Proposed in some.
3021 BEDisagree, // Only back-edges disagree on variable value.
3022 PHINeeded, // Non-back-edge predecessors have conflicting values.
3023 NoSolution // Conflicting Value metadata makes solution impossible.
3024 } OurState = Unset;
3025
3026 // All (non-entry) blocks have at least one non-backedge predecessor.
3027 // Pick the variable value from the first of these, to compare against
3028 // all others.
3029 const DbgValue &FirstVal = *Values[0].second;
3030 const ValueIDNum &FirstID = FirstVal.ID;
3031
3032 // Scan for variable values that can't be resolved: if they have different
3033 // DIExpressions, different indirectness, or are mixed constants /
3034 // non-constants.
3035 for (auto &V : Values) {
3036 if (V.second->Properties != FirstVal.Properties)
3037 OurState = NoSolution;
3038 if (V.second->Kind == DbgValue::Const && FirstVal.Kind != DbgValue::Const)
3039 OurState = NoSolution;
3040 }
3041
3042 // Flags diagnosing _how_ the values disagree.
3043 bool NonBackEdgeDisagree = false;
3044 bool DisagreeOnPHINess = false;
3045 bool IDDisagree = false;
3046 bool Disagree = false;
3047 if (OurState == Unset) {
3048 for (auto &V : Values) {
3049 if (*V.second == FirstVal)
3050 continue; // No disagreement.
3051
3052 Disagree = true;
3053
3054 // Flag whether the value number actually disagrees.
3055 if (V.second->ID != FirstID)
3056 IDDisagree = true;
3057
3058 // Distinguish whether disagreement happens in backedges or not.
3059 // Relies on Values (and BlockOrders) being sorted by RPO.
3060 unsigned ThisBBRPONum = BBToOrder[V.first];
3061 if (ThisBBRPONum < CurBlockRPONum)
3062 NonBackEdgeDisagree = true;
3063
3064 // Is there a difference in whether the value is definite or only
3065 // proposed?
3066 if (V.second->Kind != FirstVal.Kind &&
3067 (V.second->Kind == DbgValue::Proposed ||
3068 V.second->Kind == DbgValue::Def) &&
3069 (FirstVal.Kind == DbgValue::Proposed ||
3070 FirstVal.Kind == DbgValue::Def))
3071 DisagreeOnPHINess = true;
3072 }
3073
3074 // Collect those flags together and determine an overall state for
3075 // the extent to which the predecessors agree on a live-in value.
3076 if (!Disagree)
3077 OurState = Agreed;
3078 else if (!IDDisagree && DisagreeOnPHINess)
3079 OurState = PropDisagree;
3080 else if (!NonBackEdgeDisagree)
3081 OurState = BEDisagree;
3082 else
3083 OurState = PHINeeded;
3084 }
3085
3086 // An extra indicator: if we only disagree on whether the value is a
3087 // Def, or proposed, then also flag whether that disagreement happens
3088 // in backedges only.
3089 bool PropOnlyInBEs = Disagree && !IDDisagree && DisagreeOnPHINess &&
3090 !NonBackEdgeDisagree && FirstVal.Kind == DbgValue::Def;
3091
3092 const auto &Properties = FirstVal.Properties;
3093
3094 auto OldLiveInIt = ILS.find(Var);
3095 const DbgValue *OldLiveInLocation =
3096 (OldLiveInIt != ILS.end()) ? &OldLiveInIt->second : nullptr;
3097
3098 bool OverRide = false;
3099 if (OurState == BEDisagree && OldLiveInLocation) {
3100 // Only backedges disagree: we can consider downgrading. If there was a
3101 // previous live-in value, use it to work out whether the current
3102 // incoming value represents a lattice downgrade or not.
3103 OverRide =
3104 vlocDowngradeLattice(MBB, *OldLiveInLocation, Values, CurBlockRPONum);
3105 }
3106
3107 // Use the current state of predecessor agreement and other flags to work
3108 // out what to do next. Possibilities include:
3109 // * Accept a value all predecessors agree on, or accept one that
3110 // represents a step down the exploration lattice,
3111 // * Use a PHI value number, if one can be found,
3112 // * Propose a PHI value number, and see if it gets confirmed later,
3113 // * Emit a 'NoVal' value, indicating we couldn't resolve anything.
3114 if (OurState == Agreed) {
3115 // Easiest solution: all predecessors agree on the variable value.
3116 ConfirmValue(Var, FirstVal);
3117 } else if (OurState == BEDisagree && OverRide) {
3118 // Only backedges disagree, and the other predecessors have produced
3119 // a new live-in value further down the exploration lattice.
3120 DowngradeOccurred = true;
3121 ConfirmValue(Var, FirstVal);
3122 } else if (OurState == PropDisagree) {
3123 // Predecessors agree on value, but some say it's only a proposed value.
3124 // Propagate it as proposed: unless it was proposed in this block, in
3125 // which case we're able to confirm the value.
3126 if (FirstID.getBlock() == (uint64_t)MBB.getNumber() && FirstID.isPHI()) {
3127 ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Def));
3128 } else if (PropOnlyInBEs) {
3129 // If only backedges disagree, a higher (in RPO) block confirmed this
3130 // location, and we need to propagate it into this loop.
3131 ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Def));
3132 } else {
3133 // Otherwise; a Def meeting a Proposed is still a Proposed.
3134 ConfirmValue(Var, DbgValue(FirstID, Properties, DbgValue::Proposed));
3135 }
3136 } else if ((OurState == PHINeeded || OurState == BEDisagree)) {
3137 // Predecessors disagree and can't be downgraded: this can only be
3138 // solved with a PHI. Use pickVPHILoc to go look for one.
3139 Optional<ValueIDNum> VPHI;
3140 bool AllEdgesVPHI = false;
3141 std::tie(VPHI, AllEdgesVPHI) =
3142 pickVPHILoc(MBB, Var, VLOCOutLocs, MOutLocs, MInLocs, BlockOrders);
3143
3144 if (VPHI && AllEdgesVPHI) {
3145 // There's a PHI value that's valid for all predecessors -- we can use
3146 // it. If any of the non-backedge predecessors have proposed values
3147 // though, this PHI is also only proposed, until the predecessors are
3148 // confirmed.
3149 DbgValue::KindT K = DbgValue::Def;
3150 for (unsigned int I = 0; I < BackEdgesStart; ++I)
3151 if (Values[I].second->Kind == DbgValue::Proposed)
3152 K = DbgValue::Proposed;
3153
3154 ConfirmValue(Var, DbgValue(*VPHI, Properties, K));
3155 } else if (VPHI) {
3156 // There's a PHI value, but it's only legal for backedges. Leave this
3157 // as a proposed PHI value: it might come back on the backedges,
3158 // and allow us to confirm it in the future.
3159 DbgValue NoBEValue = DbgValue(*VPHI, Properties, DbgValue::Proposed);
3160 ConfirmValue(Var, NoBEValue);
3161 } else {
3162 ConfirmNoVal(Var, Properties);
3163 }
3164 } else {
3165 // Otherwise: we don't know. Emit a "phi but no real loc" phi.
3166 ConfirmNoVal(Var, Properties);
3167 }
3168 }
3169
3170 // Store newly calculated in-locs into VLOCInLocs, if they've changed.
3171 Changed = ILS != InLocsT;
3172 if (Changed)
3173 ILS = InLocsT;
3174
3175 return std::tuple<bool, bool>(Changed, DowngradeOccurred);
3176}
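
The classification driving the decisions above condenses to a small function; this sketch mirrors the flag handling with plain booleans:

  enum class JoinState { Agreed, PropDisagree, BEDisagree, PHINeeded, NoSolution };

  JoinState classify(bool MetadataConflict, bool Disagree, bool IDDisagree,
                     bool DisagreeOnPHINess, bool NonBackEdgeDisagree) {
    if (MetadataConflict)        // differing properties, or const vs. non-const
      return JoinState::NoSolution;
    if (!Disagree)
      return JoinState::Agreed;
    if (!IDDisagree && DisagreeOnPHINess)
      return JoinState::PropDisagree;
    if (!NonBackEdgeDisagree)
      return JoinState::BEDisagree;
    return JoinState::PHINeeded;
  }
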
3177
3178void InstrRefBasedLDV::vlocDataflow(
3179 const LexicalScope *Scope, const DILocation *DILoc,
3180 const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
3181 SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output,
3182 ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
3183 SmallVectorImpl<VLocTracker> &AllTheVLocs) {
3184 // This method is much like mlocDataflow: but focuses on a single
3185 // LexicalScope at a time. Pick out a set of blocks and variables that are
3186 // to have their value assignments solved, then run our dataflow algorithm
3187 // until a fixed point is reached.
3188 std::priority_queue<unsigned int, std::vector<unsigned int>,
3189 std::greater<unsigned int>>
3190 Worklist, Pending;
3191 SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending;
3192
3193 // The set of blocks we'll be examining.
3194 SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
3195
3196 // The order in which to examine them (RPO).
3197 SmallVector<MachineBasicBlock *, 8> BlockOrders;
3198
3199 // RPO ordering function.
3200 auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
3201 return BBToOrder[A] < BBToOrder[B];
3202 };
3203
3204 LS.getMachineBasicBlocks(DILoc, BlocksToExplore);
3205
3206 // A separate container to distinguish "blocks we're exploring" versus
3207 // "blocks that are potentially in scope. See comment at start of vlocJoin.
3208 SmallPtrSet<const MachineBasicBlock *, 8> InScopeBlocks = BlocksToExplore;
3209
3210 // Old LiveDebugValues tracks variable locations that come out of blocks
3211 // not in scope, where DBG_VALUEs occur. This is something we could
3212 // legitimately ignore, but let's allow it for now.
3213 if (EmulateOldLDV)
3214 BlocksToExplore.insert(AssignBlocks.begin(), AssignBlocks.end());
3215
3216 // We also need to propagate variable values through any artificial blocks
3217 // that immediately follow blocks in scope.
3218 DenseSet<const MachineBasicBlock *> ToAdd;
3219
3220 // Helper lambda: For a given block in scope, perform a depth first search
3221 // of all the artificial successors, adding them to the ToAdd collection.
3222 auto AccumulateArtificialBlocks =
3223 [this, &ToAdd, &BlocksToExplore,
3224 &InScopeBlocks](const MachineBasicBlock *MBB) {
3225 // Depth-first-search state: each node is a block and which successor
3226 // we're currently exploring.
3227 SmallVector<std::pair<const MachineBasicBlock *,
3228 MachineBasicBlock::const_succ_iterator>,
3229 8>
3230 DFS;
3231
3232 // Find any artificial successors not already tracked.
3233 for (auto *succ : MBB->successors()) {
3234 if (BlocksToExplore.count(succ) || InScopeBlocks.count(succ))
3235 continue;
3236 if (!ArtificialBlocks.count(succ))
3237 continue;
3238 DFS.push_back(std::make_pair(succ, succ->succ_begin()));
3239 ToAdd.insert(succ);
3240 }
3241
3242 // Search all those blocks, depth first.
3243 while (!DFS.empty()) {
3244 const MachineBasicBlock *CurBB = DFS.back().first;
3245 MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second;
3246 // Walk back if we've explored this block's successors to the end.
3247 if (CurSucc == CurBB->succ_end()) {
3248 DFS.pop_back();
3249 continue;
3250 }
3251
3252 // If the current successor is artificial and unexplored, descend into
3253 // it.
3254 if (!ToAdd.count(*CurSucc) && ArtificialBlocks.count(*CurSucc)) {
3255 DFS.push_back(std::make_pair(*CurSucc, (*CurSucc)->succ_begin()));
3256 ToAdd.insert(*CurSucc);
3257 continue;
3258 }
3259
3260 ++CurSucc;
3261 }
3262 };
3263
3264 // Search in-scope blocks and those containing a DBG_VALUE from this scope
3265 // for artificial successors.
3266 for (auto *MBB : BlocksToExplore)
3267 AccumulateArtificialBlocks(MBB);
3268 for (auto *MBB : InScopeBlocks)
3269 AccumulateArtificialBlocks(MBB);
3270
3271 BlocksToExplore.insert(ToAdd.begin(), ToAdd.end());
3272 InScopeBlocks.insert(ToAdd.begin(), ToAdd.end());
3273
3274 // Single block scope: not interesting! No propagation at all. Note that
3275 // this could probably go above ArtificialBlocks without damage, but
3276 // that then produces output differences from original-live-debug-values,
3277 // which propagates from a single block into many artificial ones.
3278 if (BlocksToExplore.size() == 1)
3279 return;
3280
3281 // Pick out the relevant blocks in RPO order and sort them.
3282 for (auto *MBB : BlocksToExplore)
3283 BlockOrders.push_back(const_cast<MachineBasicBlock *>(MBB));
3284
3285 llvm::sort(BlockOrders, Cmp);
3286 unsigned NumBlocks = BlockOrders.size();
3287
3288 // Allocate some vectors for storing the live ins and live outs. Large.
3289 SmallVector<DenseMap<DebugVariable, DbgValue>, 32> LiveIns, LiveOuts;
3290 LiveIns.resize(NumBlocks);
3291 LiveOuts.resize(NumBlocks);
3292
3293 // Produce by-MBB indexes of live-in/live-outs, to ease lookup within
3294 // vlocJoin.
3295 LiveIdxT LiveOutIdx, LiveInIdx;
3296 LiveOutIdx.reserve(NumBlocks);
3297 LiveInIdx.reserve(NumBlocks);
3298 for (unsigned I = 0; I < NumBlocks; ++I) {
3299 LiveOutIdx[BlockOrders[I]] = &LiveOuts[I];
3300 LiveInIdx[BlockOrders[I]] = &LiveIns[I];
3301 }
3302
3303 for (auto *MBB : BlockOrders) {
3304 Worklist.push(BBToOrder[MBB]);
3305 OnWorklist.insert(MBB);
3306 }
3307
3308 // Iterate over all the blocks we selected, propagating variable values.
3309 bool FirstTrip = true;
3310 SmallPtrSet<const MachineBasicBlock *, 16> VLOCVisited;
3311 while (!Worklist.empty() || !Pending.empty()) {
3312 while (!Worklist.empty()) {
3313 auto *MBB = OrderToBB[Worklist.top()];
3314 CurBB = MBB->getNumber();
3315 Worklist.pop();
3316
3317 DenseMap<DebugVariable, DbgValue> JoinedInLocs;
3318
3319 // Join values from predecessors. Updates LiveInIdx, and writes output
3320 // into JoinedInLocs.
3321 bool InLocsChanged, DowngradeOccurred;
3322 std::tie(InLocsChanged, DowngradeOccurred) = vlocJoin(
3323 *MBB, LiveOutIdx, LiveInIdx, (FirstTrip) ? &VLOCVisited : nullptr,
3324 CurBB, VarsWeCareAbout, MOutLocs, MInLocs, InScopeBlocks,
3325 BlocksToExplore, JoinedInLocs);
3326
3327 bool FirstVisit = VLOCVisited.insert(MBB).second;
3328
3329 // Always explore transfer function if inlocs changed, or if we've not
3330 // visited this block before.
3331 InLocsChanged |= FirstVisit;
3332
3333 // If a downgrade occurred, book us in for re-examination on the next
3334 // iteration.
3335 if (DowngradeOccurred && OnPending.insert(MBB).second)
3336 Pending.push(BBToOrder[MBB]);
3337
3338 if (!InLocsChanged)
3339 continue;
3340
3341 // Do transfer function.
3342 auto &VTracker = AllTheVLocs[MBB->getNumber()];
3343 for (auto &Transfer : VTracker.Vars) {
3344 // Is this a var we're mangling in this scope?
3345 if (VarsWeCareAbout.count(Transfer.first)) {
3346 // Erase on empty transfer (DBG_VALUE $noreg).
3347 if (Transfer.second.Kind == DbgValue::Undef) {
3348 JoinedInLocs.erase(Transfer.first);
3349 } else {
3350 // Insert new variable value; or overwrite.
3351 auto NewValuePair = std::make_pair(Transfer.first, Transfer.second);
3352 auto Result = JoinedInLocs.insert(NewValuePair);
3353 if (!Result.second)
3354 Result.first->second = Transfer.second;
3355 }
3356 }
3357 }
3358
3359 // Did the live-out locations change?
3360 bool OLChanged = JoinedInLocs != *LiveOutIdx[MBB];
3361
3362 // If they haven't changed, there's no need to explore further.
3363 if (!OLChanged)
3364 continue;
3365
3366 // Commit to the live-out record.
3367 *LiveOutIdx[MBB] = JoinedInLocs;
3368
3369 // We should visit all successors. Ensure we'll visit any non-backedge
3370 // successors during this dataflow iteration; book backedge successors
3371 // to be visited next time around.
3372 for (auto s : MBB->successors()) {
3373 // Ignore out of scope / not-to-be-explored successors.
3374 if (LiveInIdx.find(s) == LiveInIdx.end())
3375 continue;
3376
3377 if (BBToOrder[s] > BBToOrder[MBB]) {
3378 if (OnWorklist.insert(s).second)
3379 Worklist.push(BBToOrder[s]);
3380 } else if (OnPending.insert(s).second && (FirstTrip || OLChanged)) {
3381 Pending.push(BBToOrder[s]);
3382 }
3383 }
3384 }
3385 Worklist.swap(Pending);
3386 std::swap(OnWorklist, OnPending);
3387 OnPending.clear();
3388 assert(Pending.empty());
3389 FirstTrip = false;
3390 }
3391
3392 // Dataflow done. Now what? Save live-ins. Ignore any that are still marked
3393 // as being variable-PHIs, because those did not have their machine-PHI
3394 // value confirmed. Such variable values are places that could have been
3395 // PHIs, but are not.
3396 for (auto *MBB : BlockOrders) {
3397 auto &VarMap = *LiveInIdx[MBB];
3398 for (auto &P : VarMap) {
3399 if (P.second.Kind == DbgValue::Proposed ||
3400 P.second.Kind == DbgValue::NoVal)
3401 continue;
3402 Output[MBB->getNumber()].push_back(P);
3403 }
3404 }
3405
3406 BlockOrders.clear();
3407 BlocksToExplore.clear();
3408}
3409
3410#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3411void InstrRefBasedLDV::dump_mloc_transfer(
3412 const MLocTransferMap &mloc_transfer) const {
3413 for (auto &P : mloc_transfer) {
3414 std::string foo = MTracker->LocIdxToName(P.first);
3415 std::string bar = MTracker->IDAsString(P.second);
3416 dbgs() << "Loc " << foo << " --> " << bar << "\n";
3417 }
3418}
3419#endif
3420
3421void InstrRefBasedLDV::emitLocations(
3422 MachineFunction &MF, LiveInsT SavedLiveIns, ValueIDNum **MOutLocs,
3423 ValueIDNum **MInLocs, DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
3424 const TargetPassConfig &TPC) {
3425 TTracker = new TransferTracker(TII, MTracker, MF, *TRI, CalleeSavedRegs, TPC);
3426 unsigned NumLocs = MTracker->getNumLocs();
3427
3428 // For each block, load in the machine value locations and variable value
3429 // live-ins, then step through each instruction in the block. New DBG_VALUEs
3430 // to be inserted will be created along the way.
3431 for (MachineBasicBlock &MBB : MF) {
3432 unsigned bbnum = MBB.getNumber();
3433 MTracker->reset();
3434 MTracker->loadFromArray(MInLocs[bbnum], bbnum);
3435 TTracker->loadInlocs(MBB, MInLocs[bbnum], SavedLiveIns[MBB.getNumber()],
3436 NumLocs);
3437
3438 CurBB = bbnum;
3439 CurInst = 1;
3440 for (auto &MI : MBB) {
3441 process(MI, MOutLocs, MInLocs);
3442 TTracker->checkInstForNewValues(CurInst, MI.getIterator());
3443 ++CurInst;
3444 }
3445 }
3446
3447 // We have to insert DBG_VALUEs in a consistent order, otherwise they appear
3448 // in DWARF in different orders. Use the order that they appear when walking
3449 // through each block / each instruction, stored in AllVarsNumbering.
3450 auto OrderDbgValues = [&](const MachineInstr *A,
3451 const MachineInstr *B) -> bool {
3452 DebugVariable VarA(A->getDebugVariable(), A->getDebugExpression(),
3453 A->getDebugLoc()->getInlinedAt());
3454 DebugVariable VarB(B->getDebugVariable(), B->getDebugExpression(),
3455 B->getDebugLoc()->getInlinedAt());
3456 return AllVarsNumbering.find(VarA)->second <
3457 AllVarsNumbering.find(VarB)->second;
3458 };
3459
3460 // Go through all the transfers recorded in the TransferTracker -- this is
3461 // both the live-ins to a block, and any movements of values that happen
3462 // in the middle.
3463 for (auto &P : TTracker->Transfers) {
3464 // Sort them according to appearance order.
3465 llvm::sort(P.Insts, OrderDbgValues);
3466 // Insert either before or after the designated point...
3467 if (P.MBB) {
3468 MachineBasicBlock &MBB = *P.MBB;
3469 for (auto *MI : P.Insts) {
3470 MBB.insert(P.Pos, MI);
3471 }
3472 } else {
3473 // Terminators, like tail calls, can clobber things. Don't try and place
3474 // transfers after them.
3475 if (P.Pos->isTerminator())
3476 continue;
3477
3478 MachineBasicBlock &MBB = *P.Pos->getParent();
3479 for (auto *MI : P.Insts) {
3480 MBB.insertAfterBundle(P.Pos, MI);
3481 }
3482 }
3483 }
3484}
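
The stable-ordering trick above, numbering each variable the first time it is seen and sorting pending instructions by that number, can be sketched standalone (std::string stands in for DebugVariable):

  #include <algorithm>
  #include <map>
  #include <string>
  #include <vector>

  int main() {
    std::map<std::string, unsigned> Numbering; // first-appearance numbering
    std::vector<std::string> Appearances = {"x", "y", "x", "z"};
    for (const auto &Var : Appearances)
      Numbering.insert({Var, (unsigned)Numbering.size()});

    std::vector<std::string> Pending = {"z", "x", "y"};
    std::sort(Pending.begin(), Pending.end(),
              [&](const std::string &A, const std::string &B) {
                return Numbering.find(A)->second < Numbering.find(B)->second;
              });
    // Pending is now {"x", "y", "z"}: the order of first appearance.
  }
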
3485
3486void InstrRefBasedLDV::initialSetup(MachineFunction &MF) {
3487 // Build some useful data structures.
3488 auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool {
3489 if (const DebugLoc &DL = MI.getDebugLoc())
3490 return DL.getLine() != 0;
3491 return false;
3492 };
3493 // Collect a set of all the artificial blocks.
3494 for (auto &MBB : MF)
3495 if (none_of(MBB.instrs(), hasNonArtificialLocation))
3496 ArtificialBlocks.insert(&MBB);
3497
3498 // Compute mappings of block <=> RPO order.
3499 ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
3500 unsigned int RPONumber = 0;
3501 for (MachineBasicBlock *MBB : RPOT) {
3502 OrderToBB[RPONumber] = MBB;
3503 BBToOrder[MBB] = RPONumber;
3504 BBNumToRPO[MBB->getNumber()] = RPONumber;
3505 ++RPONumber;
3506 }
3507
3508 // Order value substitutions by their "source" operand pair, for quick lookup.
3509 llvm::sort(MF.DebugValueSubstitutions);
3510
3511#ifdef EXPENSIVE_CHECKS
3512 // As an expensive check, test whether there are any duplicate substitution
3513 // sources in the collection.
3514 if (MF.DebugValueSubstitutions.size() > 2) {
3515 for (auto It = MF.DebugValueSubstitutions.begin();
3516 It != std::prev(MF.DebugValueSubstitutions.end()); ++It) {
3517 assert(It->Src != std::next(It)->Src && "Duplicate variable location "
3518 "substitution seen");
3519 }
3520 }
3521#endif
3522}
3523
3524/// Calculate the liveness information for the given machine function and
3525/// extend ranges across basic blocks.
3526bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
3527 TargetPassConfig *TPC) {
3528 // No subprogram means this function contains no debuginfo.
3529 if (!MF.getFunction().getSubprogram())
1. Assuming the condition is false
2. Taking false branch
3530 return false;
3531
3532 LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n");
3. Assuming 'DebugFlag' is false
4. Loop condition is false. Exiting loop
3533 this->TPC = TPC;
3534
3535 TRI = MF.getSubtarget().getRegisterInfo();
3536 TII = MF.getSubtarget().getInstrInfo();
3537 TFI = MF.getSubtarget().getFrameLowering();
3538 TFI->getCalleeSaves(MF, CalleeSavedRegs);
3539 MFI = &MF.getFrameInfo();
3540 LS.initialize(MF);
3541
3542 MTracker =
3543 new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering());
3544 VTracker = nullptr;
3545 TTracker = nullptr;
3546
3547 SmallVector<MLocTransferMap, 32> MLocTransfer;
3548 SmallVector<VLocTracker, 8> vlocs;
3549 LiveInsT SavedLiveIns;
3550
3551 int MaxNumBlocks = -1;
3552 for (auto &MBB : MF)
3553 MaxNumBlocks = std::max(MBB.getNumber(), MaxNumBlocks);
3554 assert(MaxNumBlocks >= 0);
5. Assuming 'MaxNumBlocks' is >= 0
6. '?' condition is true
3555 ++MaxNumBlocks;
3556
3557 MLocTransfer.resize(MaxNumBlocks);
3558 vlocs.resize(MaxNumBlocks);
3559 SavedLiveIns.resize(MaxNumBlocks);
3560
3561 initialSetup(MF);
3562
3563 produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks);
7. Calling 'InstrRefBasedLDV::produceMLocTransferFunction'
3564
3565 // Allocate and initialize two array-of-arrays for the live-in and live-out
3566 // machine values. The outer dimension is the block number; while the inner
3567 // dimension is a LocIdx from MLocTracker.
3568 ValueIDNum **MOutLocs = new ValueIDNum *[MaxNumBlocks];
3569 ValueIDNum **MInLocs = new ValueIDNum *[MaxNumBlocks];
3570 unsigned NumLocs = MTracker->getNumLocs();
3571 for (int i = 0; i < MaxNumBlocks; ++i) {
3572 MOutLocs[i] = new ValueIDNum[NumLocs];
3573 MInLocs[i] = new ValueIDNum[NumLocs];
3574 }
3575
3576 // Solve the machine value dataflow problem using the MLocTransfer function,
3577 // storing the computed live-ins / live-outs into the array-of-arrays. We use
3578 // both live-ins and live-outs for decision making in the variable value
3579 // dataflow problem.
3580 mlocDataflow(MInLocs, MOutLocs, MLocTransfer);
3581
3582 // Patch up debug phi numbers, turning unknown block-live-in values into
3583 // either live-through machine values, or PHIs.
3584 for (auto &DBG_PHI : DebugPHINumToValue) {
3585 // Identify unresolved block-live-ins.
3586 ValueIDNum &Num = DBG_PHI.ValueRead;
3587 if (!Num.isPHI())
3588 continue;
3589
3590 unsigned BlockNo = Num.getBlock();
3591 LocIdx LocNo = Num.getLoc();
3592 Num = MInLocs[BlockNo][LocNo.asU64()];
3593 }
3594 // Later, we'll be looking up ranges of instruction numbers.
3595 llvm::sort(DebugPHINumToValue);
3596
3597 // Walk back through each block / instruction, collecting DBG_VALUE
3598 // instructions and recording what machine value their operands refer to.
3599 for (auto &OrderPair : OrderToBB) {
3600 MachineBasicBlock &MBB = *OrderPair.second;
3601 CurBB = MBB.getNumber();
3602 VTracker = &vlocs[CurBB];
3603 VTracker->MBB = &MBB;
3604 MTracker->loadFromArray(MInLocs[CurBB], CurBB);
3605 CurInst = 1;
3606 for (auto &MI : MBB) {
3607 process(MI, MOutLocs, MInLocs);
3608 ++CurInst;
3609 }
3610 MTracker->reset();
3611 }
3612
3613 // Number all variables in the order that they appear, to be used as a stable
3614 // insertion order later.
3615 DenseMap<DebugVariable, unsigned> AllVarsNumbering;
3616
3617 // Map from one LexicalScope to all the variables in that scope.
3618 DenseMap<const LexicalScope *, SmallSet<DebugVariable, 4>> ScopeToVars;
3619
3620 // Map from One lexical scope to all blocks in that scope.
3621 DenseMap<const LexicalScope *, SmallPtrSet<MachineBasicBlock *, 4>>
3622 ScopeToBlocks;
3623
3624 // Store a DILocation that describes a scope.
3625 DenseMap<const LexicalScope *, const DILocation *> ScopeToDILocation;
3626
3627 // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
3628 // the order is unimportant; it just has to be stable.
3629 for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
3630 auto *MBB = OrderToBB[I];
3631 auto *VTracker = &vlocs[MBB->getNumber()];
3632 // Collect each variable with a DBG_VALUE in this block.
3633 for (auto &idx : VTracker->Vars) {
3634 const auto &Var = idx.first;
3635 const DILocation *ScopeLoc = VTracker->Scopes[Var];
3636 assert(ScopeLoc != nullptr);
3637 auto *Scope = LS.findLexicalScope(ScopeLoc);
3638
3639 // No insts in scope -> shouldn't have been recorded.
3640 assert(Scope != nullptr);
3641
3642 AllVarsNumbering.insert(std::make_pair(Var, AllVarsNumbering.size()));
3643 ScopeToVars[Scope].insert(Var);
3644 ScopeToBlocks[Scope].insert(VTracker->MBB);
3645 ScopeToDILocation[Scope] = ScopeLoc;
3646 }
3647 }
3648
3649 // OK. Iterate over scopes: there might be something to be said for
3650 // ordering them by size/locality, but that's for the future. For each scope,
3651 // solve the variable value problem, producing a map of variables to values
3652 // in SavedLiveIns.
3653 for (auto &P : ScopeToVars) {
3654 vlocDataflow(P.first, ScopeToDILocation[P.first], P.second,
3655 ScopeToBlocks[P.first], SavedLiveIns, MOutLocs, MInLocs,
3656 vlocs);
3657 }
3658
3659 // Using the computed value locations and variable values for each block,
3660 // create the DBG_VALUE instructions representing the extended variable
3661 // locations.
3662 emitLocations(MF, SavedLiveIns, MOutLocs, MInLocs, AllVarsNumbering, *TPC);
3663
3664 for (int Idx = 0; Idx < MaxNumBlocks; ++Idx) {
3665 delete[] MOutLocs[Idx];
3666 delete[] MInLocs[Idx];
3667 }
3668 delete[] MOutLocs;
3669 delete[] MInLocs;
3670
3671 // Did we actually make any changes? If we created any DBG_VALUEs, then yes.
3672 bool Changed = TTracker->Transfers.size() != 0;
3673
3674 delete MTracker;
3675 delete TTracker;
3676 MTracker = nullptr;
3677 VTracker = nullptr;
3678 TTracker = nullptr;
3679
3680 ArtificialBlocks.clear();
3681 OrderToBB.clear();
3682 BBToOrder.clear();
3683 BBNumToRPO.clear();
3684 DebugInstrNumToInstr.clear();
3685 DebugPHINumToValue.clear();
3686
3687 return Changed;
3688}
3689
3690LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() {
3691 return new InstrRefBasedLDV();
3692}
3693
3694namespace {
3695class LDVSSABlock;
3696class LDVSSAUpdater;
3697
3698// Pick a type to identify incoming block values as we construct SSA. We
3699// can't use anything more robust than an integer unfortunately, as SSAUpdater
3700// expects to zero-initialize the type.
3701typedef uint64_t BlockValueNum;
3702
3703/// Represents an SSA PHI node for the SSA updater class. Contains the block
3704/// this PHI is in, the value number it would have, and the expected incoming
3705/// values from parent blocks.
3706class LDVSSAPhi {
3707public:
3708 SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues;
3709 LDVSSABlock *ParentBlock;
3710 BlockValueNum PHIValNum;
3711 LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock)
3712 : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {}
3713
3714 LDVSSABlock *getParent() { return ParentBlock; }
3715};
3716
3717/// Thin wrapper around a block predecessor iterator. Only difference from a
3718/// normal block iterator is that it dereferences to an LDVSSABlock.
3719class LDVSSABlockIterator {
3720public:
3721 MachineBasicBlock::pred_iterator PredIt;
3722 LDVSSAUpdater &Updater;
3723
3724 LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt,
3725 LDVSSAUpdater &Updater)
3726 : PredIt(PredIt), Updater(Updater) {}
3727
3728 bool operator!=(const LDVSSABlockIterator &OtherIt) const {
3729 return OtherIt.PredIt != PredIt;
3730 }
3731
3732 LDVSSABlockIterator &operator++() {
3733 ++PredIt;
3734 return *this;
3735 }
3736
3737 LDVSSABlock *operator*();
3738};
3739
3740/// Thin wrapper around a block for SSA Updater interface. Necessary because
3741/// we need to track the PHI value(s) that we may have observed as necessary
3742/// in this block.
3743class LDVSSABlock {
3744public:
3745 MachineBasicBlock &BB;
3746 LDVSSAUpdater &Updater;
3747 using PHIListT = SmallVector<LDVSSAPhi, 1>;
3748 /// List of PHIs in this block. There should only ever be one.
3749 PHIListT PHIList;
3750
3751 LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater)
3752 : BB(BB), Updater(Updater) {}
3753
3754 LDVSSABlockIterator succ_begin() {
3755 return LDVSSABlockIterator(BB.succ_begin(), Updater);
3756 }
3757
3758 LDVSSABlockIterator succ_end() {
3759 return LDVSSABlockIterator(BB.succ_end(), Updater);
3760 }
3761
3762 /// SSAUpdater has requested a PHI: create that within this block record.
3763 LDVSSAPhi *newPHI(BlockValueNum Value) {
3764 PHIList.emplace_back(Value, this);
3765 return &PHIList.back();
3766 }
3767
3768 /// SSAUpdater wishes to know what PHIs already exist in this block.
3769 PHIListT &phis() { return PHIList; }
3770};
3771
3772/// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values
3773/// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to
3774 /// SSAUpdaterTraits<LDVSSAUpdater>.
3775class LDVSSAUpdater {
3776public:
3777 /// Map of value numbers to PHI records.
3778 DenseMap<BlockValueNum, LDVSSAPhi *> PHIs;
3779 /// Map of which blocks generate Undef values -- blocks that are not
3780 /// dominated by any Def.
3781 DenseMap<MachineBasicBlock *, BlockValueNum> UndefMap;
3782 /// Map of machine blocks to our own records of them.
3783 DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap;
3784 /// Machine location where any PHI must occur.
3785 LocIdx Loc;
3786 /// Table of live-in machine value numbers for blocks / locations.
3787 ValueIDNum **MLiveIns;
3788
3789 LDVSSAUpdater(LocIdx L, ValueIDNum **MLiveIns) : Loc(L), MLiveIns(MLiveIns) {}
3790
3791 void reset() {
3792 for (auto &Block : BlockMap)
3793 delete Block.second;
3794
3795 PHIs.clear();
3796 UndefMap.clear();
3797 BlockMap.clear();
3798 }
3799
3800 ~LDVSSAUpdater() { reset(); }
3801
3802 /// For a given MBB, create a wrapper block for it. Stores it in the
3803 /// LDVSSAUpdater block map.
3804 LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) {
3805 auto it = BlockMap.find(BB);
3806 if (it == BlockMap.end()) {
3807 BlockMap[BB] = new LDVSSABlock(*BB, *this);
3808 it = BlockMap.find(BB);
3809 }
3810 return it->second;
3811 }
3812
3813 /// Find the live-in value number for the given block. Looks up the value at
3814 /// the PHI location on entry.
3815 BlockValueNum getValue(LDVSSABlock *LDVBB) {
3816 return MLiveIns[LDVBB->BB.getNumber()][Loc.asU64()].asU64();
3817 }
3818};
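getValue() above is a direct two-dimensional lookup into the MLiveIns table: the block number selects the row, the location index selects the column. A minimal sketch of that access pattern follows, assuming the table is non-null and fully populated -- the very assumption the analyzer questions further down.

// Sketch of the lookup LDVSSAUpdater::getValue performs. Assumes MLiveIns has
// one row per basic-block number and one column per tracked machine location.
uint64_t liveInValueAsU64(ValueIDNum **MLiveIns, const MachineBasicBlock &MBB,
                          LocIdx Loc) {
  return MLiveIns[MBB.getNumber()][Loc.asU64()].asU64(); // row = block, col = location
}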
3819
3820LDVSSABlock *LDVSSABlockIterator::operator*() {
3821 return Updater.getSSALDVBlock(*PredIt);
3822}
3823
3824#ifndef NDEBUG
3825
3826raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) {
3827 out << "SSALDVPHI " << PHI.PHIValNum;
3828 return out;
3829}
3830
3831#endif
3832
3833} // namespace
3834
3835namespace llvm {
3836
3837/// Template specialization to give SSAUpdater access to CFG and value
3838/// information. SSAUpdater calls methods in these traits, passing in the
3839/// LDVSSAUpdater object, to learn about blocks and the values they define.
3840/// It also provides methods to create PHI nodes and track them.
3841template <> class SSAUpdaterTraits<LDVSSAUpdater> {
3842public:
3843 using BlkT = LDVSSABlock;
3844 using ValT = BlockValueNum;
3845 using PhiT = LDVSSAPhi;
3846 using BlkSucc_iterator = LDVSSABlockIterator;
3847
3848 // Methods to access block successors -- dereferencing to our wrapper class.
3849 static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
3850 static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
3851
3852 /// Iterator for PHI operands.
3853 class PHI_iterator {
3854 private:
3855 LDVSSAPhi *PHI;
3856 unsigned Idx;
3857
3858 public:
3859 explicit PHI_iterator(LDVSSAPhi *P) // begin iterator
3860 : PHI(P), Idx(0) {}
3861 PHI_iterator(LDVSSAPhi *P, bool) // end iterator
3862 : PHI(P), Idx(PHI->IncomingValues.size()) {}
3863
3864 PHI_iterator &operator++() {
3865 Idx++;
3866 return *this;
3867 }
3868 bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; }
3869 bool operator!=(const PHI_iterator &X) const { return !operator==(X); }
3870
3871 BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; }
3872
3873 LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; }
3874 };
3875
3876 static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
3877
3878 static inline PHI_iterator PHI_end(PhiT *PHI) {
3879 return PHI_iterator(PHI, true);
3880 }
3881
3882 /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
3883 /// vector.
3884 static void FindPredecessorBlocks(LDVSSABlock *BB,
3885 SmallVectorImpl<LDVSSABlock *> *Preds) {
3886 for (MachineBasicBlock::pred_iterator PI = BB->BB.pred_begin(),
3887 E = BB->BB.pred_end();
3888 PI != E; ++PI)
3889 Preds->push_back(BB->Updater.getSSALDVBlock(*PI));
3890 }
3891
3892 /// GetUndefVal - Normally creates an IMPLICIT_DEF instruction with a new
3893 /// register. For LiveDebugValues, represents a block identified as not having
3894 /// any DBG_PHI predecessors.
3895 static BlockValueNum GetUndefVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) {
3896 // Create a value number for this block -- it needs to be unique and in the
3897 // "undef" collection, so that we know it's not real. Use a number
3898 // representing a PHI into this block.
3899 BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64();
3900 Updater->UndefMap[&BB->BB] = Num;
3901 return Num;
3902 }
3903
3904 /// CreateEmptyPHI - Create a (representation of a) PHI in the given block.
3905 /// SSAUpdater will populate it with information about incoming values. The
3906 /// value number of this PHI is whatever the machine value number problem
3907 /// solution determined it to be. This includes non-phi values if SSAUpdater
3908 /// tries to create a PHI where the incoming values are identical.
3909 static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds,
3910 LDVSSAUpdater *Updater) {
3911 BlockValueNum PHIValNum = Updater->getValue(BB);
3912 LDVSSAPhi *PHI = BB->newPHI(PHIValNum);
3913 Updater->PHIs[PHIValNum] = PHI;
3914 return PHIValNum;
3915 }
3916
3917 /// AddPHIOperand - Add the specified value as an operand of the PHI for
3918 /// the specified predecessor block.
3919 static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val, LDVSSABlock *Pred) {
3920 PHI->IncomingValues.push_back(std::make_pair(Pred, Val));
3921 }
3922
3923 /// ValueIsPHI - Check if the instruction that defines the specified value
3924 /// is a PHI instruction.
3925 static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
3926 auto PHIIt = Updater->PHIs.find(Val);
3927 if (PHIIt == Updater->PHIs.end())
3928 return nullptr;
3929 return PHIIt->second;
3930 }
3931
3932 /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
3933 /// operands, i.e., it was just added.
3934 static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
3935 LDVSSAPhi *PHI = ValueIsPHI(Val, Updater);
3936 if (PHI && PHI->IncomingValues.size() == 0)
3937 return PHI;
3938 return nullptr;
3939 }
3940
3941 /// GetPHIValue - For the specified PHI instruction, return the value
3942 /// that it defines.
3943 static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; }
3944};
3945
3946} // end namespace llvm
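For orientation, the traits above are consumed by SSAUpdaterImpl roughly as the helper below does it: seed one available value per DBG_PHI block, then ask for the value reaching the use block. This sketch merely mirrors resolveDbgPHIs further down; DefBlock, DefValue and UseBlock are hypothetical placeholder names.

// Illustrative driver for SSAUpdaterImpl<LDVSSAUpdater>; mirrors the real use
// in resolveDbgPHIs below. DefBlock, DefValue and UseBlock are hypothetical.
BlockValueNum valueReachingUse(LDVSSAUpdater &Updater, MachineBasicBlock *DefBlock,
                               ValueIDNum DefValue, MachineBasicBlock *UseBlock) {
  DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues;
  SmallVector<LDVSSAPhi *, 8> CreatedPHIs;
  AvailableValues.insert(
      {Updater.getSSALDVBlock(DefBlock), DefValue.asU64()}); // one Def per DBG_PHI
  SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs);
  return Impl.GetValue(Updater.getSSALDVBlock(UseBlock));
}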
3947
3948Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(MachineFunction &MF,
3949 ValueIDNum **MLiveOuts,
3950 ValueIDNum **MLiveIns,
3951 MachineInstr &Here,
3952 uint64_t InstrNum) {
3953 // Pick out records of DBG_PHI instructions that have been observed. If there
3954 // are none, then we cannot compute a value number.
3955 auto RangePair = std::equal_range(DebugPHINumToValue.begin(),
3956 DebugPHINumToValue.end(), InstrNum);
3957 auto LowerIt = RangePair.first;
3958 auto UpperIt = RangePair.second;
3959
3960 // No DBG_PHI means there can be no location.
3961 if (LowerIt == UpperIt)
85: Assuming 'LowerIt' is not equal to 'UpperIt'
86: Taking false branch
3962 return None;
3963
3964 // If there's only one DBG_PHI, then that is our value number.
3965 if (std::distance(LowerIt, UpperIt) == 1)
87: Assuming the condition is false
88: Taking false branch
3966 return LowerIt->ValueRead;
3967
3968 auto DBGPHIRange = make_range(LowerIt, UpperIt);
3969
3970 // Pick out the location (physreg, slot) where any PHIs must occur. It's
3971 // technically possible for us to merge values in different registers in each
3972 // block, but highly unlikely that LLVM will generate such code after register
3973 // allocation.
3974 LocIdx Loc = LowerIt->ReadLoc;
3975
3976 // We have several DBG_PHIs, and a use position (the Here inst). All each
3977 // DBG_PHI does is identify a value at a program position. We can treat each
3978 // DBG_PHI like it's a Def of a value, and the use position is a Use of a
3979 // value, just like SSA. We use the bulk-standard LLVM SSA updater class to
3980 // determine which Def is used at the Use, and any PHIs that happen along
3981 // the way.
3982 // Adapted LLVM SSA Updater:
3983 LDVSSAUpdater Updater(Loc, MLiveIns);
3984 // Map of which Def or PHI is the current value in each block.
3985 DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues;
3986 // Set of PHIs that we have created along the way.
3987 SmallVector<LDVSSAPhi *, 8> CreatedPHIs;
3988
3989 // Each existing DBG_PHI is a Def'd value under this model. Record these Defs
3990 // for the SSAUpdater.
3991 for (const auto &DBG_PHI : DBGPHIRange) {
89: Assuming '__begin1' is equal to '__end1'
3992 LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
3993 const ValueIDNum &Num = DBG_PHI.ValueRead;
3994 AvailableValues.insert(std::make_pair(Block, Num.asU64()));
3995 }
3996
3997 LDVSSABlock *HereBlock = Updater.getSSALDVBlock(Here.getParent());
3998 const auto &AvailIt = AvailableValues.find(HereBlock);
3999 if (AvailIt != AvailableValues.end()) {
90: Calling 'operator!='
93: Returning from 'operator!='
94: Taking false branch
4000 // Actually, we already know what the value is -- the Use is in the same
4001 // block as the Def.
4002 return ValueIDNum::fromU64(AvailIt->second);
4003 }
4004
4005 // Otherwise, we must use the SSA Updater. It will identify the value number
4006 // that we are to use, and the PHIs that must happen along the way.
4007 SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs);
4008 BlockValueNum ResultInt = Impl.GetValue(Updater.getSSALDVBlock(Here.getParent()));
4009 ValueIDNum Result = ValueIDNum::fromU64(ResultInt);
4010
4011 // We have the number for a PHI, or possibly live-through value, to be used
4012 // at this Use. There are a number of things we have to check about it though:
4013 // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? If so, this
4014 // Use was not completely dominated by DBG_PHIs and we should abort.
4015 // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that
4016 // we've left SSA form. Validate that the inputs to each PHI are the
4017 // expected values.
4018 // * Is a PHI we've created actually a merging of values, or are all the
4019 // predecessor values the same, leading to a non-PHI machine value number?
4020 // (SSAUpdater doesn't know that either). Remap validated PHIs into the
4021 // ValidatedValues collection below to sort this out.
4022 DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues;
4023
4024 // Define all the input DBG_PHI values in ValidatedValues.
4025 for (const auto &DBG_PHI : DBGPHIRange) {
95: Assuming '__begin1' is equal to '__end1'
4026 LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
4027 const ValueIDNum &Num = DBG_PHI.ValueRead;
4028 ValidatedValues.insert(std::make_pair(Block, Num));
4029 }
4030
4031 // Sort PHIs to validate into RPO-order.
4032 SmallVector<LDVSSAPhi *, 8> SortedPHIs;
4033 for (auto &PHI : CreatedPHIs)
96: Assuming '__begin1' is equal to '__end1'
4034 SortedPHIs.push_back(PHI);
4035
4036 std::sort(
4037 SortedPHIs.begin(), SortedPHIs.end(), [&](LDVSSAPhi *A, LDVSSAPhi *B) {
4038 return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB];
4039 });
4040
4041 for (auto &PHI : SortedPHIs) {
97: Assuming '__begin1' is not equal to '__end1'
4042 ValueIDNum ThisBlockValueNum =
4043 MLiveIns[PHI->ParentBlock->BB.getNumber()][Loc.asU64()];
98: Array access (from variable 'MLiveIns') results in a null pointer dereference
4044
4045 // Are all these things actually defined?
4046 for (auto &PHIIt : PHI->IncomingValues) {
4047 // Any undef input means DBG_PHIs didn't dominate the use point.
4048 if (Updater.UndefMap.find(&PHIIt.first->BB) != Updater.UndefMap.end())
4049 return None;
4050
4051 ValueIDNum ValueToCheck;
4052 ValueIDNum *BlockLiveOuts = MLiveOuts[PHIIt.first->BB.getNumber()];
4053
4054 auto VVal = ValidatedValues.find(PHIIt.first);
4055 if (VVal == ValidatedValues.end()) {
4056 // We cross a loop, and this is a backedge. LLVM's tail duplication
4057 // happens so late that DBG_PHI instructions should not be able to
4058 // migrate into loops -- meaning we can only be live-through this
4059 // loop.
4060 ValueToCheck = ThisBlockValueNum;
4061 } else {
4062 // Does the block have as a live-out, in the location we're examining,
4063 // the value that we expect? If not, it's been moved or clobbered.
4064 ValueToCheck = VVal->second;
4065 }
4066
4067 if (BlockLiveOuts[Loc.asU64()] != ValueToCheck)
4068 return None;
4069 }
4070
4071 // Record this value as validated.
4072 ValidatedValues.insert({PHI->ParentBlock, ThisBlockValueNum});
4073 }
4074
4075 // All the PHIs are valid: we can return what the SSAUpdater said our value
4076 // number was.
4077 return Result;
4078}
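The report's warning (step 98 above, source line 4043) is that this path reaches the MLiveIns[...] lookup while the analyzer believes MLiveIns can be null. Whether a caller can really pass a null table here is an assumption taken from the diagnostic, not something the listing proves; the sketch below shows the kind of early guard that would satisfy the checker, not necessarily the fix the LLVM authors chose.

// Hypothetical guard helper for the line-4043 lookup; illustrative only.
static Optional<ValueIDNum> guardedLiveIn(ValueIDNum **MLiveIns,
                                          const MachineBasicBlock &MBB, LocIdx Loc) {
  if (!MLiveIns)
    return None; // machine-value tables were never built on this path
  return MLiveIns[MBB.getNumber()][Loc.asU64()];
}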

/build/llvm-toolchain-snapshot-13~++20210724100615+c63dbd850182/llvm/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineInstr class, which is the
10// basic representation for all target dependent machine instructions used by
11// the back end.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEINSTR_H
16#define LLVM_CODEGEN_MACHINEINSTR_H
17
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/PointerSumType.h"
20#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/ilist.h"
22#include "llvm/ADT/ilist_node.h"
23#include "llvm/ADT/iterator_range.h"
24#include "llvm/CodeGen/MachineMemOperand.h"
25#include "llvm/CodeGen/MachineOperand.h"
26#include "llvm/CodeGen/TargetOpcodes.h"
27#include "llvm/IR/DebugLoc.h"
28#include "llvm/IR/InlineAsm.h"
29#include "llvm/IR/PseudoProbe.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCSymbol.h"
32#include "llvm/Support/ArrayRecycler.h"
33#include "llvm/Support/TrailingObjects.h"
34#include <algorithm>
35#include <cassert>
36#include <cstdint>
37#include <utility>
38
39namespace llvm {
40
41class AAResults;
42template <typename T> class ArrayRef;
43class DIExpression;
44class DILocalVariable;
45class MachineBasicBlock;
46class MachineFunction;
47class MachineRegisterInfo;
48class ModuleSlotTracker;
49class raw_ostream;
50template <typename T> class SmallVectorImpl;
51class SmallBitVector;
52class StringRef;
53class TargetInstrInfo;
54class TargetRegisterClass;
55class TargetRegisterInfo;
56
57//===----------------------------------------------------------------------===//
58/// Representation of each machine instruction.
59///
60/// This class isn't a POD type, but it must have a trivial destructor. When a
61/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
62/// without having their destructor called.
63///
64class MachineInstr
65 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
66 ilist_sentinel_tracking<true>> {
67public:
68 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
69
70 /// Flags to specify different kinds of comments to output in
71 /// assembly code. These flags carry semantic information not
72 /// otherwise easily derivable from the IR text.
73 ///
74 enum CommentFlag {
75 ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
76 NoSchedComment = 0x2,
77 TAsmComments = 0x4 // Target Asm comments should start from this value.
78 };
79
80 enum MIFlag {
81 NoFlags = 0,
82 FrameSetup = 1 << 0, // Instruction is used as a part of
83 // function frame setup code.
84 FrameDestroy = 1 << 1, // Instruction is used as a part of
85 // function frame destruction code.
86 BundledPred = 1 << 2, // Instruction has bundled predecessors.
87 BundledSucc = 1 << 3, // Instruction has bundled successors.
88 FmNoNans = 1 << 4, // Instruction does not support Fast
89 // math nan values.
90 FmNoInfs = 1 << 5, // Instruction does not support Fast
91 // math infinity values.
92 FmNsz = 1 << 6, // Instruction is not required to retain
93 // signed zero values.
94 FmArcp = 1 << 7, // Instruction supports Fast math
95 // reciprocal approximations.
96 FmContract = 1 << 8, // Instruction supports Fast math
97 // contraction operations like fma.
98 FmAfn = 1 << 9, // Instruction may map to Fast math
99 // intrinsic approximation.
100 FmReassoc = 1 << 10, // Instruction supports Fast math
101 // reassociation of operand order.
102 NoUWrap = 1 << 11, // Instruction supports binary operator
103 // no unsigned wrap.
104 NoSWrap = 1 << 12, // Instruction supports binary operator
105 // no signed wrap.
106 IsExact = 1 << 13, // Instruction supports division is
107 // known to be exact.
108 NoFPExcept = 1 << 14, // Instruction does not raise
109 // floating-point exceptions.
110 NoMerge = 1 << 15, // Passes that drop source location info
111 // (e.g. branch folding) should skip
112 // this instruction.
113 };
114
115private:
116 const MCInstrDesc *MCID; // Instruction descriptor.
117 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
118
119 // Operands are allocated by an ArrayRecycler.
120 MachineOperand *Operands = nullptr; // Pointer to the first operand.
121 unsigned NumOperands = 0; // Number of operands on instruction.
122
123 uint16_t Flags = 0; // Various bits of additional
124 // information about machine
125 // instruction.
126
127 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
128 // the AsmPrinter to emit helpful
129 // comments. This is *not* semantic
130 // information. Do not use this for
131 // anything other than to convey comment
132 // information to AsmPrinter.
133
134 // OperandCapacity has uint8_t size, so it should be next to AsmPrinterFlags
135 // to properly pack.
136 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
137 OperandCapacity CapOperands; // Capacity of the Operands array.
138
139 /// Internal implementation detail class that provides out-of-line storage for
140 /// extra info used by the machine instruction when this info cannot be stored
141 /// in-line within the instruction itself.
142 ///
143 /// This has to be defined eagerly due to the implementation constraints of
144 /// `PointerSumType` where it is used.
145 class ExtraInfo final
146 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *, MDNode *> {
147 public:
148 static ExtraInfo *create(BumpPtrAllocator &Allocator,
149 ArrayRef<MachineMemOperand *> MMOs,
150 MCSymbol *PreInstrSymbol = nullptr,
151 MCSymbol *PostInstrSymbol = nullptr,
152 MDNode *HeapAllocMarker = nullptr) {
153 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
154 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
155 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
156 auto *Result = new (Allocator.Allocate(
157 totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *>(
158 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol,
159 HasHeapAllocMarker),
160 alignof(ExtraInfo)))
161 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol,
162 HasHeapAllocMarker);
163
164 // Copy the actual data into the trailing objects.
165 std::copy(MMOs.begin(), MMOs.end(),
166 Result->getTrailingObjects<MachineMemOperand *>());
167
168 if (HasPreInstrSymbol)
169 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
170 if (HasPostInstrSymbol)
171 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
172 PostInstrSymbol;
173 if (HasHeapAllocMarker)
174 Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker;
175
176 return Result;
177 }
178
179 ArrayRef<MachineMemOperand *> getMMOs() const {
180 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
181 }
182
183 MCSymbol *getPreInstrSymbol() const {
184 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
185 }
186
187 MCSymbol *getPostInstrSymbol() const {
188 return HasPostInstrSymbol
189 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
190 : nullptr;
191 }
192
193 MDNode *getHeapAllocMarker() const {
194 return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr;
195 }
196
197 private:
198 friend TrailingObjects;
199
200 // Description of the extra info, used to interpret the actual optional
201 // data appended.
202 //
203 // Note that this is not terribly space optimized. This leaves a great deal
204 // of flexibility to fit more in here later.
205 const int NumMMOs;
206 const bool HasPreInstrSymbol;
207 const bool HasPostInstrSymbol;
208 const bool HasHeapAllocMarker;
209
210 // Implement the `TrailingObjects` internal API.
211 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
212 return NumMMOs;
213 }
214 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
215 return HasPreInstrSymbol + HasPostInstrSymbol;
216 }
217 size_t numTrailingObjects(OverloadToken<MDNode *>) const {
218 return HasHeapAllocMarker;
219 }
220
221 // Just a boring constructor to allow us to initialize the sizes. Always use
222 // the `create` routine above.
223 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol,
224 bool HasHeapAllocMarker)
225 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
226 HasPostInstrSymbol(HasPostInstrSymbol),
227 HasHeapAllocMarker(HasHeapAllocMarker) {}
228 };
229
230 /// Enumeration of the kinds of inline extra info available. It is important
231 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
232 /// it accessible as an `ArrayRef`.
233 enum ExtraInfoInlineKinds {
234 EIIK_MMO = 0,
235 EIIK_PreInstrSymbol,
236 EIIK_PostInstrSymbol,
237 EIIK_OutOfLine
238 };
239
240 // We store extra information about the instruction here. The common case is
241 // expected to be nothing or a single pointer (typically a MMO or a symbol).
242 // We work to optimize this common case by storing it inline here rather than
243 // requiring a separate allocation, but we fall back to an allocation when
244 // multiple pointers are needed.
245 PointerSumType<ExtraInfoInlineKinds,
246 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
247 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
248 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
249 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
250 Info;
251
252 DebugLoc debugLoc; // Source line information.
253
254 /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
255 /// defined by this instruction.
256 unsigned DebugInstrNum;
257
258 // Intrusive list support
259 friend struct ilist_traits<MachineInstr>;
260 friend struct ilist_callback_traits<MachineBasicBlock>;
261 void setParent(MachineBasicBlock *P) { Parent = P; }
262
263 /// This constructor creates a copy of the given
264 /// MachineInstr in the given MachineFunction.
265 MachineInstr(MachineFunction &, const MachineInstr &);
266
267 /// This constructor creates a MachineInstr and adds the implicit operands.
268 /// It reserves space for number of operands specified by
269 /// MCInstrDesc. An explicit DebugLoc is supplied.
270 MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
271 bool NoImp = false);
272
273 // MachineInstrs are pool-allocated and owned by MachineFunction.
274 friend class MachineFunction;
275
276 void
277 dumprImpl(const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
278 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const;
279
280public:
281 MachineInstr(const MachineInstr &) = delete;
282 MachineInstr &operator=(const MachineInstr &) = delete;
283 // Use MachineFunction::DeleteMachineInstr() instead.
284 ~MachineInstr() = delete;
285
286 const MachineBasicBlock* getParent() const { return Parent; }
287 MachineBasicBlock* getParent() { return Parent; }
288
289 /// Move the instruction before \p MovePos.
290 void moveBefore(MachineInstr *MovePos);
291
292 /// Return the function that contains the basic block that this instruction
293 /// belongs to.
294 ///
295 /// Note: this is undefined behaviour if the instruction does not have a
296 /// parent.
297 const MachineFunction *getMF() const;
298 MachineFunction *getMF() {
299 return const_cast<MachineFunction *>(
300 static_cast<const MachineInstr *>(this)->getMF());
301 }
302
303 /// Return the asm printer flags bitvector.
304 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
305
306 /// Clear the AsmPrinter bitvector.
307 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
308
309 /// Return whether an AsmPrinter flag is set.
310 bool getAsmPrinterFlag(CommentFlag Flag) const {
311 return AsmPrinterFlags & Flag;
312 }
313
314 /// Set a flag for the AsmPrinter.
315 void setAsmPrinterFlag(uint8_t Flag) {
316 AsmPrinterFlags |= Flag;
317 }
318
319 /// Clear specific AsmPrinter flags.
320 void clearAsmPrinterFlag(CommentFlag Flag) {
321 AsmPrinterFlags &= ~Flag;
322 }
323
324 /// Return the MI flags bitvector.
325 uint16_t getFlags() const {
326 return Flags;
327 }
328
329 /// Return whether an MI flag is set.
330 bool getFlag(MIFlag Flag) const {
331 return Flags & Flag;
332 }
333
334 /// Set a MI flag.
335 void setFlag(MIFlag Flag) {
336 Flags |= (uint16_t)Flag;
337 }
338
339 void setFlags(unsigned flags) {
340 // Filter out the automatically maintained flags.
341 unsigned Mask = BundledPred | BundledSucc;
342 Flags = (Flags & Mask) | (flags & ~Mask);
343 }
344
345 /// clearFlag - Clear a MI flag.
346 void clearFlag(MIFlag Flag) {
347 Flags &= ~((uint16_t)Flag);
348 }
349
350 /// Return true if MI is in a bundle (but not the first MI in a bundle).
351 ///
352 /// A bundle looks like this before it's finalized:
353 /// ----------------
354 /// | MI |
355 /// ----------------
356 /// |
357 /// ----------------
358 /// | MI * |
359 /// ----------------
360 /// |
361 /// ----------------
362 /// | MI * |
363 /// ----------------
364 /// In this case, the first MI starts a bundle but is not inside a bundle, the
365 /// next 2 MIs are considered "inside" the bundle.
366 ///
367 /// After a bundle is finalized, it looks like this:
368 /// ----------------
369 /// | Bundle |
370 /// ----------------
371 /// |
372 /// ----------------
373 /// | MI * |
374 /// ----------------
375 /// |
376 /// ----------------
377 /// | MI * |
378 /// ----------------
379 /// |
380 /// ----------------
381 /// | MI * |
382 /// ----------------
383 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
384 /// a bundle, but the next three MIs are.
385 bool isInsideBundle() const {
386 return getFlag(BundledPred);
387 }
388
389 /// Return true if this instruction is part of a bundle. This is true
390 /// if either itself or its following instruction is marked "InsideBundle".
391 bool isBundled() const {
392 return isBundledWithPred() || isBundledWithSucc();
393 }
394
395 /// Return true if this instruction is part of a bundle, and it is not the
396 /// first instruction in the bundle.
397 bool isBundledWithPred() const { return getFlag(BundledPred); }
398
399 /// Return true if this instruction is part of a bundle, and it is not the
400 /// last instruction in the bundle.
401 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
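A small sketch tying the three bundle predicates above to the diagram: the BUNDLE header is bundled with its successor but not its predecessor, while interior instructions are bundled with their predecessor. The helper is purely illustrative and not part of the class.

// Illustrative classification using the predicates above.
const char *bundleRole(const MachineInstr &MI) {
  if (!MI.isBundled())
    return "not bundled";
  if (!MI.isBundledWithPred())
    return "bundle header"; // first instruction / BUNDLE pseudo
  return "inside bundle";
}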
402
403 /// Bundle this instruction with its predecessor. This can be an unbundled
404 /// instruction, or it can be the first instruction in a bundle.
405 void bundleWithPred();
406
407 /// Bundle this instruction with its successor. This can be an unbundled
408 /// instruction, or it can be the last instruction in a bundle.
409 void bundleWithSucc();
410
411 /// Break bundle above this instruction.
412 void unbundleFromPred();
413
414 /// Break bundle below this instruction.
415 void unbundleFromSucc();
416
417 /// Returns the debug location id of this MachineInstr.
418 const DebugLoc &getDebugLoc() const { return debugLoc; }
419
420 /// Return the operand containing the offset to be used if this DBG_VALUE
421 /// instruction is indirect; will be an invalid register if this value is
422 /// not indirect, and an immediate with value 0 otherwise.
423 const MachineOperand &getDebugOffset() const {
424 assert(isNonListDebugValue() && "not a DBG_VALUE");
425 return getOperand(1);
426 }
427 MachineOperand &getDebugOffset() {
428 assert(isNonListDebugValue() && "not a DBG_VALUE");
429 return getOperand(1);
430 }
431
432 /// Return the operand for the debug variable referenced by
433 /// this DBG_VALUE instruction.
434 const MachineOperand &getDebugVariableOp() const;
435 MachineOperand &getDebugVariableOp();
436
437 /// Return the debug variable referenced by
438 /// this DBG_VALUE instruction.
439 const DILocalVariable *getDebugVariable() const;
440
441 /// Return the operand for the complex address expression referenced by
442 /// this DBG_VALUE instruction.
443 const MachineOperand &getDebugExpressionOp() const;
444 MachineOperand &getDebugExpressionOp();
445
446 /// Return the complex address expression referenced by
447 /// this DBG_VALUE instruction.
448 const DIExpression *getDebugExpression() const;
449
450 /// Return the debug label referenced by
451 /// this DBG_LABEL instruction.
452 const DILabel *getDebugLabel() const;
453
454 /// Fetch the instruction number of this MachineInstr. If it does not have
455 /// one already, a new and unique number will be assigned.
456 unsigned getDebugInstrNum();
457
458 /// Fetch instruction number of this MachineInstr -- but before it's inserted
459 /// into \p MF. Needed for transformations that create an instruction but
460 /// don't immediately insert them.
461 unsigned getDebugInstrNum(MachineFunction &MF);
462
463 /// Examine the instruction number of this MachineInstr. May be zero if
464 /// it hasn't been assigned a number yet.
465 unsigned peekDebugInstrNum() const { return DebugInstrNum; }
466
467 /// Set instruction number of this MachineInstr. Avoid using unless you're
468 /// deserializing this information.
469 void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; }
470
471 /// Drop any variable location debugging information associated with this
472 /// instruction. Use when an instruction is modified in such a way that it no
473 /// longer defines the value it used to. Variable locations using that value
474 /// will be dropped.
475 void dropDebugNumber() { DebugInstrNum = 0; }
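These instruction numbers are what the instruction-referencing LiveDebugValues implementation in this report keys off: a DBG_INSTR_REF names an (instruction number, operand) pair instead of a register. A small, purely illustrative sketch of the query/assign pattern:

// Sketch: lazily assign and read the number a DBG_INSTR_REF would refer to.
unsigned ensureDebugInstrNum(MachineInstr &DefMI) {
  if (DefMI.peekDebugInstrNum() == 0) // zero means "no number assigned yet"
    return DefMI.getDebugInstrNum();  // assigns a fresh, unique number
  return DefMI.peekDebugInstrNum();
}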
476
477 /// Emit an error referring to the source location of this instruction.
478 /// This should only be used for inline assembly that is somehow
479 /// impossible to compile. Other errors should have been handled much
480 /// earlier.
481 ///
482 /// If this method returns, the caller should try to recover from the error.
483 void emitError(StringRef Msg) const;
484
485 /// Returns the target instruction descriptor of this MachineInstr.
486 const MCInstrDesc &getDesc() const { return *MCID; }
487
488 /// Returns the opcode of this MachineInstr.
489 unsigned getOpcode() const { return MCID->Opcode; }
490
491 /// Returns the total number of operands.
492 unsigned getNumOperands() const { return NumOperands; }
493
494 /// Returns the total number of operands which are debug locations.
495 unsigned getNumDebugOperands() const {
496 return std::distance(debug_operands().begin(), debug_operands().end());
497 }
498
499 const MachineOperand& getOperand(unsigned i) const {
500 assert(i < getNumOperands() && "getOperand() out of range!");
501 return Operands[i];
502 }
503 MachineOperand& getOperand(unsigned i) {
504 assert(i < getNumOperands() && "getOperand() out of range!");
505 return Operands[i];
506 }
507
508 MachineOperand &getDebugOperand(unsigned Index) {
509 assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
510 return *(debug_operands().begin() + Index);
511 }
512 const MachineOperand &getDebugOperand(unsigned Index) const {
513 assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
514 return *(debug_operands().begin() + Index);
515 }
516
517 SmallSet<Register, 4> getUsedDebugRegs() const {
518 assert(isDebugValue() && "not a DBG_VALUE*");
519 SmallSet<Register, 4> UsedRegs;
520 for (auto MO : debug_operands())
521 if (MO.isReg() && MO.getReg())
522 UsedRegs.insert(MO.getReg());
523 return UsedRegs;
524 }
525
526 /// Returns whether this debug value has at least one debug operand with the
527 /// register \p Reg.
528 bool hasDebugOperandForReg(Register Reg) const {
529 return any_of(debug_operands(), [Reg](const MachineOperand &Op) {
530 return Op.isReg() && Op.getReg() == Reg;
531 });
532 }
533
534 /// Returns a range of all of the operands that correspond to a debug use of
535 /// \p Reg.
536 template <typename Operand, typename Instruction>
537 static iterator_range<
538 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
539 getDebugOperandsForReg(Instruction *MI, Register Reg) {
540 std::function<bool(Operand & Op)> OpUsesReg(
541 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
542 return make_filter_range(MI->debug_operands(), OpUsesReg);
543 }
544 iterator_range<filter_iterator<const MachineOperand *,
545 std::function<bool(const MachineOperand &Op)>>>
546 getDebugOperandsForReg(Register Reg) const {
547 return MachineInstr::getDebugOperandsForReg<const MachineOperand,
548 const MachineInstr>(this, Reg);
549 }
550 iterator_range<filter_iterator<MachineOperand *,
551 std::function<bool(MachineOperand &Op)>>>
552 getDebugOperandsForReg(Register Reg) {
553 return MachineInstr::getDebugOperandsForReg<MachineOperand, MachineInstr>(
554 this, Reg);
555 }
556
557 bool isDebugOperand(const MachineOperand *Op) const {
558 return Op >= adl_begin(debug_operands()) && Op <= adl_end(debug_operands());
559 }
560
561 unsigned getDebugOperandIndex(const MachineOperand *Op) const {
562 assert(isDebugOperand(Op) && "Expected a debug operand.");
563 return std::distance(adl_begin(debug_operands()), Op);
564 }
565
566 /// Returns the total number of definitions.
567 unsigned getNumDefs() const {
568 return getNumExplicitDefs() + MCID->getNumImplicitDefs();
569 }
570
571 /// Returns true if the instruction has an implicit definition.
572 bool hasImplicitDef() const {
573 for (unsigned I = getNumExplicitOperands(), E = getNumOperands();
574 I != E; ++I) {
575 const MachineOperand &MO = getOperand(I);
576 if (MO.isDef() && MO.isImplicit())
577 return true;
578 }
579 return false;
580 }
581
582 /// Returns the number of implicit operands.
583 unsigned getNumImplicitOperands() const {
584 return getNumOperands() - getNumExplicitOperands();
585 }
586
587 /// Return true if operand \p OpIdx is a subregister index.
588 bool isOperandSubregIdx(unsigned OpIdx) const {
589 assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
590 "Expected MO_Immediate operand type.");
591 if (isExtractSubreg() && OpIdx == 2)
592 return true;
593 if (isInsertSubreg() && OpIdx == 3)
594 return true;
595 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
596 return true;
597 if (isSubregToReg() && OpIdx == 3)
598 return true;
599 return false;
600 }
601
602 /// Returns the number of non-implicit operands.
603 unsigned getNumExplicitOperands() const;
604
605 /// Returns the number of non-implicit definitions.
606 unsigned getNumExplicitDefs() const;
607
608 /// iterator/begin/end - Iterate over all operands of a machine instruction.
609 using mop_iterator = MachineOperand *;
610 using const_mop_iterator = const MachineOperand *;
611
612 mop_iterator operands_begin() { return Operands; }
613 mop_iterator operands_end() { return Operands + NumOperands; }
614
615 const_mop_iterator operands_begin() const { return Operands; }
616 const_mop_iterator operands_end() const { return Operands + NumOperands; }
617
618 iterator_range<mop_iterator> operands() {
619 return make_range(operands_begin(), operands_end());
620 }
621 iterator_range<const_mop_iterator> operands() const {
622 return make_range(operands_begin(), operands_end());
623 }
624 iterator_range<mop_iterator> explicit_operands() {
625 return make_range(operands_begin(),
626 operands_begin() + getNumExplicitOperands());
627 }
628 iterator_range<const_mop_iterator> explicit_operands() const {
629 return make_range(operands_begin(),
630 operands_begin() + getNumExplicitOperands());
631 }
632 iterator_range<mop_iterator> implicit_operands() {
633 return make_range(explicit_operands().end(), operands_end());
634 }
635 iterator_range<const_mop_iterator> implicit_operands() const {
636 return make_range(explicit_operands().end(), operands_end());
637 }
638 /// Returns a range over all operands that are used to determine the variable
639 /// location for this DBG_VALUE instruction.
640 iterator_range<mop_iterator> debug_operands() {
641 assert(isDebugValue() && "Must be a debug value instruction.");
642 return isDebugValueList()
643 ? make_range(operands_begin() + 2, operands_end())
644 : make_range(operands_begin(), operands_begin() + 1);
645 }
646 /// \copydoc debug_operands()
647 iterator_range<const_mop_iterator> debug_operands() const {
648 assert(isDebugValue() && "Must be a debug value instruction.");
649 return isDebugValueList()
650 ? make_range(operands_begin() + 2, operands_end())
651 : make_range(operands_begin(), operands_begin() + 1);
652 }
653 /// Returns a range over all explicit operands that are register definitions.
654 /// Implicit definitions are not included!
655 iterator_range<mop_iterator> defs() {
656 return make_range(operands_begin(),
657 operands_begin() + getNumExplicitDefs());
658 }
659 /// \copydoc defs()
660 iterator_range<const_mop_iterator> defs() const {
661 return make_range(operands_begin(),
662 operands_begin() + getNumExplicitDefs());
663 }
664 /// Returns a range that includes all operands that are register uses.
665 /// This may include unrelated operands which are not register uses.
666 iterator_range<mop_iterator> uses() {
667 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
668 }
669 /// \copydoc uses()
670 iterator_range<const_mop_iterator> uses() const {
671 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
672 }
673 iterator_range<mop_iterator> explicit_uses() {
674 return make_range(operands_begin() + getNumExplicitDefs(),
675 operands_begin() + getNumExplicitOperands());
676 }
677 iterator_range<const_mop_iterator> explicit_uses() const {
678 return make_range(operands_begin() + getNumExplicitDefs(),
679 operands_begin() + getNumExplicitOperands());
680 }
681
682 /// Returns the number of the operand iterator \p I points to.
683 unsigned getOperandNo(const_mop_iterator I) const {
684 return I - operands_begin();
685 }
686
687 /// Access to memory operands of the instruction. If there are none, that does
688 /// not imply anything about whether the function accesses memory. Instead,
689 /// the caller must behave conservatively.
690 ArrayRef<MachineMemOperand *> memoperands() const {
691 if (!Info)
692 return {};
693
694 if (Info.is<EIIK_MMO>())
695 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
696
697 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
698 return EI->getMMOs();
699
700 return {};
701 }
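A short usage sketch for the accessor above; note the documented caveat that an empty range tells you nothing about memory behaviour, so callers must stay conservative.

// Illustrative only: count volatile memory operands attached to MI.
unsigned countVolatileMMOs(const MachineInstr &MI) {
  unsigned N = 0;
  for (const MachineMemOperand *MMO : MI.memoperands())
    N += MMO->isVolatile();
  return N; // an empty memoperands() range does NOT mean "no memory access"
}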
702
703 /// Access to memory operands of the instruction.
704 ///
705 /// If `memoperands_begin() == memoperands_end()`, that does not imply
706 /// anything about whether the function accesses memory. Instead, the caller
707 /// must behave conservatively.
708 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
709
710 /// Access to memory operands of the instruction.
711 ///
712 /// If `memoperands_begin() == memoperands_end()`, that does not imply
713 /// anything about whether the function accesses memory. Instead, the caller
714 /// must behave conservatively.
715 mmo_iterator memoperands_end() const { return memoperands().end(); }
716
717 /// Return true if we don't have any memory operands which describe the
718 /// memory access done by this instruction. If this is true, calling code
719 /// must be conservative.
720 bool memoperands_empty() const { return memoperands().empty(); }
721
722 /// Return true if this instruction has exactly one MachineMemOperand.
723 bool hasOneMemOperand() const { return memoperands().size() == 1; }
724
725 /// Return the number of memory operands.
726 unsigned getNumMemOperands() const { return memoperands().size(); }
727
728 /// Helper to extract a pre-instruction symbol if one has been added.
729 MCSymbol *getPreInstrSymbol() const {
730 if (!Info)
731 return nullptr;
732 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
733 return S;
734 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
735 return EI->getPreInstrSymbol();
736
737 return nullptr;
738 }
739
740 /// Helper to extract a post-instruction symbol if one has been added.
741 MCSymbol *getPostInstrSymbol() const {
742 if (!Info)
743 return nullptr;
744 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
745 return S;
746 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
747 return EI->getPostInstrSymbol();
748
749 return nullptr;
750 }
751
752 /// Helper to extract a heap alloc marker if one has been added.
753 MDNode *getHeapAllocMarker() const {
754 if (!Info)
755 return nullptr;
756 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
757 return EI->getHeapAllocMarker();
758
759 return nullptr;
760 }
761
762 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
763 /// queries but they are bundle aware.
764
765 enum QueryType {
766 IgnoreBundle, // Ignore bundles
767 AnyInBundle, // Return true if any instruction in bundle has property
768 AllInBundle // Return true if all instructions in bundle have property
769 };
770
771 /// Return true if the instruction (or in the case of a bundle,
772 /// the instructions inside the bundle) has the specified property.
773 /// The first argument is the property being queried.
774 /// The second argument indicates whether the query should look inside
775 /// instruction bundles.
776 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
777 assert(MCFlag < 64 &&
778 "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
779 // Inline the fast path for unbundled or bundle-internal instructions.
780 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
781 return getDesc().getFlags() & (1ULL << MCFlag);
782
783 // If this is the first instruction in a bundle, take the slow path.
784 return hasPropertyInBundle(1ULL << MCFlag, Type);
785 }
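A sketch of how the QueryType argument changes the answer for bundled instructions; illustrative only, using the isCall() wrapper declared further down.

// Illustrative: bundle-aware vs. per-instruction property queries.
bool anyCallInBundle(const MachineInstr &MI) {
  return MI.isCall(MachineInstr::AnyInBundle);  // true if any MI in the bundle is a call
}
bool thisMIIsCall(const MachineInstr &MI) {
  return MI.isCall(MachineInstr::IgnoreBundle); // only this instruction
}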
786
787 /// Return true if this is an instruction that should go through the usual
788 /// legalization steps.
789 bool isPreISelOpcode(QueryType Type = IgnoreBundle) const {
790 return hasProperty(MCID::PreISelOpcode, Type);
791 }
792
793 /// Return true if this instruction can have a variable number of operands.
794 /// In this case, the variable operands will be after the normal
795 /// operands but before the implicit definitions and uses (if any are
796 /// present).
797 bool isVariadic(QueryType Type = IgnoreBundle) const {
798 return hasProperty(MCID::Variadic, Type);
799 }
800
801 /// Set if this instruction has an optional definition, e.g.
802 /// ARM instructions which can set condition code if 's' bit is set.
803 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
804 return hasProperty(MCID::HasOptionalDef, Type);
805 }
806
807 /// Return true if this is a pseudo instruction that doesn't
808 /// correspond to a real machine instruction.
809 bool isPseudo(QueryType Type = IgnoreBundle) const {
810 return hasProperty(MCID::Pseudo, Type);
811 }
812
813 bool isReturn(QueryType Type = AnyInBundle) const {
814 return hasProperty(MCID::Return, Type);
815 }
816
817 /// Return true if this is an instruction that marks the end of an EH scope,
818 /// i.e., a catchpad or a cleanuppad instruction.
819 bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
820 return hasProperty(MCID::EHScopeReturn, Type);
821 }
822
823 bool isCall(QueryType Type = AnyInBundle) const {
824 return hasProperty(MCID::Call, Type);
825 }
826
827 /// Return true if this is a call instruction that may have an associated
828 /// call site entry in the debug info.
829 bool isCandidateForCallSiteEntry(QueryType Type = IgnoreBundle) const;
830 /// Return true if copying, moving, or erasing this instruction requires
831 /// updating Call Site Info (see \ref copyCallSiteInfo, \ref moveCallSiteInfo,
832 /// \ref eraseCallSiteInfo).
833 bool shouldUpdateCallSiteInfo() const;
834
835 /// Returns true if the specified instruction stops control flow
836 /// from executing the instruction immediately following it. Examples include
837 /// unconditional branches and return instructions.
838 bool isBarrier(QueryType Type = AnyInBundle) const {
839 return hasProperty(MCID::Barrier, Type);
840 }
841
842 /// Returns true if this instruction is part of the terminator for a basic block.
843 /// Typically this is things like return and branch instructions.
844 ///
845 /// Various passes use this to insert code into the bottom of a basic block,
846 /// but before control flow occurs.
847 bool isTerminator(QueryType Type = AnyInBundle) const {
848 return hasProperty(MCID::Terminator, Type);
849 }
850
851 /// Returns true if this is a conditional, unconditional, or indirect branch.
852 /// Predicates below can be used to discriminate between
853 /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to
854 /// get more information.
855 bool isBranch(QueryType Type = AnyInBundle) const {
856 return hasProperty(MCID::Branch, Type);
857 }
858
859 /// Return true if this is an indirect branch, such as a
860 /// branch through a register.
861 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
862 return hasProperty(MCID::IndirectBranch, Type);
863 }
864
865 /// Return true if this is a branch which may fall
866 /// through to the next instruction or may transfer control flow to some other
867 /// block. The TargetInstrInfo::analyzeBranch method can be used to get more
868 /// information about this branch.
869 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
870 return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
871 }
872
873 /// Return true if this is a branch which always
874 /// transfers control flow to some other block. The
875 /// TargetInstrInfo::analyzeBranch method can be used to get more information
876 /// about this branch.
877 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
878 return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
879 }
880
881 /// Return true if this instruction has a predicate operand that
882 /// controls execution. It may be set to 'always', or may be set to other
883 /// values. There are various methods in TargetInstrInfo that can be used to
884 /// control and modify the predicate in this instruction.
885 bool isPredicable(QueryType Type = AllInBundle) const {
886 // If it's a bundle then all bundled instructions must be predicable for this
887 // to return true.
888 return hasProperty(MCID::Predicable, Type);
889 }
890
891 /// Return true if this instruction is a comparison.
892 bool isCompare(QueryType Type = IgnoreBundle) const {
893 return hasProperty(MCID::Compare, Type);
894 }
895
896 /// Return true if this instruction is a move immediate
897 /// (including conditional moves) instruction.
898 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
899 return hasProperty(MCID::MoveImm, Type);
900 }
901
902 /// Return true if this instruction is a register move.
903 /// (including moving values from subreg to reg)
904 bool isMoveReg(QueryType Type = IgnoreBundle) const {
905 return hasProperty(MCID::MoveReg, Type);
906 }
907
908 /// Return true if this instruction is a bitcast instruction.
909 bool isBitcast(QueryType Type = IgnoreBundle) const {
910 return hasProperty(MCID::Bitcast, Type);
911 }
912
913 /// Return true if this instruction is a select instruction.
914 bool isSelect(QueryType Type = IgnoreBundle) const {
915 return hasProperty(MCID::Select, Type);
916 }
917
918 /// Return true if this instruction cannot be safely duplicated.
919 /// For example, if the instruction has unique labels attached
920 /// to it, duplicating it would cause multiple definition errors.
921 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
922 return hasProperty(MCID::NotDuplicable, Type);
923 }
924
925 /// Return true if this instruction is convergent.
926 /// Convergent instructions can not be made control-dependent on any
927 /// additional values.
928 bool isConvergent(QueryType Type = AnyInBundle) const {
929 if (isInlineAsm()) {
930 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
931 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
932 return true;
933 }
934 return hasProperty(MCID::Convergent, Type);
935 }
936
937 /// Returns true if the specified instruction has a delay slot
938 /// which must be filled by the code generator.
939 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
940 return hasProperty(MCID::DelaySlot, Type);
941 }
942
943 /// Return true for instructions that can be folded as
944 /// memory operands in other instructions. The most common use for this
945 /// is instructions that are simple loads from memory that don't modify
946 /// the loaded value in any way, but it can also be used for instructions
947 /// that can be expressed as constant-pool loads, such as V_SETALLONES
948 /// on x86, to allow them to be folded when it is beneficial.
949 /// This should only be set on instructions that return a value in their
950 /// only virtual register definition.
951 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
952 return hasProperty(MCID::FoldableAsLoad, Type);
953 }
954
955 /// Return true if this instruction behaves
956 /// the same way as the generic REG_SEQUENCE instructions.
957 /// E.g., on ARM,
958 /// dX VMOVDRR rY, rZ
959 /// is equivalent to
960 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
961 ///
962 /// Note that for the optimizers to be able to take advantage of
963 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
964 /// overridden accordingly.
965 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
966 return hasProperty(MCID::RegSequence, Type);
967 }
968
969 /// Return true if this instruction behaves
970 /// the same way as the generic EXTRACT_SUBREG instructions.
971 /// E.g., on ARM,
972 /// rX, rY VMOVRRD dZ
973 /// is equivalent to two EXTRACT_SUBREG:
974 /// rX = EXTRACT_SUBREG dZ, ssub_0
975 /// rY = EXTRACT_SUBREG dZ, ssub_1
976 ///
977 /// Note that for the optimizers to be able to take advantage of
978 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
979 /// overridden accordingly.
980 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
981 return hasProperty(MCID::ExtractSubreg, Type);
982 }
983
984 /// Return true if this instruction behaves
985 /// the same way as the generic INSERT_SUBREG instructions.
986 /// E.g., on ARM,
987 /// dX = VSETLNi32 dY, rZ, Imm
988 /// is equivalent to an INSERT_SUBREG:
989 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
990 ///
991 /// Note that for the optimizers to be able to take advantage of
992 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
993 /// overridden accordingly.
994 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
995 return hasProperty(MCID::InsertSubreg, Type);
996 }
997
998 //===--------------------------------------------------------------------===//
999 // Side Effect Analysis
1000 //===--------------------------------------------------------------------===//
1001
1002 /// Return true if this instruction could possibly read memory.
1003 /// Instructions with this flag set are not necessarily simple load
1004 /// instructions; they may load a value and modify it, for example.
1005 bool mayLoad(QueryType Type = AnyInBundle) const {
1006 if (isInlineAsm()) {
1007 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1008 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1009 return true;
1010 }
1011 return hasProperty(MCID::MayLoad, Type);
1012 }
1013
1014 /// Return true if this instruction could possibly modify memory.
1015 /// Instructions with this flag set are not necessarily simple store
1016 /// instructions; they may store a modified value based on their operands, or
1017 /// may not actually modify anything, for example.
1018 bool mayStore(QueryType Type = AnyInBundle) const {
1019 if (isInlineAsm()) {
1020 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1021 if (ExtraInfo & InlineAsm::Extra_MayStore)
1022 return true;
1023 }
1024 return hasProperty(MCID::MayStore, Type);
1025 }
1026
1027 /// Return true if this instruction could possibly read or modify memory.
1028 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
1029 return mayLoad(Type) || mayStore(Type);
1030 }
1031
1032 /// Return true if this instruction could possibly raise a floating-point
1033 /// exception. This is the case if the instruction is a floating-point
1034 /// instruction that can in principle raise an exception, as indicated
1035 /// by the MCID::MayRaiseFPException property, *and* at the same time,
1036 /// the instruction is used in a context where we expect floating-point
1037 /// exceptions are not disabled, as indicated by the NoFPExcept MI flag.
1038 bool mayRaiseFPException() const {
1039 return hasProperty(MCID::MayRaiseFPException) &&
1040 !getFlag(MachineInstr::MIFlag::NoFPExcept);
1041 }
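A rough, hedged sketch of how the memory and FP-exception predicates are combined when filtering instructions for speculation; the helper name isSafeToSpeculate and the exact policy are illustrative assumptions, not LLVM's hoisting logic.

#include "llvm/CodeGen/MachineInstr.h"

// Conservative filter: reject anything that touches memory, has unmodeled
// side effects, or might raise an FP exception in a context that cares.
static bool isSafeToSpeculate(const llvm::MachineInstr &MI) {
  return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() &&
         !MI.mayRaiseFPException();
}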
1042
1043 //===--------------------------------------------------------------------===//
1044 // Flags that indicate whether an instruction can be modified by a method.
1045 //===--------------------------------------------------------------------===//
1046
1047 /// Return true if this may be a 2- or 3-address
1048 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
1049 /// result if Y and Z are exchanged. If this flag is set, then the
1050 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
1051 /// instruction.
1052 ///
1053 /// Note that this flag may be set on instructions that are only commutable
1054 /// sometimes. In these cases, the call to commuteInstruction will fail.
1055 /// Also note that some instructions require non-trivial modification to
1056 /// commute them.
1057 bool isCommutable(QueryType Type = IgnoreBundle) const {
1058 return hasProperty(MCID::Commutable, Type);
1059 }
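A minimal sketch of the isCommutable()/commuteInstruction() contract described above; note that the call may legitimately fail even when the flag is set. The helper name tryCommute is an assumption for illustration.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

// Attempt to commute MI in place; returns false if MI is not commutable or if
// the target declines to commute this particular instruction.
static bool tryCommute(llvm::MachineInstr &MI, const llvm::TargetInstrInfo &TII) {
  if (!MI.isCommutable())
    return false;
  return TII.commuteInstruction(MI, /*NewMI=*/false) != nullptr;
}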
1060
1061 /// Return true if this is a 2-address instruction
1062 /// which can be changed into a 3-address instruction if needed. Doing this
1063 /// transformation can be profitable in the register allocator, because it
1064 /// means that the instruction can use a 2-address form if possible, but
1065 /// degrade into a less efficient form if the source and dest register cannot
1066 /// be assigned to the same register. For example, this allows the x86
1067 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
1068 /// is the same speed as the shift but has bigger code size.
1069 ///
1070 /// If this returns true, then the target must implement the
1071 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
1072 /// is allowed to fail if the transformation isn't valid for this specific
1073 /// instruction (e.g. shl reg, 4 on x86).
1074 ///
1075 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
1076 return hasProperty(MCID::ConvertibleTo3Addr, Type);
1077 }
1078
1079 /// Return true if this instruction requires
1080 /// custom insertion support when the DAG scheduler is inserting it into a
1081 /// machine basic block. If this is true for the instruction, it basically
1082 /// means that it is a pseudo instruction used at SelectionDAG time that is
1083 /// expanded out into magic code by the target when MachineInstrs are formed.
1084 ///
1085 /// If this is true, the TargetLowering::EmitInstrWithCustomInserter() method
1086 /// is used to insert this into the MachineBasicBlock.
1087 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
1088 return hasProperty(MCID::UsesCustomInserter, Type);
1089 }
1090
1091 /// Return true if this instruction requires *adjustment*
1092 /// after instruction selection by calling a target hook. For example, this
1093 /// can be used to fill in the ARM 's' optional operand depending on whether
1094 /// the conditional flag register is used.
1095 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
1096 return hasProperty(MCID::HasPostISelHook, Type);
1097 }
1098
1099 /// Returns true if this instruction is a candidate for remat.
1100 /// This flag is deprecated; please don't use it anymore. If this
1101 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
1102 /// verify the instruction is really rematerializable.
1103 bool isRematerializable(QueryType Type = AllInBundle) const {
1104 // It's only possible to re-mat a bundle if all bundled instructions are
1105 // re-materializable.
1106 return hasProperty(MCID::Rematerializable, Type);
1107 }
1108
1109 /// Returns true if this instruction has the same cost (or less) than a move
1110 /// instruction. This is useful during certain types of optimizations
1111 /// (e.g., remat during two-address conversion or machine licm)
1112 /// where we would like to remat or hoist the instruction, but not if it costs
1113 /// more than moving the instruction into the appropriate register. Note, we
1114 /// are not marking copies from and to the same register class with this flag.
1115 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
1116 // Only returns true for a bundle if all bundled instructions are cheap.
1117 return hasProperty(MCID::CheapAsAMove, Type);
1118 }
1119
1120 /// Returns true if this instruction's source operands
1121 /// have special register allocation requirements that are not captured by the
1122 /// operand register classes. e.g. ARM::STRD's two source registers must be an
1123 /// even / odd pair, ARM::STM registers have to be in ascending order.
1124 /// Post-register allocation passes should not attempt to change allocations
1125 /// for sources of instructions with this flag.
1126 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
1127 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
1128 }
1129
1130 /// Returns true if this instruction's def operands
1131 /// have special register allocation requirements that are not captured by the
1132 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
1133 /// even / odd pair, ARM::LDM registers have to be in ascending order.
1134 /// Post-register allocation passes should not attempt to change allocations
1135 /// for definitions of instructions with this flag.
1136 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
1137 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
1138 }
1139
1140 enum MICheckType {
1141 CheckDefs, // Check all operands for equality
1142 CheckKillDead, // Check all operands including kill / dead markers
1143 IgnoreDefs, // Ignore all definitions
1144 IgnoreVRegDefs // Ignore virtual register definitions
1145 };
1146
1147 /// Return true if this instruction is identical to \p Other.
1148 /// Two instructions are identical if they have the same opcode and all their
1149 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
1150 /// Note that this means liveness-related flags (dead, undef, kill) do not
1151 /// affect the notion of identical.
1152 bool isIdenticalTo(const MachineInstr &Other,
1153 MICheckType Check = CheckDefs) const;
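A small sketch of a typical isIdenticalTo() use: spotting two instructions that compute the same value while ignoring which virtual registers they define. The helper name isRedundantPair is hypothetical.

#include "llvm/CodeGen/MachineInstr.h"

// True if A and B have the same opcode and operands, ignoring vreg defs;
// useful when B's result could be replaced by A's.
static bool isRedundantPair(const llvm::MachineInstr &A,
                            const llvm::MachineInstr &B) {
  return A.isIdenticalTo(B, llvm::MachineInstr::IgnoreVRegDefs);
}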
1154
1155 /// Unlink 'this' from the containing basic block, and return it without
1156 /// deleting it.
1157 ///
1158 /// This function cannot be used on bundled instructions; use
1159 /// removeFromBundle() to remove individual instructions from a bundle.
1160 MachineInstr *removeFromParent();
1161
1162 /// Unlink this instruction from its basic block and return it without
1163 /// deleting it.
1164 ///
1165 /// If the instruction is part of a bundle, the other instructions in the
1166 /// bundle remain bundled.
1167 MachineInstr *removeFromBundle();
1168
1169 /// Unlink 'this' from the containing basic block and delete it.
1170 ///
1171 /// If this instruction is the header of a bundle, the whole bundle is erased.
1172 /// This function cannot be used for instructions inside a bundle; use
1173 /// eraseFromBundle() to erase individual bundled instructions.
1174 void eraseFromParent();
1175
1176 /// Unlink 'this' from the containing basic block and delete it.
1177 ///
1178 /// For all definitions mark their uses in DBG_VALUE nodes
1179 /// as undefined. Otherwise like eraseFromParent().
1180 void eraseFromParentAndMarkDBGValuesForRemoval();
1181
1182 /// Unlink 'this' from its basic block and delete it.
1183 ///
1184 /// If the instruction is part of a bundle, the other instructions in the
1185 /// bundle remain bundled.
1186 void eraseFromBundle();
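Because eraseFromParent() unlinks and deletes the instruction, iterators pointing at it are invalidated; a common idiom, sketched here, is to advance the iterator before erasing. The helper name eraseIdentityCopies is illustrative only.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

// Delete no-op copies from MBB without invalidating the loop iterator.
static void eraseIdentityCopies(llvm::MachineBasicBlock &MBB) {
  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
    llvm::MachineInstr &MI = *I++; // Step past MI before possibly deleting it.
    if (MI.isIdentityCopy())
      MI.eraseFromParent();
  }
}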
1187
1188 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
1189 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
1190 bool isAnnotationLabel() const {
1191 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
1192 }
1193
1194 /// Returns true if the MachineInstr represents a label.
1195 bool isLabel() const {
1196 return isEHLabel() || isGCLabel() || isAnnotationLabel();
1197 }
1198
1199 bool isCFIInstruction() const {
1200 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
1201 }
1202
1203 bool isPseudoProbe() const {
1204 return getOpcode() == TargetOpcode::PSEUDO_PROBE;
1205 }
1206
1207 // True if the instruction represents a position in the function.
1208 bool isPosition() const { return isLabel() || isCFIInstruction(); }
1209
1210 bool isNonListDebugValue() const {
1211 return getOpcode() == TargetOpcode::DBG_VALUE;
1212 }
1213 bool isDebugValueList() const {
1214 return getOpcode() == TargetOpcode::DBG_VALUE_LIST;
1215 }
1216 bool isDebugValue() const {
1217 return isNonListDebugValue() || isDebugValueList();
56: Returning zero, which participates in a condition later
1218 }
1219 bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
1220 bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
65: Assuming the condition is true
66: Returning the value 1, which participates in a condition later
1221 bool isDebugPHI() const { return getOpcode() == TargetOpcode::DBG_PHI; }
1222 bool isDebugInstr() const {
1223 return isDebugValue() || isDebugLabel() || isDebugRef() || isDebugPHI();
1224 }
1225 bool isDebugOrPseudoInstr() const {
1226 return isDebugInstr() || isPseudoProbe();
1227 }
1228
1229 bool isDebugOffsetImm() const {
1230 return isNonListDebugValue() && getDebugOffset().isImm();
1231 }
1232
1233 /// A DBG_VALUE is indirect iff the location operand is a register and
1234 /// the offset operand is an immediate.
1235 bool isIndirectDebugValue() const {
1236 return isDebugOffsetImm() && getDebugOperand(0).isReg();
1237 }
1238
1239 /// A DBG_VALUE is an entry value iff its debug expression contains the
1240 /// DW_OP_LLVM_entry_value operation.
1241 bool isDebugEntryValue() const;
1242
1243 /// Return true if the instruction is a debug value which describes a part of
1244 /// a variable as unavailable.
1245 bool isUndefDebugValue() const {
1246 if (!isDebugValue())
1247 return false;
1248 // If any $noreg locations are given, this DV is undef.
1249 for (const MachineOperand &Op : debug_operands())
1250 if (Op.isReg() && !Op.getReg().isValid())
1251 return true;
1252 return false;
1253 }
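A hedged sketch of the common "skip debug instructions" pattern these predicates enable, so that codegen decisions do not change when -g adds DBG_* instructions; the helper name countRealInstructions is hypothetical.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

// Count instructions, ignoring DBG_VALUE, DBG_VALUE_LIST, DBG_LABEL,
// DBG_INSTR_REF and DBG_PHI.
static unsigned countRealInstructions(const llvm::MachineBasicBlock &MBB) {
  unsigned N = 0;
  for (const llvm::MachineInstr &MI : MBB)
    if (!MI.isDebugInstr())
      ++N;
  return N;
}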
1254
1255 bool isPHI() const {
1256 return getOpcode() == TargetOpcode::PHI ||
1257 getOpcode() == TargetOpcode::G_PHI;
1258 }
1259 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
1260 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
1261 bool isInlineAsm() const {
1262 return getOpcode() == TargetOpcode::INLINEASM ||
1263 getOpcode() == TargetOpcode::INLINEASM_BR;
1264 }
1265
1266 /// FIXME: Seems like a layering violation that the AsmDialect, which is
1267 /// X86-specific, is attached to a generic MachineInstr.
1268 bool isMSInlineAsm() const {
1269 return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
1270 }
1271
1272 bool isStackAligningInlineAsm() const;
1273 InlineAsm::AsmDialect getInlineAsmDialect() const;
1274
1275 bool isInsertSubreg() const {
1276 return getOpcode() == TargetOpcode::INSERT_SUBREG;
1277 }
1278
1279 bool isSubregToReg() const {
1280 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
1281 }
1282
1283 bool isRegSequence() const {
1284 return getOpcode() == TargetOpcode::REG_SEQUENCE;
1285 }
1286
1287 bool isBundle() const {
1288 return getOpcode() == TargetOpcode::BUNDLE;
1289 }
1290
1291 bool isCopy() const {
1292 return getOpcode() == TargetOpcode::COPY;
1293 }
1294
1295 bool isFullCopy() const {
1296 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
1297 }
1298
1299 bool isExtractSubreg() const {
1300 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
1301 }
1302
1303 /// Return true if the instruction behaves like a copy.
1304 /// This does not include native copy instructions.
1305 bool isCopyLike() const {
1306 return isCopy() || isSubregToReg();
1307 }
1308
1309 /// Return true if the instruction is an identity copy.
1310 bool isIdentityCopy() const {
1311 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
1312 getOperand(0).getSubReg() == getOperand(1).getSubReg();
1313 }
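A minimal sketch of how the copy predicates are used to pick out full register-to-register copies for forwarding; the helper name getFullCopySrc is an illustrative assumption.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

// Return the source register of a whole-register COPY, or an invalid Register
// if MI is not a full copy (sub-register copies need extra care).
static llvm::Register getFullCopySrc(const llvm::MachineInstr &MI) {
  if (MI.isFullCopy())
    return MI.getOperand(1).getReg();
  return llvm::Register();
}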
1314
1315 /// Return true if this instruction doesn't produce any output in the form of
1316 /// executable instructions.
1317 bool isMetaInstruction() const {
1318 switch (getOpcode()) {
1319 default:
1320 return false;
1321 case TargetOpcode::IMPLICIT_DEF:
1322 case TargetOpcode::KILL:
1323 case TargetOpcode::CFI_INSTRUCTION:
1324 case TargetOpcode::EH_LABEL:
1325 case TargetOpcode::GC_LABEL:
1326 case TargetOpcode::DBG_VALUE:
1327 case TargetOpcode::DBG_VALUE_LIST:
1328 case TargetOpcode::DBG_INSTR_REF:
1329 case TargetOpcode::DBG_PHI:
1330 case TargetOpcode::DBG_LABEL:
1331 case TargetOpcode::LIFETIME_START:
1332 case TargetOpcode::LIFETIME_END:
1333 case TargetOpcode::PSEUDO_PROBE:
1334 return true;
1335 }
1336 }
1337
1338 /// Return true if this is a transient instruction that is either very likely
1339 /// to be eliminated during register allocation (such as copy-like
1340 /// instructions), or if this instruction doesn't have an execution-time cost.
1341 bool isTransient() const {
1342 switch (getOpcode()) {
1343 default:
1344 return isMetaInstruction();
1345 // Copy-like instructions are usually eliminated during register allocation.
1346 case TargetOpcode::PHI:
1347 case TargetOpcode::G_PHI:
1348 case TargetOpcode::COPY:
1349 case TargetOpcode::INSERT_SUBREG:
1350 case TargetOpcode::SUBREG_TO_REG:
1351 case TargetOpcode::REG_SEQUENCE:
1352 return true;
1353 }
1354 }
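A rough sketch of how isTransient() feeds size heuristics; real passes would prefer TargetInstrInfo::getInstSizeInBytes for accuracy, and the helper name estimateBlockSize is hypothetical.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

// Count only instructions likely to survive to the final output.
static unsigned estimateBlockSize(const llvm::MachineBasicBlock &MBB) {
  unsigned Count = 0;
  for (const llvm::MachineInstr &MI : MBB)
    if (!MI.isTransient()) // Skips copies, PHIs, debug info, CFI, labels, ...
      ++Count;
  return Count;
}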
1355
1356 /// Return the number of instructions inside the MI bundle, excluding the
1357 /// bundle header.
1358 ///
1359 /// This is the number of instructions that MachineBasicBlock::iterator
1360 /// skips, 0 for unbundled instructions.
1361 unsigned getBundleSize() const;
1362
1363 /// Return true if the MachineInstr reads the specified register.
1364 /// If TargetRegisterInfo is passed, then it also checks if there
1365 /// is a read of a super-register.
1366 /// This does not count partial redefines of virtual registers as reads:
1367 /// %reg1024:6 = OP.
1368 bool readsRegister(Register Reg,
1369 const TargetRegisterInfo *TRI = nullptr) const {
1370 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
1371 }
1372
1373 /// Return true if the MachineInstr reads the specified virtual register.
1374 /// Take into account that a partial define is a
1375 /// read-modify-write operation.
1376 bool readsVirtualRegister(Register Reg) const {
1377 return readsWritesVirtualRegister(Reg).first;
1378 }
1379
1380 /// Return a pair of bools (reads, writes) indicating if this instruction
1381 /// reads or writes Reg. This also considers partial defines.
1382 /// If Ops is not null, all operand indices for Reg are added.
1383 std::pair<bool,bool> readsWritesVirtualRegister(Register Reg,
1384 SmallVectorImpl<unsigned> *Ops = nullptr) const;
1385
1386 /// Return true if the MachineInstr kills the specified register.
1387 /// If TargetRegisterInfo is passed, then it also checks if there is
1388 /// a kill of a super-register.
1389 bool killsRegister(Register Reg,
1390 const TargetRegisterInfo *TRI = nullptr) const {
1391 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
1392 }
1393
1394 /// Return true if the MachineInstr fully defines the specified register.
1395 /// If TargetRegisterInfo is passed, then it also checks
1396 /// if there is a def of a super-register.
1397 /// NOTE: It's ignoring subreg indices on virtual registers.
1398 bool definesRegister(Register Reg,
1399 const TargetRegisterInfo *TRI = nullptr) const {
1400 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
1401 }
1402
1403 /// Return true if the MachineInstr modifies (fully define or partially
1404 /// define) the specified register.
1405 /// NOTE: It's ignoring subreg indices on virtual registers.
1406 bool modifiesRegister(Register Reg,
1407 const TargetRegisterInfo *TRI = nullptr) const {
1408 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
1409 }
1410
1411 /// Returns true if the register is dead in this machine instruction.
1412 /// If TargetRegisterInfo is passed, then it also checks
1413 /// if there is a dead def of a super-register.
1414 bool registerDefIsDead(Register Reg,
1415 const TargetRegisterInfo *TRI = nullptr) const {
1416 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
1417 }
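A hedged sketch tying the register queries together, e.g. when checking whether an instruction clobbers a register whose value is still needed; if TRI is null, super-register aliasing is not considered. The helper name clobbersLiveReg is illustrative.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

// A full or partial redefinition that is not marked dead counts as a clobber.
static bool clobbersLiveReg(const llvm::MachineInstr &MI, llvm::Register Reg,
                            const llvm::TargetRegisterInfo *TRI) {
  return MI.modifiesRegister(Reg, TRI) && !MI.registerDefIsDead(Reg, TRI);
}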
1418
1419 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1420 /// the given register (not considering sub/super-registers).
1421 bool hasRegisterImplicitUseOperand(Register Reg) const;
1422
1423 /// Returns the operand index that is a use of the specified register or -1
1424 /// if it is not found. It further tightens the search criteria to a use
1425 /// that kills the register if isKill is true.
1426 int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
1427 const TargetRegisterInfo *TRI = nullptr) const;
1428
1429 /// Wrapper for findRegisterUseOperandIdx, it returns
1430 /// a pointer to the MachineOperand rather than an index.
1431 MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false,
1432 const TargetRegisterInfo *TRI = nullptr) {
1433 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
1434 return (Idx == -1) ? nullptr : &getOperand(Idx);
1435 }
1436
1437 const MachineOperand *findRegisterUseOperand(
1438 Register Reg, bool isKill = false,
1439 const TargetRegisterInfo *TRI = nullptr) const {
1440 return const_cast<MachineInstr *>(this)->
1441 findRegisterUseOperand(Reg, isKill, TRI);
1442 }
1443
1444 /// Returns the operand index that is a def of the specified register or
1445 /// -1 if it is not found. If isDead is true, defs that are not dead are
1446 /// skipped. If Overlap is true, then it also looks for defs that merely
1447 /// overlap the specified register. If TargetRegisterInfo is non-null,
1448 /// then it also checks if there is a def of a super-register.
1449 /// This may also return a register mask operand when Overlap is true.
1450 int findRegisterDefOperandIdx(Register Reg,
1451 bool isDead = false, bool Overlap = false,
1452 const TargetRegisterInfo *TRI = nullptr) const;
1453
1454 /// Wrapper for findRegisterDefOperandIdx, it returns
1455 /// a pointer to the MachineOperand rather than an index.
1456 MachineOperand *
1457 findRegisterDefOperand(Register Reg, bool isDead = false,
1458 bool Overlap = false,
1459 const TargetRegisterInfo *TRI = nullptr) {
1460 int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
1461 return (Idx == -1) ? nullptr : &getOperand(Idx);
1462 }
1463
1464 const MachineOperand *
1465 findRegisterDefOperand(Register Reg, bool isDead = false,
1466 bool Overlap = false,
1467 const TargetRegisterInfo *TRI = nullptr) const {
1468 return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
1469 Reg, isDead, Overlap, TRI);
1470 }
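A small sketch using the wrapper variants above to drop a stale kill flag once a register is known to live past MI; the helper name clearKillOfReg is hypothetical.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

// Clear the kill marker on MI's use of Reg, if any.
static void clearKillOfReg(llvm::MachineInstr &MI, llvm::Register Reg,
                           const llvm::TargetRegisterInfo *TRI) {
  if (llvm::MachineOperand *MO =
          MI.findRegisterUseOperand(Reg, /*isKill=*/true, TRI))
    MO->setIsKill(false);
}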
1471
1472 /// Find the index of the first operand in the
1473 /// operand list that is used to represent the predicate. It returns -1 if
1474 /// none is found.
1475 int findFirstPredOperandIdx() const;
1476
1477 /// Find the index of the flag word operand that
1478 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1479 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1480 ///
1481 /// If GroupNo is not NULL, it will receive the number of the operand group
1482 /// containing OpIdx.
1483 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
1484
1485 /// Compute the static register class constraint for operand OpIdx.
1486 /// For normal instructions, this is derived from the MCInstrDesc.
1487 /// For inline assembly it is derived from the flag words.
1488 ///
1489 /// Returns NULL if the static register class constraint cannot be
1490 /// determined.
1491 const TargetRegisterClass*
1492 getRegClassConstraint(unsigned OpIdx,
1493 const TargetInstrInfo *TII,
1494 const TargetRegisterInfo *TRI) const;
1495
1496 /// Applies the constraints (def/use) implied by this MI on \p Reg to
1497 /// the given \p CurRC.
1498 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1499 /// instructions inside the bundle will be taken into account. In other words,
1500 /// this method accumulates all the constraints of the operand of this MI and
1501 /// the related bundle if MI is a bundle or inside a bundle.
1502 ///
1503 /// Returns the register class that satisfies both \p CurRC and the
1504 /// constraints set by MI. Returns NULL if such a register class does not
1505 /// exist.
1506 ///
1507 /// \pre CurRC must not be NULL.
1508 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1509 Register Reg, const TargetRegisterClass *CurRC,
1510 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1511 bool ExploreBundle = false) const;
1512
1513 /// Applies the constraints (def/use) implied by the \p OpIdx operand
1514 /// to the given \p CurRC.
1515 ///
1516 /// Returns the register class that satisfies both \p CurRC and the
1517 /// constraints set by \p OpIdx MI. Returns NULL if such a register class
1518 /// does not exist.
1519 ///
1520 /// \pre CurRC must not be NULL.
1521 /// \pre The operand at \p OpIdx must be a register.
1522 const TargetRegisterClass *
1523 getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
1524 const TargetInstrInfo *TII,
1525 const TargetRegisterInfo *TRI) const;
1526
1527 /// Add a tie between the register operands at DefIdx and UseIdx.
1528 /// The tie will cause the register allocator to ensure that the two
1529 /// operands are assigned the same physical register.
1530 ///
1531 /// Tied operands are managed automatically for explicit operands in the
1532 /// MCInstrDesc. This method is for exceptional cases like inline asm.
1533 void tieOperands(unsigned DefIdx, unsigned UseIdx);
1534
1535 /// Given the index of a tied register operand, find the
1536 /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
1537 /// index of the tied operand which must exist.
1538 unsigned findTiedOperandIdx(unsigned OpIdx) const;
1539
1540 /// Given the index of a register def operand,
1541 /// check if the register def is tied to a source operand, due to either
1542 /// two-address elimination or inline assembly constraints. Returns the
1543 /// first tied use operand index by reference if UseOpIdx is not null.
1544 bool isRegTiedToUseOperand(unsigned DefOpIdx,
1545 unsigned *UseOpIdx = nullptr) const {
1546 const MachineOperand &MO = getOperand(DefOpIdx);
1547 if (!MO.isReg() || !MO.isDef() || !MO.isTied())
1548 return false;
1549 if (UseOpIdx)
1550 *UseOpIdx = findTiedOperandIdx(DefOpIdx);
1551 return true;
1552 }
1553
1554 /// Return true if the use operand of the specified index is tied to a def
1555 /// operand. It also returns the def operand index by reference if DefOpIdx
1556 /// is not null.
1557 bool isRegTiedToDefOperand(unsigned UseOpIdx,
1558 unsigned *DefOpIdx = nullptr) const {
1559 const MachineOperand &MO = getOperand(UseOpIdx);
1560 if (!MO.isReg() || !MO.isUse() || !MO.isTied())
1561 return false;
1562 if (DefOpIdx)
1563 *DefOpIdx = findTiedOperandIdx(UseOpIdx);
1564 return true;
1565 }
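A minimal sketch of walking a two-address tie with these helpers; the helper name getTiedDefReg is an illustrative assumption.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

// Return the register defined by the operand tied to the use at UseOpIdx,
// or an invalid Register if that use is not tied.
static llvm::Register getTiedDefReg(const llvm::MachineInstr &MI,
                                    unsigned UseOpIdx) {
  unsigned DefOpIdx = 0;
  if (MI.isRegTiedToDefOperand(UseOpIdx, &DefOpIdx))
    return MI.getOperand(DefOpIdx).getReg();
  return llvm::Register();
}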
1566
1567 /// Clears kill flags on all operands.
1568 void clearKillInfo();
1569
1570 /// Replace all occurrences of FromReg with ToReg:SubIdx,
1571 /// properly composing subreg indices where necessary.
1572 void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx,
1573 const TargetRegisterInfo &RegInfo);
1574