// LLVM 17.0.0git — AssignmentTrackingAnalysis.cpp
// (Doxygen navigation text removed; see the LLVM documentation for this file.)
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PrintPasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <assert.h>
#include <cstdint>
#include <optional>
#include <queue>
#include <sstream>
#include <unordered_map>

32using namespace llvm;
33#define DEBUG_TYPE "debug-ata"
34
35STATISTIC(NumDefsScanned, "Number of dbg locs that get scanned for removal");
36STATISTIC(NumDefsRemoved, "Number of dbg locs removed");
37STATISTIC(NumWedgesScanned, "Number of dbg wedges scanned");
38STATISTIC(NumWedgesChanged, "Number of dbg wedges changed");
39
41 MaxNumBlocks("debug-ata-max-blocks", cl::init(10000),
42 cl::desc("Maximum num basic blocks before debug info dropped"),
44/// Option for debugging the pass, determines if the memory location fragment
45/// filling happens after generating the variable locations.
46static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true),
48/// Print the results of the analysis. Respects -filter-print-funcs.
49static cl::opt<bool> PrintResults("print-debug-ata", cl::init(false),
51
52/// Coalesce adjacent dbg locs describing memory locations that have contiguous
53/// fragments. This reduces the cost of LiveDebugValues which does SSA
54/// construction for each explicitly stated variable fragment.
56 CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden);
57
58// Implicit conversions are disabled for enum class types, so unfortunately we
59// need to create a DenseMapInfo wrapper around the specified underlying type.
60template <> struct llvm::DenseMapInfo<VariableID> {
62 static inline VariableID getEmptyKey() {
63 return static_cast<VariableID>(Wrapped::getEmptyKey());
64 }
65 static inline VariableID getTombstoneKey() {
66 return static_cast<VariableID>(Wrapped::getTombstoneKey());
67 }
68 static unsigned getHashValue(const VariableID &Val) {
69 return Wrapped::getHashValue(static_cast<unsigned>(Val));
70 }
71 static bool isEqual(const VariableID &LHS, const VariableID &RHS) {
72 return LHS == RHS;
73 }
74};
75
76/// Helper class to build FunctionVarLocs, since that class isn't easy to
77/// modify. TODO: There's not a great deal of value in the split, it could be
78/// worth merging the two classes.
80 friend FunctionVarLocs;
82 // Use an unordered_map so we don't invalidate iterators after
83 // insert/modifications.
84 std::unordered_map<const Instruction *, SmallVector<VarLocInfo>>
85 VarLocsBeforeInst;
86
87 SmallVector<VarLocInfo> SingleLocVars;
88
89public:
90 unsigned getNumVariables() const { return Variables.size(); }
91
92 /// Find or insert \p V and return the ID.
94 return static_cast<VariableID>(Variables.insert(V));
95 }
96
97 /// Get a variable from its \p ID.
99 return Variables[static_cast<unsigned>(ID)];
100 }
101
102 /// Return ptr to wedge of defs or nullptr if no defs come just before /p
103 /// Before.
105 auto R = VarLocsBeforeInst.find(Before);
106 if (R == VarLocsBeforeInst.end())
107 return nullptr;
108 return &R->second;
109 }
110
111 /// Replace the defs that come just before /p Before with /p Wedge.
112 void setWedge(const Instruction *Before, SmallVector<VarLocInfo> &&Wedge) {
113 VarLocsBeforeInst[Before] = std::move(Wedge);
114 }
115
116 /// Add a def for a variable that is valid for its lifetime.
119 VarLocInfo VarLoc;
120 VarLoc.VariableID = insertVariable(Var);
121 VarLoc.Expr = Expr;
122 VarLoc.DL = DL;
123 VarLoc.Values = R;
124 SingleLocVars.emplace_back(VarLoc);
125 }
126
127 /// Add a def to the wedge of defs just before /p Before.
130 VarLocInfo VarLoc;
131 VarLoc.VariableID = insertVariable(Var);
132 VarLoc.Expr = Expr;
133 VarLoc.DL = DL;
134 VarLoc.Values = R;
135 VarLocsBeforeInst[Before].emplace_back(VarLoc);
136 }
137};
138
140 // Print the variable table first. TODO: Sorting by variable could make the
141 // output more stable?
142 unsigned Counter = -1;
143 OS << "=== Variables ===\n";
144 for (const DebugVariable &V : Variables) {
145 ++Counter;
146 // Skip first entry because it is a dummy entry.
147 if (Counter == 0) {
148 continue;
149 }
150 OS << "[" << Counter << "] " << V.getVariable()->getName();
151 if (auto F = V.getFragment())
152 OS << " bits [" << F->OffsetInBits << ", "
153 << F->OffsetInBits + F->SizeInBits << ")";
154 if (const auto *IA = V.getInlinedAt())
155 OS << " inlined-at " << *IA;
156 OS << "\n";
157 }
158
159 auto PrintLoc = [&OS](const VarLocInfo &Loc) {
160 OS << "DEF Var=[" << (unsigned)Loc.VariableID << "]"
161 << " Expr=" << *Loc.Expr << " Values=(";
162 for (auto *Op : Loc.Values.location_ops()) {
163 errs() << Op->getName() << " ";
164 }
165 errs() << ")\n";
166 };
167
168 // Print the single location variables.
169 OS << "=== Single location vars ===\n";
170 for (auto It = single_locs_begin(), End = single_locs_end(); It != End;
171 ++It) {
172 PrintLoc(*It);
173 }
174
175 // Print the non-single-location defs in line with IR.
176 OS << "=== In-line variable defs ===";
177 for (const BasicBlock &BB : Fn) {
178 OS << "\n" << BB.getName() << ":\n";
179 for (const Instruction &I : BB) {
180 for (auto It = locs_begin(&I), End = locs_end(&I); It != End; ++It) {
181 PrintLoc(*It);
182 }
183 OS << I << "\n";
184 }
185 }
186}
187
189 // Add the single-location variables first.
190 for (const auto &VarLoc : Builder.SingleLocVars)
191 VarLocRecords.emplace_back(VarLoc);
192 // Mark the end of the section.
193 SingleVarLocEnd = VarLocRecords.size();
194
195 // Insert a contiguous block of VarLocInfos for each instruction, mapping it
196 // to the start and end position in the vector with VarLocsBeforeInst.
197 for (auto &P : Builder.VarLocsBeforeInst) {
198 unsigned BlockStart = VarLocRecords.size();
199 for (const VarLocInfo &VarLoc : P.second)
200 VarLocRecords.emplace_back(VarLoc);
201 unsigned BlockEnd = VarLocRecords.size();
202 // Record the start and end indices.
203 if (BlockEnd != BlockStart)
204 VarLocsBeforeInst[P.first] = {BlockStart, BlockEnd};
205 }
206
207 // Copy the Variables vector from the builder's UniqueVector.
208 assert(Variables.empty() && "Expect clear before init");
209 // UniqueVectors IDs are one-based (which means the VarLocInfo VarID values
210 // are one-based) so reserve an extra and insert a dummy.
211 Variables.reserve(Builder.Variables.size() + 1);
212 Variables.push_back(DebugVariable(nullptr, std::nullopt, nullptr));
213 Variables.append(Builder.Variables.begin(), Builder.Variables.end());
214}
215
217 Variables.clear();
218 VarLocRecords.clear();
219 VarLocsBeforeInst.clear();
220 SingleVarLocEnd = 0;
221}
222
223/// Walk backwards along constant GEPs and bitcasts to the base storage from \p
224/// Start as far as possible. Prepend \Expression with the offset and append it
225/// with a DW_OP_deref that haes been implicit until now. Returns the walked-to
226/// value and modified expression.
227static std::pair<Value *, DIExpression *>
230 APInt OffsetInBytes(DL.getTypeSizeInBits(Start->getType()), false);
231 Value *End =
232 Start->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetInBytes);
234 if (OffsetInBytes.getBoolValue()) {
235 Ops = {dwarf::DW_OP_plus_uconst, OffsetInBytes.getZExtValue()};
237 Expression, Ops, /*StackValue=*/false, /*EntryValue=*/false);
238 }
239 Expression = DIExpression::append(Expression, {dwarf::DW_OP_deref});
240 return {End, Expression};
241}
242
243/// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression
244/// doesn't explicitly describe a memory location with DW_OP_deref or if the
245/// expression is too complex to interpret.
246static std::optional<int64_t>
248 int64_t Offset = 0;
249 const unsigned NumElements = DIExpr->getNumElements();
250 const auto Elements = DIExpr->getElements();
251 unsigned ExpectedDerefIdx = 0;
252 // Extract the offset.
253 if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
254 Offset = Elements[1];
255 ExpectedDerefIdx = 2;
256 } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) {
257 ExpectedDerefIdx = 3;
258 if (Elements[2] == dwarf::DW_OP_plus)
259 Offset = Elements[1];
260 else if (Elements[2] == dwarf::DW_OP_minus)
261 Offset = -Elements[1];
262 else
263 return std::nullopt;
264 }
265
266 // If that's all there is it means there's no deref.
267 if (ExpectedDerefIdx >= NumElements)
268 return std::nullopt;
269
270 // Check the next element is DW_OP_deref - otherwise this is too complex or
271 // isn't a deref expression.
272 if (Elements[ExpectedDerefIdx] != dwarf::DW_OP_deref)
273 return std::nullopt;
274
275 // Check the final operation is either the DW_OP_deref or is a fragment.
276 if (NumElements == ExpectedDerefIdx + 1)
277 return Offset; // Ends with deref.
278 unsigned ExpectedFragFirstIdx = ExpectedDerefIdx + 1;
279 unsigned ExpectedFragFinalIdx = ExpectedFragFirstIdx + 2;
280 if (NumElements == ExpectedFragFinalIdx + 1 &&
281 Elements[ExpectedFragFirstIdx] == dwarf::DW_OP_LLVM_fragment)
282 return Offset; // Ends with deref + fragment.
283
284 // Don't bother trying to interpret anything more complex.
285 return std::nullopt;
286}
287
288/// A whole (unfragmented) source variable.
289using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>;
291 return DebugAggregate(DII->getVariable(), DII->getDebugLoc().getInlinedAt());
292}
294 return DebugAggregate(Var.getVariable(), Var.getInlinedAt());
295}
296
298 // Enabling fragment coalescing reduces compiler run time when instruction
299 // referencing is enabled. However, it may cause LiveDebugVariables to create
300 // incorrect locations. Since instruction-referencing mode effectively
301 // bypasses LiveDebugVariables we only enable coalescing if the cl::opt flag
302 // has not been explicitly set and instruction-referencing is turned on.
306 Triple(F.getParent()->getTargetTriple()));
308 return true;
310 return false;
311 }
312 llvm_unreachable("Unknown boolOrDefault value");
313}
314
315namespace {
316/// In dwarf emission, the following sequence
317/// 1. dbg.value ... Fragment(0, 64)
318/// 2. dbg.value ... Fragment(0, 32)
319/// effectively sets Fragment(32, 32) to undef (each def sets all bits not in
320/// the intersection of the fragments to having "no location"). This makes
321/// sense for implicit location values because splitting the computed values
322/// could be troublesome, and is probably quite uncommon. When we convert
323/// dbg.assigns to dbg.value+deref this kind of thing is common, and describing
324/// a location (memory) rather than a value means we don't need to worry about
325/// splitting any values, so we try to recover the rest of the fragment
326/// location here.
327/// This class performs a(nother) dataflow analysis over the function, adding
328/// variable locations so that any bits of a variable with a memory location
329/// have that location explicitly reinstated at each subsequent variable
330/// location definition that that doesn't overwrite those bits. i.e. after a
331/// variable location def, insert new defs for the memory location with
332/// fragments for the difference of "all bits currently in memory" and "the
333/// fragment of the second def".
334class MemLocFragmentFill {
335 Function &Fn;
336 FunctionVarLocsBuilder *FnVarLocs;
337 const DenseSet<DebugAggregate> *VarsWithStackSlot;
338 bool CoalesceAdjacentFragments;
339
340 // 0 = no memory location.
341 using BaseAddress = unsigned;
342 using OffsetInBitsTy = unsigned;
344 using FragsInMemMap = IntervalMap<
345 OffsetInBitsTy, BaseAddress,
347 FragTraits>;
348 FragsInMemMap::Allocator IntervalMapAlloc;
349 using VarFragMap = DenseMap<unsigned, FragsInMemMap>;
350
351 /// IDs for memory location base addresses in maps. Use 0 to indicate that
352 /// there's no memory location.
357
358 struct FragMemLoc {
359 unsigned Var;
360 unsigned Base;
361 unsigned OffsetInBits;
362 unsigned SizeInBits;
363 DebugLoc DL;
364 };
366
367 /// BBInsertBeforeMap holds a description for the set of location defs to be
368 /// inserted after the analysis is complete. It is updated during the dataflow
369 /// and the entry for a block is CLEARED each time it is (re-)visited. After
370 /// the dataflow is complete, each block entry will contain the set of defs
371 /// calculated during the final (fixed-point) iteration.
373
374 static bool intervalMapsAreEqual(const FragsInMemMap &A,
375 const FragsInMemMap &B) {
376 auto AIt = A.begin(), AEnd = A.end();
377 auto BIt = B.begin(), BEnd = B.end();
378 for (; AIt != AEnd; ++AIt, ++BIt) {
379 if (BIt == BEnd)
380 return false; // B has fewer elements than A.
381 if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
382 return false; // Interval is different.
383 if (*AIt != *BIt)
384 return false; // Value at interval is different.
385 }
386 // AIt == AEnd. Check BIt is also now at end.
387 return BIt == BEnd;
388 }
389
390 static bool varFragMapsAreEqual(const VarFragMap &A, const VarFragMap &B) {
391 if (A.size() != B.size())
392 return false;
393 for (const auto &APair : A) {
394 auto BIt = B.find(APair.first);
395 if (BIt == B.end())
396 return false;
397 if (!intervalMapsAreEqual(APair.second, BIt->second))
398 return false;
399 }
400 return true;
401 }
402
403 /// Return a string for the value that \p BaseID represents.
404 std::string toString(unsigned BaseID) {
405 if (BaseID)
406 return Bases[BaseID].getVariableLocationOp(0)->getName().str();
407 else
408 return "None";
409 }
410
411 /// Format string describing an FragsInMemMap (IntervalMap) interval.
412 std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) {
413 std::string String;
414 std::stringstream S(String);
415 if (It.valid()) {
416 S << "[" << It.start() << ", " << It.stop()
417 << "): " << toString(It.value());
418 } else {
419 S << "invalid iterator (end)";
420 }
421 if (Newline)
422 S << "\n";
423 return S.str();
424 };
425
426 FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) {
427 FragsInMemMap Result(IntervalMapAlloc);
428 for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) {
429 LLVM_DEBUG(dbgs() << "a " << toString(AIt));
430 // This is basically copied from process() and inverted (process is
431 // performing something like a union whereas this is more of an
432 // intersect).
433
434 // There's no work to do if interval `a` overlaps no fragments in map `B`.
435 if (!B.overlaps(AIt.start(), AIt.stop()))
436 continue;
437
438 // Does StartBit intersect an existing fragment?
439 auto FirstOverlap = B.find(AIt.start());
440 assert(FirstOverlap != B.end());
441 bool IntersectStart = FirstOverlap.start() < AIt.start();
442 LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false)
443 << ", IntersectStart: " << IntersectStart << "\n");
444
445 // Does EndBit intersect an existing fragment?
446 auto LastOverlap = B.find(AIt.stop());
447 bool IntersectEnd =
448 LastOverlap != B.end() && LastOverlap.start() < AIt.stop();
449 LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false)
450 << ", IntersectEnd: " << IntersectEnd << "\n");
451
452 // Check if both ends of `a` intersect the same interval `b`.
453 if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
454 // Insert `a` (`a` is contained in `b`) if the values match.
455 // [ a ]
456 // [ - b - ]
457 // -
458 // [ r ]
459 LLVM_DEBUG(dbgs() << "- a is contained within "
460 << toString(FirstOverlap));
461 if (*AIt && *AIt == *FirstOverlap)
462 Result.insert(AIt.start(), AIt.stop(), *AIt);
463 } else {
464 // There's an overlap but `a` is not fully contained within
465 // `b`. Shorten any end-point intersections.
466 // [ - a - ]
467 // [ - b - ]
468 // -
469 // [ r ]
470 auto Next = FirstOverlap;
471 if (IntersectStart) {
472 LLVM_DEBUG(dbgs() << "- insert intersection of a and "
473 << toString(FirstOverlap));
474 if (*AIt && *AIt == *FirstOverlap)
475 Result.insert(AIt.start(), FirstOverlap.stop(), *AIt);
476 ++Next;
477 }
478 // [ - a - ]
479 // [ - b - ]
480 // -
481 // [ r ]
482 if (IntersectEnd) {
483 LLVM_DEBUG(dbgs() << "- insert intersection of a and "
484 << toString(LastOverlap));
485 if (*AIt && *AIt == *LastOverlap)
486 Result.insert(LastOverlap.start(), AIt.stop(), *AIt);
487 }
488
489 // Insert all intervals in map `B` that are contained within interval
490 // `a` where the values match.
491 // [ - - a - - ]
492 // [ b1 ] [ b2 ]
493 // -
494 // [ r1 ] [ r2 ]
495 while (Next != B.end() && Next.start() < AIt.stop() &&
496 Next.stop() <= AIt.stop()) {
498 << "- insert intersection of a and " << toString(Next));
499 if (*AIt && *AIt == *Next)
500 Result.insert(Next.start(), Next.stop(), *Next);
501 ++Next;
502 }
503 }
504 }
505 return Result;
506 }
507
508 /// Meet \p A and \p B, storing the result in \p A.
509 void meetVars(VarFragMap &A, const VarFragMap &B) {
510 // Meet A and B.
511 //
512 // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b)
513 for (auto It = A.begin(), End = A.end(); It != End; ++It) {
514 unsigned AVar = It->first;
515 FragsInMemMap &AFrags = It->second;
516 auto BIt = B.find(AVar);
517 if (BIt == B.end()) {
518 A.erase(It);
519 continue; // Var has no bits defined in B.
520 }
521 LLVM_DEBUG(dbgs() << "meet fragment maps for "
522 << Aggregates[AVar].first->getName() << "\n");
523 AFrags = meetFragments(AFrags, BIt->second);
524 }
525 }
526
527 bool meet(const BasicBlock &BB,
528 const SmallPtrSet<BasicBlock *, 16> &Visited) {
529 LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName()
530 << "\n");
531
532 VarFragMap BBLiveIn;
533 bool FirstMeet = true;
534 // LiveIn locs for BB is the meet of the already-processed preds' LiveOut
535 // locs.
536 for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
537 // Ignore preds that haven't been processed yet. This is essentially the
538 // same as initialising all variables to implicit top value (⊤) which is
539 // the identity value for the meet operation.
540 const BasicBlock *Pred = *I;
541 if (!Visited.count(Pred))
542 continue;
543
544 auto PredLiveOut = LiveOut.find(Pred);
545 assert(PredLiveOut != LiveOut.end());
546
547 if (FirstMeet) {
548 LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n");
549 BBLiveIn = PredLiveOut->second;
550 FirstMeet = false;
551 } else {
552 LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName()
553 << "\n");
554 meetVars(BBLiveIn, PredLiveOut->second);
555 }
556
557 // An empty set is ⊥ for the intersect-like meet operation. If we've
558 // already got ⊥ there's no need to run the code - we know the result is
559 // ⊥ since `meet(a, ⊥) = ⊥`.
560 if (BBLiveIn.size() == 0)
561 break;
562 }
563
564 auto CurrentLiveInEntry = LiveIn.find(&BB);
565 // If there's no LiveIn entry for the block yet, add it.
566 if (CurrentLiveInEntry == LiveIn.end()) {
567 LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName()
568 << "\n");
569 LiveIn[&BB] = std::move(BBLiveIn);
570 return /*Changed=*/true;
571 }
572
573 // If the LiveIn set has changed (expensive check) update it and return
574 // true.
575 if (!varFragMapsAreEqual(BBLiveIn, CurrentLiveInEntry->second)) {
576 LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n");
577 CurrentLiveInEntry->second = std::move(BBLiveIn);
578 return /*Changed=*/true;
579 }
580
581 LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n");
582 return /*Changed=*/false;
583 }
584
585 void insertMemLoc(BasicBlock &BB, Instruction &Before, unsigned Var,
586 unsigned StartBit, unsigned EndBit, unsigned Base,
587 DebugLoc DL) {
588 assert(StartBit < EndBit && "Cannot create fragment of size <= 0");
589 if (!Base)
590 return;
591 FragMemLoc Loc;
592 Loc.Var = Var;
593 Loc.OffsetInBits = StartBit;
594 Loc.SizeInBits = EndBit - StartBit;
595 assert(Base && "Expected a non-zero ID for Base address");
596 Loc.Base = Base;
597 Loc.DL = DL;
598 BBInsertBeforeMap[&BB][&Before].push_back(Loc);
599 LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName()
600 << " bits [" << StartBit << ", " << EndBit << ")\n");
601 }
602
603 /// Inserts a new dbg def if the interval found when looking up \p StartBit
604 /// in \p FragMap starts before \p StartBit or ends after \p EndBit (which
605 /// indicates - assuming StartBit->EndBit has just been inserted - that the
606 /// slice has been coalesced in the map).
607 void coalesceFragments(BasicBlock &BB, Instruction &Before, unsigned Var,
608 unsigned StartBit, unsigned EndBit, unsigned Base,
609 DebugLoc DL, const FragsInMemMap &FragMap) {
610 if (!CoalesceAdjacentFragments)
611 return;
612 // We've inserted the location into the map. The map will have coalesced
613 // adjacent intervals (variable fragments) that describe the same memory
614 // location. Use this knowledge to insert a debug location that describes
615 // that coalesced fragment. This may eclipse other locs we've just
616 // inserted. This is okay as redundant locs will be cleaned up later.
617 auto CoalescedFrag = FragMap.find(StartBit);
618 // Bail if no coalescing has taken place.
619 if (CoalescedFrag.start() == StartBit && CoalescedFrag.stop() == EndBit)
620 return;
621
622 LLVM_DEBUG(dbgs() << "- Insert loc for bits " << CoalescedFrag.start()
623 << " to " << CoalescedFrag.stop() << "\n");
624 insertMemLoc(BB, Before, Var, CoalescedFrag.start(), CoalescedFrag.stop(),
625 Base, DL);
626 }
627
628 void addDef(const VarLocInfo &VarLoc, Instruction &Before, BasicBlock &BB,
629 VarFragMap &LiveSet) {
630 DebugVariable DbgVar = FnVarLocs->getVariable(VarLoc.VariableID);
631 if (skipVariable(DbgVar.getVariable()))
632 return;
633 // Don't bother doing anything for this variables if we know it's fully
634 // promoted. We're only interested in variables that (sometimes) live on
635 // the stack here.
636 if (!VarsWithStackSlot->count(getAggregate(DbgVar)))
637 return;
638 unsigned Var = Aggregates.insert(
639 DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt()));
640
641 // [StartBit: EndBit) are the bits affected by this def.
642 const DIExpression *DIExpr = VarLoc.Expr;
643 unsigned StartBit;
644 unsigned EndBit;
645 if (auto Frag = DIExpr->getFragmentInfo()) {
646 StartBit = Frag->OffsetInBits;
647 EndBit = StartBit + Frag->SizeInBits;
648 } else {
649 assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits()));
650 StartBit = 0;
651 EndBit = *DbgVar.getVariable()->getSizeInBits();
652 }
653
654 // We will only fill fragments for simple memory-describing dbg.value
655 // intrinsics. If the fragment offset is the same as the offset from the
656 // base pointer, do The Thing, otherwise fall back to normal dbg.value
657 // behaviour. AssignmentTrackingLowering has generated DIExpressions
658 // written in terms of the base pointer.
659 // TODO: Remove this condition since the fragment offset doesn't always
660 // equal the offset from base pointer (e.g. for a SROA-split variable).
661 const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr);
662 const unsigned Base =
663 DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit
664 ? Bases.insert(VarLoc.Values)
665 : 0;
666 LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " ["
667 << StartBit << ", " << EndBit << "): " << toString(Base)
668 << "\n");
669
670 // First of all, any locs that use mem that are disrupted need reinstating.
671 // Unfortunately, IntervalMap doesn't let us insert intervals that overlap
672 // with existing intervals so this code involves a lot of fiddling around
673 // with intervals to do that manually.
674 auto FragIt = LiveSet.find(Var);
675
676 // Check if the variable does not exist in the map.
677 if (FragIt == LiveSet.end()) {
678 // Add this variable to the BB map.
679 auto P = LiveSet.try_emplace(Var, FragsInMemMap(IntervalMapAlloc));
680 assert(P.second && "Var already in map?");
681 // Add the interval to the fragment map.
682 P.first->second.insert(StartBit, EndBit, Base);
683 return;
684 }
685 // The variable has an entry in the map.
686
687 FragsInMemMap &FragMap = FragIt->second;
688 // First check the easy case: the new fragment `f` doesn't overlap with any
689 // intervals.
690 if (!FragMap.overlaps(StartBit, EndBit)) {
691 LLVM_DEBUG(dbgs() << "- No overlaps\n");
692 FragMap.insert(StartBit, EndBit, Base);
693 coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
694 FragMap);
695 return;
696 }
697 // There is at least one overlap.
698
699 // Does StartBit intersect an existing fragment?
700 auto FirstOverlap = FragMap.find(StartBit);
701 assert(FirstOverlap != FragMap.end());
702 bool IntersectStart = FirstOverlap.start() < StartBit;
703
704 // Does EndBit intersect an existing fragment?
705 auto LastOverlap = FragMap.find(EndBit);
706 bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit;
707
708 // Check if both ends of `f` intersect the same interval `i`.
709 if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
710 LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n");
711 // Shorten `i` so that there's space to insert `f`.
712 // [ f ]
713 // [ - i - ]
714 // +
715 // [ i ][ f ][ i ]
716
717 // Save values for use after inserting a new interval.
718 auto EndBitOfOverlap = FirstOverlap.stop();
719 unsigned OverlapValue = FirstOverlap.value();
720
721 // Shorten the overlapping interval.
722 FirstOverlap.setStop(StartBit);
723 insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
724 OverlapValue, VarLoc.DL);
725
726 // Insert a new interval to represent the end part.
727 FragMap.insert(EndBit, EndBitOfOverlap, OverlapValue);
728 insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap, OverlapValue,
729 VarLoc.DL);
730
731 // Insert the new (middle) fragment now there is space.
732 FragMap.insert(StartBit, EndBit, Base);
733 } else {
734 // There's an overlap but `f` may not be fully contained within
735 // `i`. Shorten any end-point intersections so that we can then
736 // insert `f`.
737 // [ - f - ]
738 // [ - i - ]
739 // | |
740 // [ i ]
741 // Shorten any end-point intersections.
742 if (IntersectStart) {
743 LLVM_DEBUG(dbgs() << "- Intersect interval at start\n");
744 // Split off at the intersection.
745 FirstOverlap.setStop(StartBit);
746 insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
747 *FirstOverlap, VarLoc.DL);
748 }
749 // [ - f - ]
750 // [ - i - ]
751 // | |
752 // [ i ]
753 if (IntersectEnd) {
754 LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
755 // Split off at the intersection.
756 LastOverlap.setStart(EndBit);
757 insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(), *LastOverlap,
758 VarLoc.DL);
759 }
760
761 LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
762 // FirstOverlap and LastOverlap have been shortened such that they're
763 // no longer overlapping with [StartBit, EndBit). Delete any overlaps
764 // that remain (these will be fully contained within `f`).
765 // [ - f - ] }
766 // [ - i - ] } Intersection shortening that has happened above.
767 // | | }
768 // [ i ] }
769 // -----------------
770 // [i2 ] } Intervals fully contained within `f` get erased.
771 // -----------------
772 // [ - f - ][ i ] } Completed insertion.
773 auto It = FirstOverlap;
774 if (IntersectStart)
775 ++It; // IntersectStart: first overlap has been shortened.
776 while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) {
777 LLVM_DEBUG(dbgs() << "- Erase " << toString(It));
778 It.erase(); // This increments It after removing the interval.
779 }
780 // We've dealt with all the overlaps now!
781 assert(!FragMap.overlaps(StartBit, EndBit));
782 LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n");
783 FragMap.insert(StartBit, EndBit, Base);
784 }
785
786 coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
787 FragMap);
788 }
789
790 bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); }
791
792 void process(BasicBlock &BB, VarFragMap &LiveSet) {
793 BBInsertBeforeMap[&BB].clear();
794 for (auto &I : BB) {
795 if (const auto *Locs = FnVarLocs->getWedge(&I)) {
796 for (const VarLocInfo &Loc : *Locs) {
797 addDef(Loc, I, *I.getParent(), LiveSet);
798 }
799 }
800 }
801 }
802
803public:
804 MemLocFragmentFill(Function &Fn,
805 const DenseSet<DebugAggregate> *VarsWithStackSlot,
806 bool CoalesceAdjacentFragments)
807 : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot),
808 CoalesceAdjacentFragments(CoalesceAdjacentFragments) {}
809
810 /// Add variable locations to \p FnVarLocs so that any bits of a variable
811 /// with a memory location have that location explicitly reinstated at each
812 /// subsequent variable location definition that that doesn't overwrite those
813 /// bits. i.e. after a variable location def, insert new defs for the memory
814 /// location with fragments for the difference of "all bits currently in
815 /// memory" and "the fragment of the second def". e.g.
816 ///
817 /// Before:
818 ///
819 /// var x bits 0 to 63: value in memory
820 /// more instructions
821 /// var x bits 0 to 31: value is %0
822 ///
823 /// After:
824 ///
825 /// var x bits 0 to 63: value in memory
826 /// more instructions
827 /// var x bits 0 to 31: value is %0
828 /// var x bits 32 to 61: value in memory ; <-- new loc def
829 ///
830 void run(FunctionVarLocsBuilder *FnVarLocs) {
832 return;
833
834 this->FnVarLocs = FnVarLocs;
835
836 // Prepare for traversal.
837 //
839 std::priority_queue<unsigned int, std::vector<unsigned int>,
840 std::greater<unsigned int>>
841 Worklist;
842 std::priority_queue<unsigned int, std::vector<unsigned int>,
843 std::greater<unsigned int>>
844 Pending;
847 { // Init OrderToBB and BBToOrder.
848 unsigned int RPONumber = 0;
849 for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
850 OrderToBB[RPONumber] = *RI;
851 BBToOrder[*RI] = RPONumber;
852 Worklist.push(RPONumber);
853 ++RPONumber;
854 }
855 LiveIn.init(RPONumber);
856 LiveOut.init(RPONumber);
857 }
858
859 // Perform the traversal.
860 //
861 // This is a standard "intersect of predecessor outs" dataflow problem. To
862 // solve it, we perform meet() and process() using the two worklist method
863 // until the LiveIn data for each block becomes unchanging.
864 //
865 // This dataflow is essentially working on maps of sets and at each meet we
866 // intersect the maps and the mapped sets. So, initialized live-in maps
867 // monotonically decrease in value throughout the dataflow.
869 while (!Worklist.empty() || !Pending.empty()) {
870 // We track what is on the pending worklist to avoid inserting the same
871 // thing twice. We could avoid this with a custom priority queue, but
872 // this is probably not worth it.
874 LLVM_DEBUG(dbgs() << "Processing Worklist\n");
875 while (!Worklist.empty()) {
876 BasicBlock *BB = OrderToBB[Worklist.top()];
877 LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
878 Worklist.pop();
879 bool InChanged = meet(*BB, Visited);
880 // Always consider LiveIn changed on the first visit.
881 InChanged |= Visited.insert(BB).second;
882 if (InChanged) {
884 << BB->getName() << " has new InLocs, process it\n");
885 // Mutate a copy of LiveIn while processing BB. Once we've processed
886 // the terminator LiveSet is the LiveOut set for BB.
887 // This is an expensive copy!
888 VarFragMap LiveSet = LiveIn[BB];
889
890 // Process the instructions in the block.
891 process(*BB, LiveSet);
892
893 // Relatively expensive check: has anything changed in LiveOut for BB?
894 if (!varFragMapsAreEqual(LiveOut[BB], LiveSet)) {
895 LLVM_DEBUG(dbgs() << BB->getName()
896 << " has new OutLocs, add succs to worklist: [ ");
897 LiveOut[BB] = std::move(LiveSet);
898 for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
899 if (OnPending.insert(*I).second) {
900 LLVM_DEBUG(dbgs() << I->getName() << " ");
901 Pending.push(BBToOrder[*I]);
902 }
903 }
904 LLVM_DEBUG(dbgs() << "]\n");
905 }
906 }
907 }
908 Worklist.swap(Pending);
909 // At this point, pending must be empty, since it was just the empty
910 // worklist
911 assert(Pending.empty() && "Pending should be empty");
912 }
913
914 // Insert new location defs.
915 for (auto &Pair : BBInsertBeforeMap) {
916 InsertMap &Map = Pair.second;
917 for (auto &Pair : Map) {
918 Instruction *InsertBefore = Pair.first;
919 assert(InsertBefore && "should never be null");
920 auto FragMemLocs = Pair.second;
921 auto &Ctx = Fn.getContext();
922
923 for (auto &FragMemLoc : FragMemLocs) {
924 DIExpression *Expr = DIExpression::get(Ctx, std::nullopt);
925 if (FragMemLoc.SizeInBits !=
926 *Aggregates[FragMemLoc.Var].first->getSizeInBits())
928 Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
930 FragMemLoc.OffsetInBits / 8);
931 DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr,
932 FragMemLoc.DL.getInlinedAt());
933 FnVarLocs->addVarLoc(InsertBefore, Var, Expr, FragMemLoc.DL,
934 Bases[FragMemLoc.Base]);
935 }
936 }
937 }
938 }
939};
940
941/// AssignmentTrackingLowering encapsulates a dataflow analysis over a function
942/// that interprets assignment tracking debug info metadata and stores in IR to
943/// create a map of variable locations.
944class AssignmentTrackingLowering {
945public:
946 /// The kind of location in use for a variable, where Mem is the stack home,
947 /// Val is an SSA value or const, and None means that there is not one single
948 /// kind (either because there are multiple or because there is none; it may
949 /// prove useful to split this into two values in the future).
950 ///
951 /// LocKind is a join-semilattice with the partial order:
952 /// None > Mem, Val
953 ///
954 /// i.e.
955 /// join(Mem, Mem) = Mem
956 /// join(Val, Val) = Val
957 /// join(Mem, Val) = None
958 /// join(None, Mem) = None
959 /// join(None, Val) = None
960 /// join(None, None) = None
961 ///
962 /// Note: the order is not `None > Val > Mem` because we're using DIAssignID
963 /// to name assignments and are not tracking the actual stored values.
964 /// Therefore currently there's no way to ensure that Mem values and Val
965 /// values are the same. This could be a future extension, though it's not
966 /// clear that many additional locations would be recovered that way in
967 /// practice as the likelihood of this sitation arising naturally seems
968 /// incredibly low.
969 enum class LocKind { Mem, Val, None };
970
971 /// An abstraction of the assignment of a value to a variable or memory
972 /// location.
973 ///
974 /// An Assignment is Known or NoneOrPhi. A Known Assignment means we have a
975 /// DIAssignID ptr that represents it. NoneOrPhi means that we don't (or
976 /// can't) know the ID of the last assignment that took place.
977 ///
978 /// The Status of the Assignment (Known or NoneOrPhi) is another
979 /// join-semilattice. The partial order is:
980 /// NoneOrPhi > Known {id_0, id_1, ...id_N}
981 ///
982 /// i.e. for all values x and y where x != y:
983 /// join(x, x) = x
984 /// join(x, y) = NoneOrPhi
985 struct Assignment {
986 enum S { Known, NoneOrPhi } Status;
987 /// ID of the assignment. nullptr if Status is not Known.
988 DIAssignID *ID;
989 /// The dbg.assign that marks this dbg-def. Mem-defs don't use this field.
990 /// May be nullptr.
992
993 bool isSameSourceAssignment(const Assignment &Other) const {
994 // Don't include Source in the equality check. Assignments are
995 // defined by their ID, not debug intrinsic(s).
996 return std::tie(Status, ID) == std::tie(Other.Status, Other.ID);
997 }
998 void dump(raw_ostream &OS) {
999 static const char *LUT[] = {"Known", "NoneOrPhi"};
1000 OS << LUT[Status] << "(id=";
1001 if (ID)
1002 OS << ID;
1003 else
1004 OS << "null";
1005 OS << ", s=";
1006 if (Source)
1007 OS << *Source;
1008 else
1009 OS << "null";
1010 OS << ")";
1011 }
1012
1013 static Assignment make(DIAssignID *ID, DbgAssignIntrinsic *Source) {
1014 return Assignment(Known, ID, Source);
1015 }
1016 static Assignment makeFromMemDef(DIAssignID *ID) {
1017 return Assignment(Known, ID, nullptr);
1018 }
1019 static Assignment makeNoneOrPhi() {
1020 return Assignment(NoneOrPhi, nullptr, nullptr);
1021 }
1022 // Again, need a Top value?
1023 Assignment()
1024 : Status(NoneOrPhi), ID(nullptr), Source(nullptr) {
1025 } // Can we delete this?
1026 Assignment(S Status, DIAssignID *ID, DbgAssignIntrinsic *Source)
1027 : Status(Status), ID(ID), Source(Source) {
1028 // If the Status is Known then we expect there to be an assignment ID.
1029 assert(Status == NoneOrPhi || ID);
1030 }
1031 };
1032
1033 using AssignmentMap = SmallVector<Assignment>;
1036 using UntaggedStoreAssignmentMap =
1037 DenseMap<const Instruction *,
1039
1040private:
1041 /// The highest numbered VariableID for partially promoted variables plus 1,
1042 /// the values for which start at 1.
1043 unsigned TrackedVariablesVectorSize = 0;
1044 /// Map a variable to the set of variables that it fully contains.
1045 OverlapMap VarContains;
1046 /// Map untagged stores to the variable fragments they assign to. Used by
1047 /// processUntaggedInstruction.
1048 UntaggedStoreAssignmentMap UntaggedStoreVars;
1049
1050 // Machinery to defer inserting dbg.values.
1052 InsertMap InsertBeforeMap;
1053 /// Clear the location definitions currently cached for insertion after /p
1054 /// After.
1055 void resetInsertionPoint(Instruction &After);
1056 void emitDbgValue(LocKind Kind, const DbgVariableIntrinsic *Source,
1057 Instruction *After);
1058
1059 static bool mapsAreEqual(const BitVector &Mask, const AssignmentMap &A,
1060 const AssignmentMap &B) {
1061 return llvm::all_of(Mask.set_bits(), [&](unsigned VarID) {
1062 return A[VarID].isSameSourceAssignment(B[VarID]);
1063 });
1064 }
1065
1066 /// Represents the stack and debug assignments in a block. Used to describe
1067 /// the live-in and live-out values for blocks, as well as the "current"
1068 /// value as we process each instruction in a block.
1069 struct BlockInfo {
1070 /// The set of variables (VariableID) being tracked in this block.
1071 BitVector VariableIDsInBlock;
1072 /// Dominating assignment to memory for each variable, indexed by
1073 /// VariableID.
1074 AssignmentMap StackHomeValue;
1075 /// Dominating assignemnt to each variable, indexed by VariableID.
1076 AssignmentMap DebugValue;
1077 /// Location kind for each variable. LiveLoc indicates whether the
1078 /// dominating assignment in StackHomeValue (LocKind::Mem), DebugValue
1079 /// (LocKind::Val), or neither (LocKind::None) is valid, in that order of
1080 /// preference. This cannot be derived by inspecting DebugValue and
1081 /// StackHomeValue due to the fact that there's no distinction in
1082 /// Assignment (the class) between whether an assignment is unknown or a
1083 /// merge of multiple assignments (both are Status::NoneOrPhi). In other
1084 /// words, the memory location may well be valid while both DebugValue and
1085 /// StackHomeValue contain Assignments that have a Status of NoneOrPhi.
1086 /// Indexed by VariableID.
1087 LocMap LiveLoc;
1088
1089 public:
1090 enum AssignmentKind { Stack, Debug };
1091 const AssignmentMap &getAssignmentMap(AssignmentKind Kind) const {
1092 switch (Kind) {
1093 case Stack:
1094 return StackHomeValue;
1095 case Debug:
1096 return DebugValue;
1097 }
1098 llvm_unreachable("Unknown AssignmentKind");
1099 }
1100 AssignmentMap &getAssignmentMap(AssignmentKind Kind) {
1101 return const_cast<AssignmentMap &>(
1102 const_cast<const BlockInfo *>(this)->getAssignmentMap(Kind));
1103 }
1104
1105 bool isVariableTracked(VariableID Var) const {
1106 return VariableIDsInBlock[static_cast<unsigned>(Var)];
1107 }
1108
1109 const Assignment &getAssignment(AssignmentKind Kind, VariableID Var) const {
1110 assert(isVariableTracked(Var) && "Var not tracked in block");
1111 return getAssignmentMap(Kind)[static_cast<unsigned>(Var)];
1112 }
1113
1114 LocKind getLocKind(VariableID Var) const {
1115 assert(isVariableTracked(Var) && "Var not tracked in block");
1116 return LiveLoc[static_cast<unsigned>(Var)];
1117 }
1118
1119 /// Set LocKind for \p Var only: does not set LocKind for VariableIDs of
1120 /// fragments contained win \p Var.
1121 void setLocKind(VariableID Var, LocKind K) {
1122 VariableIDsInBlock.set(static_cast<unsigned>(Var));
1123 LiveLoc[static_cast<unsigned>(Var)] = K;
1124 }
1125
1126 /// Set the assignment in the \p Kind assignment map for \p Var only: does
1127 /// not set the assignment for VariableIDs of fragments contained win \p
1128 /// Var.
1129 void setAssignment(AssignmentKind Kind, VariableID Var,
1130 const Assignment &AV) {
1131 VariableIDsInBlock.set(static_cast<unsigned>(Var));
1132 getAssignmentMap(Kind)[static_cast<unsigned>(Var)] = AV;
1133 }
1134
1135 /// Return true if there is an assignment matching \p AV in the \p Kind
1136 /// assignment map. Does consider assignments for VariableIDs of fragments
1137 /// contained win \p Var.
1138 bool hasAssignment(AssignmentKind Kind, VariableID Var,
1139 const Assignment &AV) const {
1140 if (!isVariableTracked(Var))
1141 return false;
1142 return AV.isSameSourceAssignment(getAssignment(Kind, Var));
1143 }
1144
1145 /// Compare every element in each map to determine structural equality
1146 /// (slow).
1147 bool operator==(const BlockInfo &Other) const {
1148 return VariableIDsInBlock == Other.VariableIDsInBlock &&
1149 LiveLoc == Other.LiveLoc &&
1150 mapsAreEqual(VariableIDsInBlock, StackHomeValue,
1151 Other.StackHomeValue) &&
1152 mapsAreEqual(VariableIDsInBlock, DebugValue, Other.DebugValue);
1153 }
1154 bool operator!=(const BlockInfo &Other) const { return !(*this == Other); }
1155 bool isValid() {
1156 return LiveLoc.size() == DebugValue.size() &&
1157 LiveLoc.size() == StackHomeValue.size();
1158 }
1159
1160 /// Clear everything and initialise with ⊤-values for all variables.
1161 void init(int NumVars) {
1162 StackHomeValue.clear();
1163 DebugValue.clear();
1164 LiveLoc.clear();
1165 VariableIDsInBlock = BitVector(NumVars);
1166 StackHomeValue.insert(StackHomeValue.begin(), NumVars,
1167 Assignment::makeNoneOrPhi());
1168 DebugValue.insert(DebugValue.begin(), NumVars,
1169 Assignment::makeNoneOrPhi());
1170 LiveLoc.insert(LiveLoc.begin(), NumVars, LocKind::None);
1171 }
1172
1173 /// Helper for join.
1174 template <typename ElmtType, typename FnInputType>
1175 static void joinElmt(int Index, SmallVector<ElmtType> &Target,
1176 const SmallVector<ElmtType> &A,
1177 const SmallVector<ElmtType> &B,
1178 ElmtType (*Fn)(FnInputType, FnInputType)) {
1179 Target[Index] = Fn(A[Index], B[Index]);
1180 }
1181
1182 /// See comment for AssignmentTrackingLowering::joinBlockInfo.
1183 static BlockInfo join(const BlockInfo &A, const BlockInfo &B, int NumVars) {
1184 // Join A and B.
1185 //
1186 // Intersect = join(a, b) for a in A, b in B where Var(a) == Var(b)
1187 // Difference = join(x, ⊤) for x where Var(x) is in A xor B
1188 // Join = Intersect ∪ Difference
1189 //
1190 // This is achieved by performing a join on elements from A and B with
1191 // variables common to both A and B (join elements indexed by var
1192 // intersect), then adding ⊤-value elements for vars in A xor B. The
1193 // latter part is equivalent to performing join on elements with variables
1194 // in A xor B with the ⊤-value for the map element since join(x, ⊤) = ⊤.
1195 // BlockInfo::init initializes all variable entries to the ⊤ value so we
1196 // don't need to explicitly perform that step as Join.VariableIDsInBlock
1197 // is set to the union of the variables in A and B at the end of this
1198 // function.
1199 BlockInfo Join;
1200 Join.init(NumVars);
1201
1202 BitVector Intersect = A.VariableIDsInBlock;
1203 Intersect &= B.VariableIDsInBlock;
1204
1205 for (auto VarID : Intersect.set_bits()) {
1206 joinElmt(VarID, Join.LiveLoc, A.LiveLoc, B.LiveLoc, joinKind);
1207 joinElmt(VarID, Join.DebugValue, A.DebugValue, B.DebugValue,
1208 joinAssignment);
1209 joinElmt(VarID, Join.StackHomeValue, A.StackHomeValue, B.StackHomeValue,
1210 joinAssignment);
1211 }
1212
1213 Join.VariableIDsInBlock = A.VariableIDsInBlock;
1214 Join.VariableIDsInBlock |= B.VariableIDsInBlock;
1215 assert(Join.isValid());
1216 return Join;
1217 }
1218 };
1219
1220 Function &Fn;
1221 const DataLayout &Layout;
1222 const DenseSet<DebugAggregate> *VarsWithStackSlot;
1223 FunctionVarLocsBuilder *FnVarLocs;
1226
1227 /// Helper for process methods to track variables touched each frame.
1228 DenseSet<VariableID> VarsTouchedThisFrame;
1229
1230 /// The set of variables that sometimes are not located in their stack home.
1231 DenseSet<DebugAggregate> NotAlwaysStackHomed;
1232
1233 VariableID getVariableID(const DebugVariable &Var) {
1234 return static_cast<VariableID>(FnVarLocs->insertVariable(Var));
1235 }
1236
1237 /// Join the LiveOut values of preds that are contained in \p Visited into
1238 /// LiveIn[BB]. Return True if LiveIn[BB] has changed as a result. LiveIn[BB]
1239 /// values monotonically increase. See the @link joinMethods join methods
1240 /// @endlink documentation for more info.
1241 bool join(const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited);
1242 ///@name joinMethods
1243 /// Functions that implement `join` (the least upper bound) for the
1244 /// join-semilattice types used in the dataflow. There is an explicit bottom
1245 /// value (⊥) for some types and and explicit top value (⊤) for all types.
1246 /// By definition:
1247 ///
1248 /// Join(A, B) >= A && Join(A, B) >= B
1249 /// Join(A, ⊥) = A
1250 /// Join(A, ⊤) = ⊤
1251 ///
1252 /// These invariants are important for monotonicity.
1253 ///
1254 /// For the map-type functions, all unmapped keys in an empty map are
1255 /// associated with a bottom value (⊥). This represents their values being
1256 /// unknown. Unmapped keys in non-empty maps (joining two maps with a key
1257 /// only present in one) represents either a variable going out of scope or
1258 /// dropped debug info. It is assumed the key is associated with a top value
1259 /// (⊤) in this case (unknown location / assignment).
1260 ///@{
1261 static LocKind joinKind(LocKind A, LocKind B);
1262 static Assignment joinAssignment(const Assignment &A, const Assignment &B);
1263 BlockInfo joinBlockInfo(const BlockInfo &A, const BlockInfo &B);
1264 ///@}
1265
1266 /// Process the instructions in \p BB updating \p LiveSet along the way. \p
1267 /// LiveSet must be initialized with the current live-in locations before
1268 /// calling this.
1269 void process(BasicBlock &BB, BlockInfo *LiveSet);
1270 ///@name processMethods
1271 /// Methods to process instructions in order to update the LiveSet (current
1272 /// location information).
1273 ///@{
1274 void processNonDbgInstruction(Instruction &I, BlockInfo *LiveSet);
1275 void processDbgInstruction(Instruction &I, BlockInfo *LiveSet);
1276 /// Update \p LiveSet after encountering an instruction with a DIAssignID
1277 /// attachment, \p I.
1278 void processTaggedInstruction(Instruction &I, BlockInfo *LiveSet);
1279 /// Update \p LiveSet after encountering an instruciton without a DIAssignID
1280 /// attachment, \p I.
1281 void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet);
1282 void processDbgAssign(DbgAssignIntrinsic &DAI, BlockInfo *LiveSet);
1283 void processDbgValue(DbgValueInst &DVI, BlockInfo *LiveSet);
1284 /// Add an assignment to memory for the variable /p Var.
1285 void addMemDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
1286 /// Add an assignment to the variable /p Var.
1287 void addDbgDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
1288 ///@}
1289
1290 /// Set the LocKind for \p Var.
1291 void setLocKind(BlockInfo *LiveSet, VariableID Var, LocKind K);
1292 /// Get the live LocKind for a \p Var. Requires addMemDef or addDbgDef to
1293 /// have been called for \p Var first.
1294 LocKind getLocKind(BlockInfo *LiveSet, VariableID Var);
1295 /// Return true if \p Var has an assignment in \p M matching \p AV.
1296 bool hasVarWithAssignment(BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind,
1297 VariableID Var, const Assignment &AV);
1298 /// Return the set of VariableIDs corresponding the fragments contained fully
1299 /// within the variable/fragment \p Var.
1300 ArrayRef<VariableID> getContainedFragments(VariableID Var) const;
1301
1302 /// Mark \p Var as having been touched this frame. Note, this applies only
1303 /// to the exact fragment \p Var and not to any fragments contained within.
1304 void touchFragment(VariableID Var);
1305
1306 /// Emit info for variables that are fully promoted.
1307 bool emitPromotedVarLocs(FunctionVarLocsBuilder *FnVarLocs);
1308
1309public:
1310 AssignmentTrackingLowering(Function &Fn, const DataLayout &Layout,
1311 const DenseSet<DebugAggregate> *VarsWithStackSlot)
1312 : Fn(Fn), Layout(Layout), VarsWithStackSlot(VarsWithStackSlot) {}
1313 /// Run the analysis, adding variable location info to \p FnVarLocs. Returns
1314 /// true if any variable locations have been added to FnVarLocs.
1315 bool run(FunctionVarLocsBuilder *FnVarLocs);
1316};
1317} // namespace
1318
1320AssignmentTrackingLowering::getContainedFragments(VariableID Var) const {
1321 auto R = VarContains.find(Var);
1322 if (R == VarContains.end())
1323 return std::nullopt;
1324 return R->second;
1325}
1326
/// Record that \p Var was touched in the current frame. Applies only to the
/// exact fragment \p Var, not to fragments contained within it (see the
/// declaration's comment).
void AssignmentTrackingLowering::touchFragment(VariableID Var) {
  VarsTouchedThisFrame.insert(Var);
}
1330
1331void AssignmentTrackingLowering::setLocKind(BlockInfo *LiveSet, VariableID Var,
1332 LocKind K) {
1333 auto SetKind = [this](BlockInfo *LiveSet, VariableID Var, LocKind K) {
1334 LiveSet->setLocKind(Var, K);
1335 touchFragment(Var);
1336 };
1337 SetKind(LiveSet, Var, K);
1338
1339 // Update the LocKind for all fragments contained within Var.
1340 for (VariableID Frag : getContainedFragments(Var))
1341 SetKind(LiveSet, Frag, K);
1342}
1343
/// Get the live LocKind for \p Var in \p LiveSet. Per the declaration's
/// contract, addMemDef or addDbgDef must have been called for \p Var first.
AssignmentTrackingLowering::LocKind
AssignmentTrackingLowering::getLocKind(BlockInfo *LiveSet, VariableID Var) {
  return LiveSet->getLocKind(Var);
}
1348
1349void AssignmentTrackingLowering::addMemDef(BlockInfo *LiveSet, VariableID Var,
1350 const Assignment &AV) {
1351 LiveSet->setAssignment(BlockInfo::Stack, Var, AV);
1352
1353 // Use this assigment for all fragments contained within Var, but do not
1354 // provide a Source because we cannot convert Var's value to a value for the
1355 // fragment.
1356 Assignment FragAV = AV;
1357 FragAV.Source = nullptr;
1358 for (VariableID Frag : getContainedFragments(Var))
1359 LiveSet->setAssignment(BlockInfo::Stack, Frag, FragAV);
1360}
1361
1362void AssignmentTrackingLowering::addDbgDef(BlockInfo *LiveSet, VariableID Var,
1363 const Assignment &AV) {
1364 LiveSet->setAssignment(BlockInfo::Debug, Var, AV);
1365
1366 // Use this assigment for all fragments contained within Var, but do not
1367 // provide a Source because we cannot convert Var's value to a value for the
1368 // fragment.
1369 Assignment FragAV = AV;
1370 FragAV.Source = nullptr;
1371 for (VariableID Frag : getContainedFragments(Var))
1372 LiveSet->setAssignment(BlockInfo::Debug, Frag, FragAV);
1373}
1374
1376 return cast<DIAssignID>(I.getMetadata(LLVMContext::MD_DIAssignID));
1377}
1378
1380 return cast<DIAssignID>(DAI.getAssignID());
1381}
1382
1383/// Return true if \p Var has an assignment in \p M matching \p AV.
1384bool AssignmentTrackingLowering::hasVarWithAssignment(
1385 BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, VariableID Var,
1386 const Assignment &AV) {
1387 if (!LiveSet->hasAssignment(Kind, Var, AV))
1388 return false;
1389
1390 // Check all the frags contained within Var as these will have all been
1391 // mapped to AV at the last store to Var.
1392 for (VariableID Frag : getContainedFragments(Var))
1393 if (!LiveSet->hasAssignment(Kind, Frag, AV))
1394 return false;
1395 return true;
1396}
1397
1398#ifndef NDEBUG
1399const char *locStr(AssignmentTrackingLowering::LocKind Loc) {
1400 using LocKind = AssignmentTrackingLowering::LocKind;
1401 switch (Loc) {
1402 case LocKind::Val:
1403 return "Val";
1404 case LocKind::Mem:
1405 return "Mem";
1406 case LocKind::None:
1407 return "None";
1408 };
1409 llvm_unreachable("unknown LocKind");
1410}
1411#endif
1412
1413void AssignmentTrackingLowering::emitDbgValue(
1414 AssignmentTrackingLowering::LocKind Kind,
1415 const DbgVariableIntrinsic *Source, Instruction *After) {
1416
1417 DILocation *DL = Source->getDebugLoc();
1418 auto Emit = [this, Source, After, DL](Metadata *Val, DIExpression *Expr) {
1419 assert(Expr);
1420 if (!Val)
1422 PoisonValue::get(Type::getInt1Ty(Source->getContext())));
1423
1424 // Find a suitable insert point.
1425 Instruction *InsertBefore = After->getNextNode();
1426 assert(InsertBefore && "Shouldn't be inserting after a terminator");
1427
1428 VariableID Var = getVariableID(DebugVariable(Source));
1429 VarLocInfo VarLoc;
1430 VarLoc.VariableID = static_cast<VariableID>(Var);
1431 VarLoc.Expr = Expr;
1432 VarLoc.Values = RawLocationWrapper(Val);
1433 VarLoc.DL = DL;
1434 // Insert it into the map for later.
1435 InsertBeforeMap[InsertBefore].push_back(VarLoc);
1436 };
1437
1438 // NOTE: This block can mutate Kind.
1439 if (Kind == LocKind::Mem) {
1440 const auto *DAI = cast<DbgAssignIntrinsic>(Source);
1441 // Check the address hasn't been dropped (e.g. the debug uses may not have
1442 // been replaced before deleting a Value).
1443 if (DAI->isKillAddress()) {
1444 // The address isn't valid so treat this as a non-memory def.
1445 Kind = LocKind::Val;
1446 } else {
1447 Value *Val = DAI->getAddress();
1448 DIExpression *Expr = DAI->getAddressExpression();
1449 assert(!Expr->getFragmentInfo() &&
1450 "fragment info should be stored in value-expression only");
1451 // Copy the fragment info over from the value-expression to the new
1452 // DIExpression.
1453 if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) {
1454 auto FragInfo = *OptFragInfo;
1456 Expr, FragInfo.OffsetInBits, FragInfo.SizeInBits);
1457 }
1458 // The address-expression has an implicit deref, add it now.
1459 std::tie(Val, Expr) =
1460 walkToAllocaAndPrependOffsetDeref(Layout, Val, Expr);
1461 Emit(ValueAsMetadata::get(Val), Expr);
1462 return;
1463 }
1464 }
1465
1466 if (Kind == LocKind::Val) {
1467 Emit(Source->getRawLocation(), Source->getExpression());
1468 return;
1469 }
1470
1471 if (Kind == LocKind::None) {
1472 Emit(nullptr, Source->getExpression());
1473 return;
1474 }
1475}
1476
1477void AssignmentTrackingLowering::processNonDbgInstruction(
1478 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1479 if (I.hasMetadata(LLVMContext::MD_DIAssignID))
1480 processTaggedInstruction(I, LiveSet);
1481 else
1482 processUntaggedInstruction(I, LiveSet);
1483}
1484
1485void AssignmentTrackingLowering::processUntaggedInstruction(
1486 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1487 // Interpret stack stores that are not tagged as an assignment in memory for
1488 // the variables associated with that address. These stores may not be tagged
1489 // because a) the store cannot be represented using dbg.assigns (non-const
1490 // length or offset) or b) the tag was accidentally dropped during
1491 // optimisations. For these stores we fall back to assuming that the stack
1492 // home is a valid location for the variables. The benefit is that this
1493 // prevents us missing an assignment and therefore incorrectly maintaining
1494 // earlier location definitions, and in many cases it should be a reasonable
1495 // assumption. However, this will occasionally lead to slight
1496 // inaccuracies. The value of a hoisted untagged store will be visible
1497 // "early", for example.
1498 assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
1499 auto It = UntaggedStoreVars.find(&I);
1500 if (It == UntaggedStoreVars.end())
1501 return; // No variables associated with the store destination.
1502
1503 LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I
1504 << "\n");
1505 // Iterate over the variables that this store affects, add a NoneOrPhi dbg
1506 // and mem def, set lockind to Mem, and emit a location def for each.
1507 for (auto [Var, Info] : It->second) {
1508 // This instruction is treated as both a debug and memory assignment,
1509 // meaning the memory location should be used. We don't have an assignment
1510 // ID though so use Assignment::makeNoneOrPhi() to create an imaginary one.
1511 addMemDef(LiveSet, Var, Assignment::makeNoneOrPhi());
1512 addDbgDef(LiveSet, Var, Assignment::makeNoneOrPhi());
1513 setLocKind(LiveSet, Var, LocKind::Mem);
1514 LLVM_DEBUG(dbgs() << " setting Stack LocKind to: " << locStr(LocKind::Mem)
1515 << "\n");
1516 // Build the dbg location def to insert.
1517 //
1518 // DIExpression: Add fragment and offset.
1519 DebugVariable V = FnVarLocs->getVariable(Var);
1520 DIExpression *DIE = DIExpression::get(I.getContext(), std::nullopt);
1521 if (auto Frag = V.getFragment()) {
1522 auto R = DIExpression::createFragmentExpression(DIE, Frag->OffsetInBits,
1523 Frag->SizeInBits);
1524 assert(R && "unexpected createFragmentExpression failure");
1525 DIE = *R;
1526 }
1528 if (Info.OffsetInBits)
1529 Ops = {dwarf::DW_OP_plus_uconst, Info.OffsetInBits / 8};
1530 Ops.push_back(dwarf::DW_OP_deref);
1531 DIE = DIExpression::prependOpcodes(DIE, Ops, /*StackValue=*/false,
1532 /*EntryValue=*/false);
1533 // Find a suitable insert point.
1534 Instruction *InsertBefore = I.getNextNode();
1535 assert(InsertBefore && "Shouldn't be inserting after a terminator");
1536
1537 // Get DILocation for this unrecorded assignment.
1538 DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
1539 const DILocation *DILoc = DILocation::get(
1540 Fn.getContext(), 0, 0, V.getVariable()->getScope(), InlinedAt);
1541
1542 VarLocInfo VarLoc;
1543 VarLoc.VariableID = static_cast<VariableID>(Var);
1544 VarLoc.Expr = DIE;
1545 VarLoc.Values = RawLocationWrapper(
1546 ValueAsMetadata::get(const_cast<AllocaInst *>(Info.Base)));
1547 VarLoc.DL = DILoc;
1548 // 3. Insert it into the map for later.
1549 InsertBeforeMap[InsertBefore].push_back(VarLoc);
1550 }
1551}
1552
/// Update \p LiveSet for an instruction \p I that carries a DIAssignID: make
/// the ID the dominating memory assignment for each linked variable, then
/// decide (per variable) whether a stack-home or value location can be
/// emitted, based on whether the debug assignment matches.
void AssignmentTrackingLowering::processTaggedInstruction(
    Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
  auto Linked = at::getAssignmentMarkers(&I);
  // No dbg.assign intrinsics linked.
  // FIXME: All vars that have a stack slot this store modifies that don't have
  // a dbg.assign linked to it should probably treat this like an untagged
  // store.
  if (Linked.empty())
    return;

  LLVM_DEBUG(dbgs() << "processTaggedInstruction on " << I << "\n");
  for (DbgAssignIntrinsic *DAI : Linked) {
    VariableID Var = getVariableID(DebugVariable(DAI));
    // Something has gone wrong if VarsWithStackSlot doesn't contain a variable
    // that is linked to a store.
    assert(VarsWithStackSlot->count(getAggregate(DAI)) &&
           "expected DAI's variable to have stack slot");

    Assignment AV = Assignment::makeFromMemDef(getIDFromInst(I));
    addMemDef(LiveSet, Var, AV);

    LLVM_DEBUG(dbgs() << " linked to " << *DAI << "\n");
    LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
                      << " -> ");

    // The last assignment to the stack is now AV. Check if the last debug
    // assignment has a matching Assignment.
    if (hasVarWithAssignment(LiveSet, BlockInfo::Debug, Var, AV)) {
      // The StackHomeValue and DebugValue for this variable match so we can
      // emit a stack home location here.
      LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
      LLVM_DEBUG(dbgs() << " Stack val: "; AV.dump(dbgs()); dbgs() << "\n");
      LLVM_DEBUG(dbgs() << " Debug val: ";
                 LiveSet->DebugValue[static_cast<unsigned>(Var)].dump(dbgs());
                 dbgs() << "\n");
      setLocKind(LiveSet, Var, LocKind::Mem);
      emitDbgValue(LocKind::Mem, DAI, &I);
      continue;
    }

    // The StackHomeValue and DebugValue for this variable do not match. I.e.
    // The value currently stored in the stack is not what we'd expect to
    // see, so we cannot emit a stack home location here. Now we will
    // look at the live LocKind for the variable and determine an appropriate
    // dbg.value to emit.
    LocKind PrevLoc = getLocKind(LiveSet, Var);
    switch (PrevLoc) {
    case LocKind::Val: {
      // The value in memory has changed but we're not currently
      // using the memory location. Do nothing.
      LLVM_DEBUG(dbgs() << "Val, (unchanged)\n";);
      setLocKind(LiveSet, Var, LocKind::Val);
    } break;
    case LocKind::Mem: {
      // There's been an assignment to memory that we were using as a
      // location for this variable, and the Assignment doesn't match what
      // we'd expect to see in memory.
      Assignment DbgAV = LiveSet->getAssignment(BlockInfo::Debug, Var);
      if (DbgAV.Status == Assignment::NoneOrPhi) {
        // We need to terminate any previously open location now.
        LLVM_DEBUG(dbgs() << "None, No Debug value available\n";);
        setLocKind(LiveSet, Var, LocKind::None);
        emitDbgValue(LocKind::None, DAI, &I);
      } else {
        // The previous DebugValue Value can be used here.
        LLVM_DEBUG(dbgs() << "Val, Debug value is Known\n";);
        setLocKind(LiveSet, Var, LocKind::Val);
        if (DbgAV.Source) {
          emitDbgValue(LocKind::Val, DbgAV.Source, &I);
        } else {
          // PrevAV.Source is nullptr so we must emit undef here.
          emitDbgValue(LocKind::None, DAI, &I);
        }
      }
    } break;
    case LocKind::None: {
      // There's been an assignment to memory and we currently are
      // not tracking a location for the variable. Do not emit anything.
      LLVM_DEBUG(dbgs() << "None, (unchanged)\n";);
      setLocKind(LiveSet, Var, LocKind::None);
    } break;
    }
  }
}
1637
1638void AssignmentTrackingLowering::processDbgAssign(DbgAssignIntrinsic &DAI,
1639 BlockInfo *LiveSet) {
1640 // Only bother tracking variables that are at some point stack homed. Other
1641 // variables can be dealt with trivially later.
1642 if (!VarsWithStackSlot->count(getAggregate(&DAI)))
1643 return;
1644
1645 VariableID Var = getVariableID(DebugVariable(&DAI));
1646 Assignment AV = Assignment::make(getIDFromMarker(DAI), &DAI);
1647 addDbgDef(LiveSet, Var, AV);
1648
1649 LLVM_DEBUG(dbgs() << "processDbgAssign on " << DAI << "\n";);
1650 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1651 << " -> ");
1652
1653 // Check if the DebugValue and StackHomeValue both hold the same
1654 // Assignment.
1655 if (hasVarWithAssignment(LiveSet, BlockInfo::Stack, Var, AV)) {
1656 // They match. We can use the stack home because the debug intrinsics state
1657 // that an assignment happened here, and we know that specific assignment
1658 // was the last one to take place in memory for this variable.
1659 LocKind Kind;
1660 if (DAI.isKillAddress()) {
1661 LLVM_DEBUG(
1662 dbgs()
1663 << "Val, Stack matches Debug program but address is killed\n";);
1664 Kind = LocKind::Val;
1665 } else {
1666 LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
1667 Kind = LocKind::Mem;
1668 };
1669 setLocKind(LiveSet, Var, Kind);
1670 emitDbgValue(Kind, &DAI, &DAI);
1671 } else {
1672 // The last assignment to the memory location isn't the one that we want to
1673 // show to the user so emit a dbg.value(Value). Value may be undef.
1674 LLVM_DEBUG(dbgs() << "Val, Stack contents is unknown\n";);
1675 setLocKind(LiveSet, Var, LocKind::Val);
1676 emitDbgValue(LocKind::Val, &DAI, &DAI);
1677 }
1678}
1679
1680void AssignmentTrackingLowering::processDbgValue(DbgValueInst &DVI,
1681 BlockInfo *LiveSet) {
1682 // Only other tracking variables that are at some point stack homed.
1683 // Other variables can be dealt with trivally later.
1684 if (!VarsWithStackSlot->count(getAggregate(&DVI)))
1685 return;
1686
1687 VariableID Var = getVariableID(DebugVariable(&DVI));
1688 // We have no ID to create an Assignment with so we mark this assignment as
1689 // NoneOrPhi. Note that the dbg.value still exists, we just cannot determine
1690 // the assignment responsible for setting this value.
1691 // This is fine; dbg.values are essentially interchangable with unlinked
1692 // dbg.assigns, and some passes such as mem2reg and instcombine add them to
1693 // PHIs for promoted variables.
1694 Assignment AV = Assignment::makeNoneOrPhi();
1695 addDbgDef(LiveSet, Var, AV);
1696
1697 LLVM_DEBUG(dbgs() << "processDbgValue on " << DVI << "\n";);
1698 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1699 << " -> Val, dbg.value override");
1700
1701 setLocKind(LiveSet, Var, LocKind::Val);
1702 emitDbgValue(LocKind::Val, &DVI, &DVI);
1703}
1704
1705void AssignmentTrackingLowering::processDbgInstruction(
1706 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1707 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
1708 processDbgAssign(*DAI, LiveSet);
1709 else if (auto *DVI = dyn_cast<DbgValueInst>(&I))
1710 processDbgValue(*DVI, LiveSet);
1711}
1712
1713void AssignmentTrackingLowering::resetInsertionPoint(Instruction &After) {
1714 assert(!After.isTerminator() && "Can't insert after a terminator");
1715 auto R = InsertBeforeMap.find(After.getNextNode());
1716 if (R == InsertBeforeMap.end())
1717 return;
1718 R->second.clear();
1719}
1720
1721void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) {
1722 for (auto II = BB.begin(), EI = BB.end(); II != EI;) {
1723 assert(VarsTouchedThisFrame.empty());
1724 // Process the instructions in "frames". A "frame" includes a single
1725 // non-debug instruction followed any debug instructions before the
1726 // next non-debug instruction.
1727 if (!isa<DbgInfoIntrinsic>(&*II)) {
1728 if (II->isTerminator())
1729 break;
1730 resetInsertionPoint(*II);
1731 processNonDbgInstruction(*II, LiveSet);
1732 assert(LiveSet->isValid());
1733 ++II;
1734 }
1735 while (II != EI) {
1736 if (!isa<DbgInfoIntrinsic>(&*II))
1737 break;
1738 resetInsertionPoint(*II);
1739 processDbgInstruction(*II, LiveSet);
1740 assert(LiveSet->isValid());
1741 ++II;
1742 }
1743
1744 // We've processed everything in the "frame". Now determine which variables
1745 // cannot be represented by a dbg.declare.
1746 for (auto Var : VarsTouchedThisFrame) {
1747 LocKind Loc = getLocKind(LiveSet, Var);
1748 // If a variable's LocKind is anything other than LocKind::Mem then we
1749 // must note that it cannot be represented with a dbg.declare.
1750 // Note that this check is enough without having to check the result of
1751 // joins() because for join to produce anything other than Mem after
1752 // we've already seen a Mem we'd be joining None or Val with Mem. In that
1753 // case, we've already hit this codepath when we set the LocKind to Val
1754 // or None in that block.
1755 if (Loc != LocKind::Mem) {
1756 DebugVariable DbgVar = FnVarLocs->getVariable(Var);
1757 DebugAggregate Aggr{DbgVar.getVariable(), DbgVar.getInlinedAt()};
1758 NotAlwaysStackHomed.insert(Aggr);
1759 }
1760 }
1761 VarsTouchedThisFrame.clear();
1762 }
1763}
1764
1765AssignmentTrackingLowering::LocKind
1766AssignmentTrackingLowering::joinKind(LocKind A, LocKind B) {
1767 // Partial order:
1768 // None > Mem, Val
1769 return A == B ? A : LocKind::None;
1770}
1771
1772AssignmentTrackingLowering::Assignment
1773AssignmentTrackingLowering::joinAssignment(const Assignment &A,
1774 const Assignment &B) {
1775 // Partial order:
1776 // NoneOrPhi(null, null) > Known(v, ?s)
1777
1778 // If either are NoneOrPhi the join is NoneOrPhi.
1779 // If either value is different then the result is
1780 // NoneOrPhi (joining two values is a Phi).
1781 if (!A.isSameSourceAssignment(B))
1782 return Assignment::makeNoneOrPhi();
1783 if (A.Status == Assignment::NoneOrPhi)
1784 return Assignment::makeNoneOrPhi();
1785
1786 // Source is used to lookup the value + expression in the debug program if
1787 // the stack slot gets assigned a value earlier than expected. Because
1788 // we're only tracking the one dbg.assign, we can't capture debug PHIs.
1789 // It's unlikely that we're losing out on much coverage by avoiding that
1790 // extra work.
1791 // The Source may differ in this situation:
1792 // Pred.1:
1793 // dbg.assign i32 0, ..., !1, ...
1794 // Pred.2:
1795 // dbg.assign i32 1, ..., !1, ...
1796 // Here the same assignment (!1) was performed in both preds in the source,
1797 // but we can't use either one unless they are identical (e.g. .we don't
1798 // want to arbitrarily pick between constant values).
1799 auto JoinSource = [&]() -> DbgAssignIntrinsic * {
1800 if (A.Source == B.Source)
1801 return A.Source;
1802 if (A.Source == nullptr || B.Source == nullptr)
1803 return nullptr;
1804 if (A.Source->isIdenticalTo(B.Source))
1805 return A.Source;
1806 return nullptr;
1807 };
1808 DbgAssignIntrinsic *Source = JoinSource();
1809 assert(A.Status == B.Status && A.Status == Assignment::Known);
1810 assert(A.ID == B.ID);
1811 return Assignment::make(A.ID, Source);
1812}
1813
// Join two per-block dataflow facts, delegating to BlockInfo::join and
// passing along the number of tracked variables.
AssignmentTrackingLowering::BlockInfo
AssignmentTrackingLowering::joinBlockInfo(const BlockInfo &A,
                                          const BlockInfo &B) {
  return BlockInfo::join(A, B, TrackedVariablesVectorSize);
}
1819
// Compute the LiveIn fact for BB by joining the LiveOut facts of its
// already-visited predecessors, storing the result in the LiveIn map.
// Returns true if BB's stored LiveIn entry changed (or was newly created).
bool AssignmentTrackingLowering::join(
    const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited) {

  // Ignore backedges if we have not visited the predecessor yet. As the
  // predecessor hasn't yet had locations propagated into it, most locations
  // will not yet be valid, so treat them as all being uninitialized and
  // potentially valid. If a location guessed to be correct here is
  // invalidated later, we will remove it when we revisit this block. This
  // is essentially the same as initialising all LocKinds and Assignments to
  // an implicit ⊥ value which is the identity value for the join operation.
  for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
    const BasicBlock *Pred = *I;
    if (Visited.count(Pred))
      VisitedPreds.push_back(Pred);
  }

  // No preds visited yet: start BB from an empty (bottom) LiveIn.
  if (VisitedPreds.empty()) {
    auto It = LiveIn.try_emplace(&BB, BlockInfo());
    bool DidInsert = It.second;
    if (DidInsert)
      It.first->second.init(TrackedVariablesVectorSize);
    return /*Changed*/ DidInsert;
  }

  // Exactly one visited pred. Copy the LiveOut from that pred into BB LiveIn.
  if (VisitedPreds.size() == 1) {
    const BlockInfo &PredLiveOut = LiveOut.find(VisitedPreds[0])->second;
    auto CurrentLiveInEntry = LiveIn.find(&BB);

    // Check if there isn't an entry, or there is but the LiveIn set has
    // changed (expensive check).
    if (CurrentLiveInEntry == LiveIn.end())
      LiveIn.insert(std::make_pair(&BB, PredLiveOut));
    else if (PredLiveOut != CurrentLiveInEntry->second)
      CurrentLiveInEntry->second = PredLiveOut;
    else
      return /*Changed*/ false;
    return /*Changed*/ true;
  }

  // More than one pred. Join LiveOuts of blocks 1 and 2.
  assert(VisitedPreds.size() > 1);
  const BlockInfo &PredLiveOut0 = LiveOut.find(VisitedPreds[0])->second;
  const BlockInfo &PredLiveOut1 = LiveOut.find(VisitedPreds[1])->second;
  BlockInfo BBLiveIn = joinBlockInfo(PredLiveOut0, PredLiveOut1);

  // Join the LiveOuts of subsequent blocks.
  ArrayRef Tail = ArrayRef(VisitedPreds).drop_front(2);
  for (const BasicBlock *Pred : Tail) {
    const auto &PredLiveOut = LiveOut.find(Pred);
    assert(PredLiveOut != LiveOut.end() &&
           "block should have been processed already");
    BBLiveIn = joinBlockInfo(std::move(BBLiveIn), PredLiveOut->second);
  }

  // Save the joined result for BB.
  auto CurrentLiveInEntry = LiveIn.find(&BB);
  // Check if there isn't an entry, or there is but the LiveIn set has changed
  // (expensive check).
  if (CurrentLiveInEntry == LiveIn.end())
    LiveIn.try_emplace(&BB, std::move(BBLiveIn));
  else if (BBLiveIn != CurrentLiveInEntry->second)
    CurrentLiveInEntry->second = std::move(BBLiveIn);
  else
    return /*Changed*/ false;
  return /*Changed*/ true;
}
1889
/// Return true if A fully contains B.
  // B is contained in A iff B starts at or after A's start and ends at or
  // before A's end (offsets and sizes are in bits).
  auto ALeft = A.OffsetInBits;
  auto BLeft = B.OffsetInBits;
  if (BLeft < ALeft)
    return false;

  auto ARight = ALeft + A.SizeInBits;
  auto BRight = BLeft + B.SizeInBits;
  if (BRight > ARight)
    return false;
  return true;
}
1904
// Return assignment info for the store-like instruction I (stores and memory
// intrinsics), or std::nullopt for anything else.
static std::optional<at::AssignmentInfo>
  // Don't bother checking if this is an AllocaInst. We know this
  // instruction has no tag which means there are no variables associated
  // with it.
  if (const auto *SI = dyn_cast<StoreInst>(&I))
    return at::getAssignmentInfo(Layout, SI);
  if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
    return at::getAssignmentInfo(Layout, MI);
  // Alloca or non-store-like inst.
  return std::nullopt;
}
1917
/// Build a map of {Variable x: Variables y} where all variable fragments
/// contained within the variable fragment x are in set y. This means that
/// y does not contain all overlaps because partial overlaps are excluded.
///
/// While we're iterating over the function, add single location defs for
/// dbg.declares to \p FnVarLocs.
///
/// Variables that are interesting to this pass are added to
/// FnVarLocs->Variables first. TrackedVariablesVectorSize is set to the ID of
/// the last interesting variable plus 1, meaning variables with ID 1
/// (inclusive) to TrackedVariablesVectorSize (exclusive) are interesting. The
/// subsequent variables are either stack homed or fully promoted.
///
/// Finally, populate UntaggedStoreVars with a mapping of untagged stores to
/// the stored-to variable fragments.
///
/// These tasks are bundled together to reduce the number of times we need
/// to iterate over the function as they can be achieved together in one pass.
    Function &Fn, FunctionVarLocsBuilder *FnVarLocs,
    const DenseSet<DebugAggregate> &VarsWithStackSlot,
    unsigned &TrackedVariablesVectorSize) {
  // Map of Variable: [Fragments].
  // Iterate over all instructions:
  // - dbg.declare    -> add single location variable record
  // - dbg.*          -> Add fragments to FragmentMap
  // - untagged store -> Add fragments to FragmentMap and update
  //                     UntaggedStoreVars.
  // We need to add fragments for untagged stores too so that we can correctly
  // clobber overlapped fragment locations later.
  for (auto &BB : Fn) {
    for (auto &I : BB) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
        Declares.push_back(DDI);
      } else if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
        DebugVariable DV = DebugVariable(DII);
        DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
        if (!VarsWithStackSlot.contains(DA))
          continue;
        if (Seen.insert(DV).second)
          FragmentMap[DA].push_back(DV);
      } else if (auto Info = getUntaggedStoreAssignmentInfo(
                     I, Fn.getParent()->getDataLayout())) {
        // Find markers linked to this alloca.
          // Discard the fragment if it covers the entire variable.
          std::optional<DIExpression::FragmentInfo> FragInfo =
              [&Info, DAI]() -> std::optional<DIExpression::FragmentInfo> {
            F.OffsetInBits = Info->OffsetInBits;
            F.SizeInBits = Info->SizeInBits;
            // Fold in any pre-existing fragment offset from the marker's
            // expression.
            if (auto ExistingFrag = DAI->getExpression()->getFragmentInfo())
              F.OffsetInBits += ExistingFrag->OffsetInBits;
            if (auto Sz = DAI->getVariable()->getSizeInBits()) {
              if (F.OffsetInBits == 0 && F.SizeInBits == *Sz)
                return std::nullopt;
            }
            return F;
          }();

          DebugVariable DV = DebugVariable(DAI->getVariable(), FragInfo,
                                           DAI->getDebugLoc().getInlinedAt());
          DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
          if (!VarsWithStackSlot.contains(DA))
            continue;

          // Cache this info for later.
          UntaggedStoreVars[&I].push_back(
              {FnVarLocs->insertVariable(DV), *Info});

          if (Seen.insert(DV).second)
            FragmentMap[DA].push_back(DV);
        }
      }
    }
  }

  // Sort the fragment map for each DebugAggregate in non-descending
  // order of fragment size. Assert no entries are duplicates.
  for (auto &Pair : FragmentMap) {
    SmallVector<DebugVariable, 8> &Frags = Pair.second;
    std::sort(
        Frags.begin(), Frags.end(), [](DebugVariable Next, DebugVariable Elmt) {
          assert(!(Elmt.getFragmentOrDefault() == Next.getFragmentOrDefault()));
          return Elmt.getFragmentOrDefault().SizeInBits >
                 Next.getFragmentOrDefault().SizeInBits;
        });
  }

  // Build the map.
  for (auto &Pair : FragmentMap) {
    auto &Frags = Pair.second;
    for (auto It = Frags.begin(), IEnd = Frags.end(); It != IEnd; ++It) {
      DIExpression::FragmentInfo Frag = It->getFragmentOrDefault();
      // Find the frags that this is contained within.
      //
      // Because Frags is sorted by size and none have the same offset and
      // size, we know that this frag can only be contained by subsequent
      // elements.
      ++OtherIt;
      VariableID ThisVar = FnVarLocs->insertVariable(*It);
      for (; OtherIt != IEnd; ++OtherIt) {
        DIExpression::FragmentInfo OtherFrag = OtherIt->getFragmentOrDefault();
        VariableID OtherVar = FnVarLocs->insertVariable(*OtherIt);
        if (fullyContains(OtherFrag, Frag))
          Map[OtherVar].push_back(ThisVar);
      }
    }
  }

  // VariableIDs are 1-based so the variable-tracking bitvector needs
  // NumVariables plus 1 bits.
  TrackedVariablesVectorSize = FnVarLocs->getNumVariables() + 1;

  // Finally, insert the declares afterwards, so the first IDs are all
  // partially stack homed vars.
  for (auto *DDI : Declares)
    FnVarLocs->addSingleLocVar(DebugVariable(DDI), DDI->getExpression(),
                               DDI->getDebugLoc(), DDI->getWrappedLocation());
  return Map;
}
2045
// Run the full lowering for Fn: dataflow over the CFG, then emission of
// variable location definitions into FnVarLocsBuilder. Returns true if any
// location definitions were inserted.
bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
  if (Fn.size() > MaxNumBlocks) {
    LLVM_DEBUG(dbgs() << "[AT] Dropping var locs in: " << Fn.getName()
                      << ": too many blocks (" << Fn.size() << ")\n");
    at::deleteAll(&Fn);
    return false;
  }

  FnVarLocs = FnVarLocsBuilder;

  // The general structure here is inspired by VarLocBasedImpl.cpp
  // (LiveDebugValues).

  // Build the variable fragment overlap map.
  // Note that this pass doesn't handle partial overlaps correctly (FWIW
  // neither does LiveDebugVariables) because that is difficult to do and
  // appears to be a rare occurrence.
      Fn, FnVarLocs, *VarsWithStackSlot, UntaggedStoreVars,
      TrackedVariablesVectorSize);

  // Prepare for traversal. The worklists hold RPO numbers with a
  // std::greater comparator, so the smallest (earliest in RPO) block is
  // always popped first.
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist;
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Pending;
  { // Init OrderToBB and BBToOrder.
    unsigned int RPONumber = 0;
    for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
      OrderToBB[RPONumber] = *RI;
      BBToOrder[*RI] = RPONumber;
      Worklist.push(RPONumber);
      ++RPONumber;
    }
    LiveIn.init(RPONumber);
    LiveOut.init(RPONumber);
  }

  // Perform the traversal.
  //
  // This is a standard "union of predecessor outs" dataflow problem. To solve
  // it, we perform join() and process() using the two worklist method until
  // the LiveIn data for each block becomes unchanging. The "proof" that this
  // terminates can be put together by looking at the comments around LocKind,
  // Assignment, and the various join methods, which show that all the elements
  // involved are made up of join-semilattices; LiveIn(n) can only
  // monotonically increase in value throughout the dataflow.
  //
  while (!Worklist.empty()) {
    // We track what is on the pending worklist to avoid inserting the same
    // thing twice.
    LLVM_DEBUG(dbgs() << "Processing Worklist\n");
    while (!Worklist.empty()) {
      BasicBlock *BB = OrderToBB[Worklist.top()];
      LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
      Worklist.pop();
      bool InChanged = join(*BB, Visited);
      // Always consider LiveIn changed on the first visit.
      InChanged |= Visited.insert(BB).second;
      if (InChanged) {
        LLVM_DEBUG(dbgs() << BB->getName() << " has new InLocs, process it\n");
        // Mutate a copy of LiveIn while processing BB. After calling process
        // LiveSet is the LiveOut set for BB.
        BlockInfo LiveSet = LiveIn[BB];

        // Process the instructions in the block.
        process(*BB, &LiveSet);

        // Relatively expensive check: has anything changed in LiveOut for BB?
        if (LiveOut[BB] != LiveSet) {
          LLVM_DEBUG(dbgs() << BB->getName()
                            << " has new OutLocs, add succs to worklist: [ ");
          LiveOut[BB] = std::move(LiveSet);
          for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
            if (OnPending.insert(*I).second) {
              LLVM_DEBUG(dbgs() << I->getName() << " ");
              Pending.push(BBToOrder[*I]);
            }
          }
          LLVM_DEBUG(dbgs() << "]\n");
        }
      }
    }
    Worklist.swap(Pending);
    // At this point, pending must be empty, since it was just the empty
    // worklist
    assert(Pending.empty() && "Pending should be empty");
  }

  // That's the hard part over. Now we just have some admin to do.

  // Record whether we inserted any intrinsics.
  bool InsertedAnyIntrinsics = false;

  // Identify and add defs for single location variables.
  //
  // Go through all of the defs that we plan to add. If the aggregate variable
  // it's a part of is not in the NotAlwaysStackHomed set we can emit a single
  // location def and omit the rest. Add an entry to AlwaysStackHomed so that
  // we can identify those unneeded defs later.
  DenseSet<DebugAggregate> AlwaysStackHomed;
  for (const auto &Pair : InsertBeforeMap) {
    const auto &Vec = Pair.second;
    for (VarLocInfo VarLoc : Vec) {
      DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
      DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};

      // Skip this Var if it's not always stack homed.
      if (NotAlwaysStackHomed.contains(Aggr))
        continue;

      // Skip complex cases such as when different fragments of a variable have
      // been split into different allocas. Skipping in this case means falling
      // back to using a list of defs (which could reduce coverage, but is no
      // less correct).
      bool Simple =
          VarLoc.Expr->getNumElements() == 1 && VarLoc.Expr->startsWithDeref();
      if (!Simple) {
        NotAlwaysStackHomed.insert(Aggr);
        continue;
      }

      // All source assignments to this variable remain and all stores to any
      // part of the variable store to the same address (with varying
      // offsets). We can just emit a single location for the whole variable.
      //
      // Unless we've already done so, create the single location def now.
      if (AlwaysStackHomed.insert(Aggr).second) {
        assert(!VarLoc.Values.hasArgList() &&
               isa<AllocaInst>(VarLoc.Values.getVariableLocationOp(0)));
        // TODO: When more complex cases are handled VarLoc.Expr should be
        // built appropriately rather than always using an empty DIExpression.
        // The assert below is a reminder.
        assert(Simple);
        VarLoc.Expr = DIExpression::get(Fn.getContext(), std::nullopt);
        DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
        FnVarLocs->addSingleLocVar(Var, VarLoc.Expr, VarLoc.DL, VarLoc.Values);
        InsertedAnyIntrinsics = true;
      }
    }
  }

  // Insert the other DEFs.
  for (const auto &[InsertBefore, Vec] : InsertBeforeMap) {
    for (const VarLocInfo &VarLoc : Vec) {
      DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
      DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};
      // If this variable is always stack homed then we have already inserted a
      // dbg.declare and deleted this dbg.value.
      if (AlwaysStackHomed.contains(Aggr))
        continue;
      NewDefs.push_back(VarLoc);
      InsertedAnyIntrinsics = true;
    }

    FnVarLocs->setWedge(InsertBefore, std::move(NewDefs));
  }

  InsertedAnyIntrinsics |= emitPromotedVarLocs(FnVarLocs);

  return InsertedAnyIntrinsics;
}
2216
2217bool AssignmentTrackingLowering::emitPromotedVarLocs(
2218 FunctionVarLocsBuilder *FnVarLocs) {
2219 bool InsertedAnyIntrinsics = false;
2220 // Go through every block, translating debug intrinsics for fully promoted
2221 // variables into FnVarLocs location defs. No analysis required for these.
2222 for (auto &BB : Fn) {
2223 for (auto &I : BB) {
2224 // Skip instructions other than dbg.values and dbg.assigns.
2225 auto *DVI = dyn_cast<DbgValueInst>(&I);
2226 if (!DVI)
2227 continue;
2228 // Skip variables that haven't been promoted - we've dealt with those
2229 // already.
2230 if (VarsWithStackSlot->contains(getAggregate(DVI)))
2231 continue;
2232 Instruction *InsertBefore = I.getNextNode();
2233 assert(InsertBefore && "Unexpected: debug intrinsics after a terminator");
2234 FnVarLocs->addVarLoc(InsertBefore, DebugVariable(DVI),
2235 DVI->getExpression(), DVI->getDebugLoc(),
2236 DVI->getWrappedLocation());
2237 InsertedAnyIntrinsics = true;
2238 }
2239 }
2240 return InsertedAnyIntrinsics;
2241}
2242
/// Remove redundant definitions within sequences of consecutive location defs.
/// This is done using a backward scan to keep the last def describing a
/// specific variable/fragment.
///
/// This implements removeRedundantDbgInstrsUsingBackwardScan from
/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics.
static bool
                                        FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
  // Tracks, per aggregate variable, which bits have already been defined by
  // a later def in the current run of consecutive defs.
  SmallDenseMap<DebugAggregate, BitVector> VariableDefinedBits;
  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : reverse(*BB)) {
    if (!isa<DbgVariableIntrinsic>(I)) {
      // Sequence of consecutive defs ended. Clear map for the next one.
      VariableDefinedBits.clear();
    }

    // Get the location defs that start just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs, reversed because we're scanning backwards.
    SmallVector<VarLocInfo> NewDefsReversed;

    // Iterate over the existing defs in reverse.
    for (auto RIt = Locs->rbegin(), REnd = Locs->rend(); RIt != REnd; ++RIt) {
      NumDefsScanned++;
      DebugAggregate Aggr =
          getAggregate(FnVarLocs.getVariable(RIt->VariableID));
      uint64_t SizeInBits = Aggr.first->getSizeInBits().value_or(0);

      if (SizeInBits == 0) {
        // If the size is unknown (0) then keep this location def to be safe.
        NewDefsReversed.push_back(*RIt);
        continue;
      }

      // Only keep this location definition if it is not fully eclipsed by
      // other definitions in this wedge that come after it

      // Insert the bits the location definition defines.
      auto InsertResult =
          VariableDefinedBits.try_emplace(Aggr, BitVector(SizeInBits));
      bool FirstDefinition = InsertResult.second;
      BitVector &DefinedBits = InsertResult.first->second;

          RIt->Expr->getFragmentInfo().value_or(
              DIExpression::FragmentInfo(SizeInBits, 0));
      bool InvalidFragment = Fragment.endInBits() > SizeInBits;

      // If this defines any previously undefined bits, keep it.
      if (FirstDefinition || InvalidFragment ||
          DefinedBits.find_first_unset_in(Fragment.startInBits(),
                                          Fragment.endInBits()) != -1) {
        if (!InvalidFragment)
          DefinedBits.set(Fragment.startInBits(), Fragment.endInBits());
        NewDefsReversed.push_back(*RIt);
        continue;
      }

      // Redundant def found: throw it away. Since the wedge of defs is being
      // rebuilt, doing nothing is the same as deleting an entry.
      ChangedThisWedge = true;
      NumDefsRemoved++;
    }

    // Un-reverse the defs and replace the wedge with the pruned version.
    if (ChangedThisWedge) {
      std::reverse(NewDefsReversed.begin(), NewDefsReversed.end());
      FnVarLocs.setWedge(&I, std::move(NewDefsReversed));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2328
/// Remove redundant location defs using a forward scan. This can remove a
/// location definition that is redundant due to indicating that a variable has
/// the same value as is already being indicated by an earlier def.
///
/// This implements removeRedundantDbgInstrsUsingForwardScan from
/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics
static bool
                                       FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
      VariableMap;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs.

    // Iterate over the existing defs.
    for (const VarLocInfo &Loc : *Locs) {
      NumDefsScanned++;
      // Key on the whole variable (no fragment) so that any def to any part
      // of the variable refreshes the map entry.
      DebugVariable Key(FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                        std::nullopt, Loc.DL.getInlinedAt());
      auto VMI = VariableMap.find(Key);

      // Update the map if we found a new value/expression describing the
      // variable, or if the variable wasn't mapped already.
      if (VMI == VariableMap.end() || VMI->second.first != Loc.Values ||
          VMI->second.second != Loc.Expr) {
        VariableMap[Key] = {Loc.Values, Loc.Expr};
        NewDefs.push_back(Loc);
        continue;
      }

      // Did not insert this Loc, which is the same as removing it.
      ChangedThisWedge = true;
      NumDefsRemoved++;
    }

    // Replace the existing wedge with the pruned version.
    if (ChangedThisWedge) {
      FnVarLocs.setWedge(&I, std::move(NewDefs));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2388
// Remove undef location defs from the entry block that precede any non-undef
// def of the same (overlapping) variable fragment.
static bool
                                     FunctionVarLocsBuilder &FnVarLocs) {
  assert(BB->isEntryBlock());
  // Do extra work to ensure that we remove semantically unimportant undefs.
  //
  // This is to work around the fact that SelectionDAG will hoist dbg.values
  // using argument values to the top of the entry block. That can move arg
  // dbg.values before undef and constant dbg.values which they previously
  // followed. The easiest thing to do is to just try to feed SelectionDAG
  // input it's happy with.
  //
  // Map of {Variable x: Fragments y} where the fragments y of variable x
  // have at least one non-undef location defined already. Don't use directly,
  // instead call DefineBits and HasDefinedBits.
      VarsWithDef;
  // Specify that V (a fragment of A) has a non-undef location.
  auto DefineBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    VarsWithDef[A].insert(V.getFragmentOrDefault());
  };
  // Return true if a non-undef location has been defined for V (a fragment of
  // A). Doesn't imply that the location is currently non-undef, just that a
  // non-undef location has been seen previously.
  auto HasDefinedBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    auto FragsIt = VarsWithDef.find(A);
    if (FragsIt == VarsWithDef.end())
      return false;
    return llvm::any_of(FragsIt->second, [V](auto Frag) {
      return DIExpression::fragmentsOverlap(Frag, V.getFragmentOrDefault());
    });
  };

  bool Changed = false;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs.

    // Iterate over the existing defs.
    for (const VarLocInfo &Loc : *Locs) {
      NumDefsScanned++;
      DebugAggregate Aggr{FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                          Loc.DL.getInlinedAt()};
      DebugVariable Var = FnVarLocs.getVariable(Loc.VariableID);

      // Remove undef entries that are encountered before any non-undef
      // intrinsics from the entry block.
      if (Loc.Values.isKillLocation(Loc.Expr) && !HasDefinedBits(Aggr, Var)) {
        // Did not insert this Loc, which is the same as removing it.
        NumDefsRemoved++;
        ChangedThisWedge = true;
        continue;
      }

      DefineBits(Aggr, Var);
      NewDefs.push_back(Loc);
    }

    // Replace the existing wedge with the pruned version.
    if (ChangedThisWedge) {
      FnVarLocs.setWedge(&I, std::move(NewDefs));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2469
                                   FunctionVarLocsBuilder &FnVarLocs) {
  // Apply each redundancy-removal scan in turn; the entry block additionally
  // gets undef-def pruning. Returns true if any scan changed anything.
  bool MadeChanges = false;
  MadeChanges |= removeRedundantDbgLocsUsingBackwardScan(BB, FnVarLocs);
  if (BB->isEntryBlock())
    MadeChanges |= removeUndefDbgLocsFromEntryBlock(BB, FnVarLocs);
  MadeChanges |= removeRedundantDbgLocsUsingForwardScan(BB, FnVarLocs);

  if (MadeChanges)
    LLVM_DEBUG(dbgs() << "Removed redundant dbg locs from: " << BB->getName()
                      << "\n");
  return MadeChanges;
}
2483
  // Collect the aggregate variables that are linked (via markers) to any
  // instruction in Fn; these are the ones considered "interesting".
  for (auto &BB : Fn) {
    for (auto &I : BB) {
      // Any variable linked to an instruction is considered
      // interesting. Ideally we only need to check Allocas, however, a
      // DIAssignID might get dropped from an alloca but not stores. In that
      // case, we need to consider the variable interesting for NFC behaviour
      // with this change. TODO: Consider only looking at allocas.
        Result.insert({DAI->getVariable(), DAI->getDebugLoc().getInlinedAt()});
      }
    }
  }
  return Result;
}
2500
// Top-level driver: run AssignmentTrackingLowering over Fn and, if it made
// changes, run MemLocFragmentFill and prune redundant location defs.
static void analyzeFunction(Function &Fn, const DataLayout &Layout,
                            FunctionVarLocsBuilder *FnVarLocs) {
  // The analysis will generate location definitions for all variables, but we
  // only need to perform a dataflow on the set of variables which have a stack
  // slot. Find those now.
  DenseSet<DebugAggregate> VarsWithStackSlot = findVarsWithStackSlot(Fn);

  bool Changed = false;

  // Use a scope block to clean up AssignmentTrackingLowering before running
  // MemLocFragmentFill to reduce peak memory consumption.
  {
    AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
    Changed = Pass.run(FnVarLocs);
  }

  if (Changed) {
    MemLocFragmentFill Pass(Fn, &VarsWithStackSlot,
    Pass.run(FnVarLocs);

    // Remove redundant entries. As well as reducing memory consumption and
    // avoiding waiting cycles later by burning some now, this has another
    // important job. That is to work around some SelectionDAG quirks. See
    // removeRedundantDbgLocsUsingForwardScan comments for more info on that.
    for (auto &BB : Fn)
      removeRedundantDbgLocs(&BB, *FnVarLocs);
  }
}
2530
  // Pass entry point. Does nothing unless assignment tracking is enabled for
  // the module; otherwise runs the analysis and stores the results.
  if (!isAssignmentTrackingEnabled(*F.getParent()))
    return false;

  LLVM_DEBUG(dbgs() << "AssignmentTrackingAnalysis run on " << F.getName()
                    << "\n");
  auto DL = std::make_unique<DataLayout>(F.getParent());

  // Clear previous results.
  Results->clear();

  analyzeFunction(F, *DL.get(), &Builder);

  // Save these results.
  Results->init(Builder);

  if (PrintResults && isFunctionInPrintList(F.getName()))
    Results->print(errs(), F);

  // Return false because this pass does not modify the function.
  return false;
}
2554
2556 : FunctionPass(ID), Results(std::make_unique<FunctionVarLocs>()) {}
2557
2559
2561 "Assignment Tracking Analysis", false, true)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
std::pair< const DILocalVariable *, const DILocation * > DebugAggregate
A whole (unfragmented) source variable.
static void analyzeFunction(Function &Fn, const DataLayout &Layout, FunctionVarLocsBuilder *FnVarLocs)
static std::pair< Value *, DIExpression * > walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start, DIExpression *Expression)
Walk backwards along constant GEPs and bitcasts to the base storage from Start as far as possible.
static DIAssignID * getIDFromMarker(const DbgAssignIntrinsic &DAI)
static DenseSet< DebugAggregate > findVarsWithStackSlot(Function &Fn)
static bool fullyContains(DIExpression::FragmentInfo A, DIExpression::FragmentInfo B)
Return true if A fully contains B.
static DebugAggregate getAggregate(const DbgVariableIntrinsic *DII)
static std::optional< at::AssignmentInfo > getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout)
static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(Function &Fn, FunctionVarLocsBuilder *FnVarLocs, const DenseSet< DebugAggregate > &VarsWithStackSlot, AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars, unsigned &TrackedVariablesVectorSize)
Build a map of {Variable x: Variables y} where all variable fragments contained within the variable f...
static bool removeUndefDbgLocsFromEntryBlock(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
static cl::opt< bool > PrintResults("print-debug-ata", cl::init(false), cl::Hidden)
Print the results of the analysis. Respects -filter-print-funcs.
const char * locStr(AssignmentTrackingLowering::LocKind Loc)
static bool removeRedundantDbgLocsUsingForwardScan(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
Remove redundant location defs using a forward scan.
static bool removeRedundantDbgLocs(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
static cl::opt< bool > EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true), cl::Hidden)
Option for debugging the pass, determines if the memory location fragment filling happens after gener...
static DIAssignID * getIDFromInst(const Instruction &I)
static std::optional< int64_t > getDerefOffsetInBytes(const DIExpression *DIExpr)
Extract the offset used in DIExpr.
static bool removeRedundantDbgLocsUsingBackwardScan(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
Remove redundant definitions within sequences of consecutive location defs.
static cl::opt< cl::boolOrDefault > CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden)
Coalesce adjacent dbg locs describing memory locations that have contiguous fragments.
static cl::opt< unsigned > MaxNumBlocks("debug-ata-max-blocks", cl::init(10000), cl::desc("Maximum num basic blocks before debug info dropped"), cl::Hidden)
static bool shouldCoalesceFragments(Function &F)
assume Assume Builder
for(auto &MBB :MF)
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
dxil metadata DXIL Metadata Emit
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines DenseMapInfo traits for DenseMap.
This file contains constants used for implementing Dwarf debug support.
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1260
#define DEBUG_TYPE
IRTranslator LLVM IR MI
This file implements a coalescing interval map for small objects.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define P(N)
if(VerifyEach)
bool Debug
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Scalar Replacement Of Aggregates
Definition: SROA.cpp:5172
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Value * RHS
Value * LHS
Helper class to build FunctionVarLocs, since that class isn't easy to modify.
void addVarLoc(Instruction *Before, DebugVariable Var, DIExpression *Expr, DebugLoc DL, RawLocationWrapper R)
Add a def to the wedge of defs just before /p Before.
void setWedge(const Instruction *Before, SmallVector< VarLocInfo > &&Wedge)
Replace the defs that come just before /p Before with /p Wedge.
const SmallVectorImpl< VarLocInfo > * getWedge(const Instruction *Before) const
Return ptr to wedge of defs or nullptr if no defs come just before /p Before.
void addSingleLocVar(DebugVariable Var, DIExpression *Expr, DebugLoc DL, RawLocationWrapper R)
Add a def for a variable that is valid for its lifetime.
VariableID insertVariable(DebugVariable V)
Find or insert V and return the ID.
const DebugVariable & getVariable(VariableID ID) const
Get a variable from its ID.
Class for arbitrary precision integers.
Definition: APInt.h:75
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1494
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:459
an instruction to allocate memory on the stack
Definition: Instructions.h:58
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator end()
Definition: BasicBlock.h:325
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:323
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:404
int find_first_unset_in(unsigned Begin, unsigned End) const
find_first_unset_in - Returns the index of the first unset bit in the range [Begin,...
Definition: BitVector.h:261
BitVector & set()
Definition: BitVector.h:351
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
Assignment ID.
A structured debug information entry.
Definition: DIE.h:744
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
ArrayRef< uint64_t > getElements() const
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Debug location.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
StringRef getName() const
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.assign instruction.
DIAssignID * getAssignID() const
bool isKillAddress() const
Check whether this kills the address component.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
DILocalVariable * getVariable() const
DIExpression * getExpression() const
RawLocationWrapper getWrappedLocation() const
A debug info location.
Definition: DebugLoc.h:33
DILocation * getInlinedAt() const
Definition: DebugLoc.cpp:39
Identifies a unique instance of a variable.
const DILocation * getInlinedAt() const
const DILocalVariable * getVariable() const
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:235
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void init(unsigned InitNumEntries)
Definition: DenseMap.h:805
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
Data structure describing the variable locations in a function.
void print(raw_ostream &OS, const Function &Fn) const
const VarLocInfo * locs_begin(const Instruction *Before) const
First variable location definition that comes before Before.
const VarLocInfo * single_locs_begin() const
const VarLocInfo * locs_end(const Instruction *Before) const
One past the last variable location definition that comes before Before.
const VarLocInfo * single_locs_end() const
One past the last single-location variable location definition.
void init(FunctionVarLocsBuilder &Builder)
size_t size() const
Definition: Function.h:761
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:358
bool isTerminator() const
Definition: Instruction.h:171
const_iterator begin() const
Definition: IntervalMap.h:1146
void insert(KeyT a, KeyT b, ValT y)
insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
Definition: IntervalMap.h:1129
void clear()
clear - Remove all entries.
Definition: IntervalMap.h:1330
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1399
void push_back(MachineInstr *MI)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
Root of the metadata hierarchy.
Definition: Metadata.h:61
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:398
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1750
Lightweight class that wraps the location operand metadata of a debug intrinsic.
Value * getVariableLocationOp(unsigned OpIdx) const
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
static IntegerType * getInt1Ty(LLVMContext &C)
UniqueVector - This class produces a sequential ID number (base 1) for each unique entry that is adde...
Definition: UniqueVector.h:24
unsigned insert(const T &Entry)
insert - Append entry to the vector if it doesn't already exist.
Definition: UniqueVector.h:40
size_t size() const
size - Returns the number of entries in the vector.
Definition: UniqueVector.h:87
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:392
LLVM Value Representation.
Definition: Value.h:74
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:119
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
void deleteAll(Function *F)
Remove all Assignment Tracking related intrinsics and metadata from F.
Definition: DebugInfo.cpp:1782
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
Definition: DebugInfo.cpp:1741
std::optional< AssignmentInfo > getAssignmentInfo(const DataLayout &DL, const MemIntrinsic *I)
Definition: DebugInfo.cpp:1813
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
std::optional< const char * > toString(const std::optional< DWARFFormValue > &V)
Take an optional DWARFFormValue and try to extract a string value from it.
@ DW_OP_LLVM_fragment
Only used in LLVM metadata.
Definition: Dwarf.h:141
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
@ Offset
Definition: DWP.cpp:406
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:102
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
bool operator!=(uint64_t V1, const APInt &V2)
Definition: APInt.h:2047
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:99
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:511
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isFunctionInPrintList(StringRef FunctionName)
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
VariableID
Type wrapper for integer ID for Variables. 0 is reserved.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
Definition: DebugInfo.cpp:2031
constexpr std::nullopt_t None
Definition: None.h:28
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
Definition: BitVector.h:858
Holds the characteristics of one fragment of a larger variable.
uint64_t endInBits() const
Return the index of the bit after the end of the fragment, e.g.
uint64_t startInBits() const
Return the index of the first bit of the fragment.
static bool isEqual(const VariableID &LHS, const VariableID &RHS)
static unsigned getHashValue(const VariableID &Val)
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:51
Variable location definition used by FunctionVarLocs.
RawLocationWrapper Values