1//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Analysis/LazyCallGraph.h"
10
11#include "llvm/ADT/ArrayRef.h"
12#include "llvm/ADT/STLExtras.h"
13#include "llvm/ADT/Sequence.h"
14#include "llvm/ADT/SmallPtrSet.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/Analysis/TargetLibraryInfo.h"
17#include "llvm/Analysis/VectorUtils.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalVariable.h"
21#include "llvm/IR/InstIterator.h"
22#include "llvm/IR/Instruction.h"
23#include "llvm/IR/Module.h"
24#include "llvm/IR/PassManager.h"
25#include "llvm/Support/Casting.h"
26#include "llvm/Support/Compiler.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/GraphWriter.h"
29#include "llvm/Support/raw_ostream.h"
30#include <algorithm>
31
32#ifdef EXPENSIVE_CHECKS
33#include "llvm/ADT/ScopeExit.h"
34#endif
35
36using namespace llvm;
37
38#define DEBUG_TYPE "lcg"
39
40void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
41 Edge::Kind EK) {
42 EdgeIndexMap.try_emplace(&TargetN, Edges.size());
43 Edges.emplace_back(TargetN, EK);
44}
45
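// Change the kind of an edge that is already present in this sequence; the
// target must have been recorded in EdgeIndexMap by a prior insertion.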
46void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
47 Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
48}
49
50bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
51 auto IndexMapI = EdgeIndexMap.find(&TargetN);
52 if (IndexMapI == EdgeIndexMap.end())
53 return false;
54
55 Edges[IndexMapI->second] = Edge();
56 EdgeIndexMap.erase(IndexMapI);
57 return true;
58}
59
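// Static helper shared by graph construction and lazy node population: record
// an edge to N in the given edge list and index map unless one already exists.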
60static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
61 DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
62 LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
63 if (!EdgeIndexMap.try_emplace(&N, Edges.size()).second)
64 return;
65
66 LLVM_DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
67 Edges.emplace_back(LazyCallGraph::Edge(N, EK));
68}
69
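// Populate this node's edge sequence by scanning the function body: direct
// calls to defined functions become call edges, and every defined function
// found by transitively walking constant operands becomes a ref edge.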
70LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
71 assert(!Edges && "Must not have already populated the edges for this node!");
72
73 LLVM_DEBUG(dbgs() << " Adding functions called by '" << getName()
74 << "' to the graph.\n");
75
76 Edges = EdgeSequence();
77
78 SmallVector<Constant *, 16> Worklist;
79 SmallPtrSet<Function *, 4> Callees;
80 SmallPtrSet<Constant *, 16> Visited;
81
82 // Find all the potential call graph edges in this function. We track both
83 // actual call edges and indirect references to functions. The direct calls
84 // are trivially added, but to accumulate the latter we walk the instructions
85 // and add every operand which is a constant to the worklist to process
86 // afterward.
87 //
88 // Note that we consider *any* function with a definition to be a viable
89 // edge. Even if the function's definition is subject to replacement by
90 // some other module (say, a weak definition) there may still be
91 // optimizations which essentially speculate based on the definition and
92 // a way to check that the specific definition is in fact the one being
93 // used. For example, this could be done by moving the weak definition to
94 // a strong (internal) definition and making the weak definition be an
95 // alias. Then a test of the address of the weak function against the new
96 // strong definition's address would be an effective way to determine the
97 // safety of optimizing a direct call edge.
98 for (BasicBlock &BB : *F)
99 for (Instruction &I : BB) {
100 if (auto *CB = dyn_cast<CallBase>(&I))
101 if (Function *Callee = CB->getCalledFunction())
102 if (!Callee->isDeclaration())
103 if (Callees.insert(Callee).second) {
104 Visited.insert(Callee);
105 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
106 LazyCallGraph::Edge::Call);
107 }
108
109 for (Value *Op : I.operand_values())
110 if (Constant *C = dyn_cast<Constant>(Op))
111 if (Visited.insert(C).second)
112 Worklist.push_back(C);
113 }
114
115 // We've collected all the constant (and thus potentially function or
116 // function containing) operands to all the instructions in the function.
117 // Process them (recursively) collecting every function found.
118 visitReferences(Worklist, Visited, [&](Function &F) {
119 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
120 LazyCallGraph::Edge::Ref);
121 });
122
123 // Add implicit reference edges to any defined libcall functions (if we
124 // haven't found an explicit edge).
125 for (auto *F : G->LibFunctions)
126 if (!Visited.count(F))
127 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
128 LazyCallGraph::Edge::Ref);
129
130 return *Edges;
131}
132
133void LazyCallGraph::Node::replaceFunction(Function &NewF) {
134 assert(F != &NewF && "Must not replace a function with itself!");
135 F = &NewF;
136}
137
138#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
139LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
140 dbgs() << *this << '\n';
141}
142#endif
143
144static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
145 LibFunc LF;
146
147 // Either this is a normal library function or a "vectorizable"
148 // function. Not using the VFDatabase here because this query
149 // is related only to libraries handled via the TLI.
150 return TLI.getLibFunc(F, LF) ||
151 TLI.isKnownVectorFunctionInLibrary(F.getName());
152}
153
154LazyCallGraph::LazyCallGraph(
155 Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
156 LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
157 << "\n");
158 for (Function &F : M) {
159 if (F.isDeclaration())
160 continue;
161 // If this function is a library function known to LLVM then we want to
162 // synthesize reference edges to it to model the fact that LLVM can turn
163 // arbitrary code into a library function call.
164 if (isKnownLibFunction(F, GetTLI(F)))
165 LibFunctions.insert(&F);
166
167 if (F.hasLocalLinkage())
168 continue;
169
170 // External linkage defined functions have edges to them from other
171 // modules.
172 LLVM_DEBUG(dbgs() << " Adding '" << F.getName()
173 << "' to entry set of the graph.\n");
174 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
175 }
176
177 // Externally visible aliases of internal functions are also viable entry
178 // edges to the module.
179 for (auto &A : M.aliases()) {
180 if (A.hasLocalLinkage())
181 continue;
182 if (Function* F = dyn_cast<Function>(A.getAliasee())) {
183 LLVM_DEBUG(dbgs() << " Adding '" << F->getName()
184 << "' with alias '" << A.getName()
185 << "' to entry set of the graph.\n");
186 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(*F), Edge::Ref);
187 }
188 }
189
190 // Now add entry nodes for functions reachable via initializers to globals.
191 SmallVector<Constant *, 16> Worklist;
192 SmallPtrSet<Constant *, 16> Visited;
193 for (GlobalVariable &GV : M.globals())
194 if (GV.hasInitializer())
195 if (Visited.insert(GV.getInitializer()).second)
196 Worklist.push_back(GV.getInitializer());
197
198 LLVM_DEBUG(
199 dbgs() << " Adding functions referenced by global initializers to the "
200 "entry set.\n");
201 visitReferences(Worklist, Visited, [&](Function &F) {
202 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
203 LazyCallGraph::Edge::Ref);
204 });
205}
206
207LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
208 : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
209 EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
210 SCCMap(std::move(G.SCCMap)), LibFunctions(std::move(G.LibFunctions)) {
211 updateGraphPtrs();
212}
213
214#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
215void LazyCallGraph::verify() {
216 for (RefSCC &RC : postorder_ref_sccs()) {
217 RC.verify();
218 }
219}
220#endif
221
222bool LazyCallGraph::invalidate(Module &, const PreservedAnalyses &PA,
223 ModuleAnalysisManager::Invalidator &) {
224 // Check whether this analysis itself or all analyses on the module have
225 // been preserved.
226 auto PAC = PA.getChecker<llvm::LazyCallGraphAnalysis>();
227 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>());
228}
229
230LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
231 BPA = std::move(G.BPA);
232 NodeMap = std::move(G.NodeMap);
233 EntryEdges = std::move(G.EntryEdges);
234 SCCBPA = std::move(G.SCCBPA);
235 SCCMap = std::move(G.SCCMap);
236 LibFunctions = std::move(G.LibFunctions);
237 updateGraphPtrs();
238 return *this;
239}
240
241#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
242LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
243 dbgs() << *this << '\n';
244}
245#endif
246
247#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
248void LazyCallGraph::SCC::verify() {
249 assert(OuterRefSCC && "Can't have a null RefSCC!");
250 assert(!Nodes.empty() && "Can't have an empty SCC!");
251
252 for (Node *N : Nodes) {
253 assert(N && "Can't have a null node!");
254 assert(OuterRefSCC->G->lookupSCC(*N) == this &&
255 "Node does not map to this SCC!");
256 assert(N->DFSNumber == -1 &&
257 "Must set DFS numbers to -1 when adding a node to an SCC!");
258 assert(N->LowLink == -1 &&
259 "Must set low link to -1 when adding a node to an SCC!");
260 for (Edge &E : **N)
261 assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
262
263#ifdef EXPENSIVE_CHECKS
264 // Verify that all nodes in this SCC can reach all other nodes.
265 SmallVector<Node *, 4> Worklist;
266 SmallPtrSet<Node *, 4> Visited;
267 Worklist.push_back(N);
268 while (!Worklist.empty()) {
269 Node *VisitingNode = Worklist.pop_back_val();
270 if (!Visited.insert(VisitingNode).second)
271 continue;
272 for (Edge &E : (*VisitingNode)->calls())
273 Worklist.push_back(&E.getNode());
274 }
275 for (Node *NodeToVisit : Nodes) {
276 assert(Visited.contains(NodeToVisit) &&
277 "Cannot reach all nodes within SCC");
278 }
279#endif
280 }
281}
282#endif
283
284bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
285 if (this == &C)
286 return false;
287
288 for (Node &N : *this)
289 for (Edge &E : N->calls())
290 if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
291 return true;
292
293 // No edges found.
294 return false;
295}
296
297bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
298 if (this == &TargetC)
299 return false;
300
301 LazyCallGraph &G = *OuterRefSCC->G;
302
303 // Start with this SCC.
304 SmallPtrSet<const SCC *, 16> Visited = {this};
305 SmallVector<const SCC *, 16> Worklist = {this};
306
307 // Walk down the graph until we run out of edges or find a path to TargetC.
308 do {
309 const SCC &C = *Worklist.pop_back_val();
310 for (Node &N : C)
311 for (Edge &E : N->calls()) {
312 SCC *CalleeC = G.lookupSCC(E.getNode());
313 if (!CalleeC)
314 continue;
315
316 // If the callee's SCC is the TargetC, we're done.
317 if (CalleeC == &TargetC)
318 return true;
319
320 // If this is the first time we've reached this SCC, put it on the
321 // worklist to recurse through.
322 if (Visited.insert(CalleeC).second)
323 Worklist.push_back(CalleeC);
324 }
325 } while (!Worklist.empty());
326
327 // No paths found.
328 return false;
329}
330
331LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}
332
333#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
334LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
335 dbgs() << *this << '\n';
336}
337#endif
338
339#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
340void LazyCallGraph::RefSCC::verify() {
341 assert(G && "Can't have a null graph!");
342 assert(!SCCs.empty() && "Can't have an empty SCC!");
343
344 // Verify basic properties of the SCCs.
345 SmallPtrSet<SCC *, 4> SCCSet;
346 for (SCC *C : SCCs) {
347 assert(C && "Can't have a null SCC!");
348 C->verify();
349 assert(&C->getOuterRefSCC() == this &&
350 "SCC doesn't think it is inside this RefSCC!");
351 bool Inserted = SCCSet.insert(C).second;
352 assert(Inserted && "Found a duplicate SCC!");
353 auto IndexIt = SCCIndices.find(C);
354 assert(IndexIt != SCCIndices.end() &&
355 "Found an SCC that doesn't have an index!");
356 }
357
358 // Check that our indices map correctly.
359 for (auto [C, I] : SCCIndices) {
360 assert(C && "Can't have a null SCC in the indices!");
361 assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
362 assert(SCCs[I] == C && "Index doesn't point to SCC!");
363 }
364
365 // Check that the SCCs are in fact in post-order.
366 for (int I = 0, Size = SCCs.size(); I < Size; ++I) {
367 SCC &SourceSCC = *SCCs[I];
368 for (Node &N : SourceSCC)
369 for (Edge &E : *N) {
370 if (!E.isCall())
371 continue;
372 SCC &TargetSCC = *G->lookupSCC(E.getNode());
373 if (&TargetSCC.getOuterRefSCC() == this) {
374 assert(SCCIndices.find(&TargetSCC)->second <= I &&
375 "Edge between SCCs violates post-order relationship.");
376 continue;
377 }
378 }
379 }
380
381#ifdef EXPENSIVE_CHECKS
382 // Verify that all nodes in this RefSCC can reach all other nodes.
383 SmallVector<Node *> Nodes;
384 for (SCC *C : SCCs) {
385 for (Node &N : *C)
386 Nodes.push_back(&N);
387 }
388 for (Node *N : Nodes) {
389 SmallVector<Node *, 4> Worklist;
390 SmallPtrSet<Node *, 4> Visited;
391 Worklist.push_back(N);
392 while (!Worklist.empty()) {
393 Node *VisitingNode = Worklist.pop_back_val();
394 if (!Visited.insert(VisitingNode).second)
395 continue;
396 for (Edge &E : **VisitingNode)
397 Worklist.push_back(&E.getNode());
398 }
399 for (Node *NodeToVisit : Nodes) {
400 assert(Visited.contains(NodeToVisit) &&
401 "Cannot reach all nodes within RefSCC");
402 }
403 }
404#endif
405}
406#endif
407
408bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
409 if (&RC == this)
410 return false;
411
412 // Search all edges to see if this is a parent.
413 for (SCC &C : *this)
414 for (Node &N : C)
415 for (Edge &E : *N)
416 if (G->lookupRefSCC(E.getNode()) == &RC)
417 return true;
418
419 return false;
420}
421
422bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
423 if (&RC == this)
424 return false;
425
426 // For each descendant of this RefSCC, see if one of its children is the
427 // argument. If not, add that descendant to the worklist and continue
428 // searching.
429 SmallVector<const RefSCC *, 4> Worklist;
430 SmallPtrSet<const RefSCC *, 4> Visited;
431 Worklist.push_back(this);
432 Visited.insert(this);
433 do {
434 const RefSCC &DescendantRC = *Worklist.pop_back_val();
435 for (SCC &C : DescendantRC)
436 for (Node &N : C)
437 for (Edge &E : *N) {
438 auto *ChildRC = G->lookupRefSCC(E.getNode());
439 if (ChildRC == &RC)
440 return true;
441 if (!ChildRC || !Visited.insert(ChildRC).second)
442 continue;
443 Worklist.push_back(ChildRC);
444 }
445 } while (!Worklist.empty());
446
447 return false;
448}
449
450/// Generic helper that updates a postorder sequence of SCCs for a potentially
451/// cycle-introducing edge insertion.
452///
453/// A postorder sequence of SCCs of a directed graph has one fundamental
454/// property: all edges in the DAG of SCCs point "up" the sequence. That is,
455/// all edges in the SCC DAG point to prior SCCs in the sequence.
456///
457/// This routine both updates a postorder sequence and uses that sequence to
458/// compute the set of SCCs connected into a cycle. It should only be called to
459/// insert a "downward" edge which will require changing the sequence to
460/// restore it to a postorder.
461///
462/// When inserting an edge from an earlier SCC to a later SCC in some postorder
463/// sequence, all of the SCCs which may be impacted are in the closed range of
464/// those two within the postorder sequence. The algorithm used here to restore
465/// the state is as follows:
466///
467/// 1) Starting from the source SCC, construct a set of SCCs which reach the
468/// source SCC consisting of just the source SCC. Then scan toward the
469/// target SCC in postorder and for each SCC, if it has an edge to an SCC
470/// in the set, add it to the set. Otherwise, it does not reach the source
471/// SCC, so move it in the postorder sequence to immediately before the
472/// source SCC, shifting the source SCC and all SCCs in the set one
473/// position toward the target SCC. Stop scanning after processing the
474/// target SCC.
475/// 2) If the source SCC is now past the target SCC in the postorder sequence,
476/// and thus the new edge will flow toward the start, we are done.
477/// 3) Otherwise, starting from the target SCC, walk all edges which reach an
478/// SCC between the source and the target, and add them to the set of
479/// connected SCCs, then recurse through them. Once a complete set of the
480/// SCCs the target connects to is known, hoist the remaining SCCs between
481/// the source and the target to be above the target. Note that there is no
482/// need to process the source SCC, it is already known to connect.
483/// 4) At this point, all of the SCCs in the closed range between the source
484/// SCC and the target SCC in the postorder sequence are connected,
485/// including the target SCC and the source SCC. Inserting the edge from
486/// the source SCC to the target SCC will form a cycle out of precisely
487/// these SCCs. Thus we can merge all of the SCCs in this closed range into
488/// a single SCC.
489///
490/// This process has various important properties:
491/// - Only mutates the SCCs when adding the edge actually changes the SCC
492/// structure.
493/// - Never mutates SCCs which are unaffected by the change.
494/// - Updates the postorder sequence to correctly satisfy the postorder
495/// constraint after the edge is inserted.
496/// - Only reorders SCCs in the closed postorder sequence from the source to
497/// the target, making it easy to bound how much has changed in the ordering.
498/// - Big-O is the number of edges in the closed postorder range of SCCs from
499/// source to target.
500///
501/// This helper routine, in addition to updating the postorder sequence itself
502/// will also update a map from SCCs to indices within that sequence.
503///
504/// The sequence and the map must operate on pointers to the SCC type.
505///
506/// Two callbacks must be provided. The first computes the subset of SCCs in
507/// the postorder closed range from the source to the target which connect to
508/// the source SCC via some (transitive) set of edges. The second computes the
509/// subset of the same range which the target SCC connects to via some
510/// (transitive) set of edges. Both callbacks should populate the set argument
511/// provided.
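///
/// As a small illustration (hypothetical SCCs, not drawn from any particular
/// caller): given the postorder sequence [S, A, B, T] where A reaches S, T
/// reaches A, and B reaches neither, inserting an edge from S to T first moves
/// B in front of S (it takes part in no new cycle) and then returns the range
/// [S, A] so that the caller can merge those SCCs into T.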
512template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
513 typename ComputeSourceConnectedSetCallableT,
514 typename ComputeTargetConnectedSetCallableT>
515static iterator_range<typename PostorderSequenceT::iterator>
516updatePostorderSequenceForEdgeInsertion(
517 SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
518 SCCIndexMapT &SCCIndices,
519 ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
520 ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
521 int SourceIdx = SCCIndices[&SourceSCC];
522 int TargetIdx = SCCIndices[&TargetSCC];
523 assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");
524
525 SmallPtrSet<SCCT *, 4> ConnectedSet;
526
527 // Compute the SCCs which (transitively) reach the source.
528 ComputeSourceConnectedSet(ConnectedSet);
529
530 // Partition the SCCs in this part of the post-order sequence so only SCCs
531 // connecting to the source remain between it and the target. This is
532 // a benign partition as it preserves postorder.
533 auto SourceI = std::stable_partition(
534 SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
535 [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
536 for (int I = SourceIdx, E = TargetIdx + 1; I < E; ++I)
537 SCCIndices.find(SCCs[I])->second = I;
538
539 // If the target doesn't connect to the source, then we've corrected the
540 // post-order and there are no cycles formed.
541 if (!ConnectedSet.count(&TargetSCC)) {
542 assert(SourceI > (SCCs.begin() + SourceIdx) &&
543 "Must have moved the source to fix the post-order.");
544 assert(*std::prev(SourceI) == &TargetSCC &&
545 "Last SCC to move should have bene the target.");
546
547 // Return an empty range at the target SCC indicating there is nothing to
548 // merge.
549 return make_range(std::prev(SourceI), std::prev(SourceI));
550 }
551
552 assert(SCCs[TargetIdx] == &TargetSCC &&
553 "Should not have moved target if connected!");
554 SourceIdx = SourceI - SCCs.begin();
555 assert(SCCs[SourceIdx] == &SourceSCC &&
556 "Bad updated index computation for the source SCC!");
557
558 // See whether there are any remaining intervening SCCs between the source
559 // and target. If so we need to make sure they all are reachable from the
560 // target.
561 if (SourceIdx + 1 < TargetIdx) {
562 ConnectedSet.clear();
563 ComputeTargetConnectedSet(ConnectedSet);
564
565 // Partition SCCs so that only SCCs reached from the target remain between
566 // the source and the target. This preserves postorder.
567 auto TargetI = std::stable_partition(
568 SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
569 [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
570 for (int I = SourceIdx + 1, E = TargetIdx + 1; I < E; ++I)
571 SCCIndices.find(SCCs[I])->second = I;
572 TargetIdx = std::prev(TargetI) - SCCs.begin();
573 assert(SCCs[TargetIdx] == &TargetSCC &&
574 "Should always end with the target!");
575 }
576
577 // At this point, we know that connecting source to target forms a cycle
578 // because target connects back to source, and we know that all the SCCs
579 // between the source and target in the postorder sequence participate in that
580 // cycle.
581 return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
582}
583
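// Switch the internal ref edge from SourceN to TargetN into a call edge. If
// this closes a call cycle, every SCC on that cycle is merged into TargetN's
// SCC and true is returned; otherwise only the edge kind changes (though the
// SCC postorder within this RefSCC may be reshuffled) and false is returned.
// MergeCB, when provided, is invoked with the (possibly empty) list of SCCs
// about to be merged away.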
584bool LazyCallGraph::RefSCC::switchInternalEdgeToCall(
585 Node &SourceN, Node &TargetN,
586 function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
587 assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
588 SmallVector<SCC *, 1> DeletedSCCs;
589
590#ifdef EXPENSIVE_CHECKS
591 verify();
592 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
593#endif
594
595 SCC &SourceSCC = *G->lookupSCC(SourceN);
596 SCC &TargetSCC = *G->lookupSCC(TargetN);
597
598 // If the two nodes are already part of the same SCC, we're also done as
599 // we've just added more connectivity.
600 if (&SourceSCC == &TargetSCC) {
601 SourceN->setEdgeKind(TargetN, Edge::Call);
602 return false; // No new cycle.
603 }
604
605 // At this point we leverage the postorder list of SCCs to detect when the
606 // insertion of an edge changes the SCC structure in any way.
607 //
608 // First and foremost, we can eliminate the need for any changes when the
609 // edge is toward the beginning of the postorder sequence because all edges
610 // flow in that direction already. Thus adding a new one cannot form a cycle.
611 int SourceIdx = SCCIndices[&SourceSCC];
612 int TargetIdx = SCCIndices[&TargetSCC];
613 if (TargetIdx < SourceIdx) {
614 SourceN->setEdgeKind(TargetN, Edge::Call);
615 return false; // No new cycle.
616 }
617
618 // Compute the SCCs which (transitively) reach the source.
619 auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
620#ifdef EXPENSIVE_CHECKS
621 // Check that the RefSCC is still valid before computing this as the
622 // results will be nonsensical if we've broken its invariants.
623 verify();
624#endif
625 ConnectedSet.insert(&SourceSCC);
626 auto IsConnected = [&](SCC &C) {
627 for (Node &N : C)
628 for (Edge &E : N->calls())
629 if (ConnectedSet.count(G->lookupSCC(E.getNode())))
630 return true;
631
632 return false;
633 };
634
635 for (SCC *C :
636 make_range(SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1))
637 if (IsConnected(*C))
638 ConnectedSet.insert(C);
639 };
640
641 // Use a normal worklist to find which SCCs the target connects to. We still
642 // bound the search based on the range in the postorder list we care about,
643 // but because this is forward connectivity we just "recurse" through the
644 // edges.
645 auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
646#ifdef EXPENSIVE_CHECKS
647 // Check that the RefSCC is still valid before computing this as the
648 // results will be nonsensical if we've broken its invariants.
649 verify();
650#endif
651 ConnectedSet.insert(&TargetSCC);
652 SmallVector<SCC *, 4> Worklist;
653 Worklist.push_back(&TargetSCC);
654 do {
655 SCC &C = *Worklist.pop_back_val();
656 for (Node &N : C)
657 for (Edge &E : *N) {
658 if (!E.isCall())
659 continue;
660 SCC &EdgeC = *G->lookupSCC(E.getNode());
661 if (&EdgeC.getOuterRefSCC() != this)
662 // Not in this RefSCC...
663 continue;
664 if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
665 // Not in the postorder sequence between source and target.
666 continue;
667
668 if (ConnectedSet.insert(&EdgeC).second)
669 Worklist.push_back(&EdgeC);
670 }
671 } while (!Worklist.empty());
672 };
673
674 // Use a generic helper to update the postorder sequence of SCCs and return
675 // a range of any SCCs connected into a cycle by inserting this edge. This
676 // routine will also take care of updating the indices into the postorder
677 // sequence.
678 auto MergeRange = updatePostorderSequenceForEdgeInsertion(
679 SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
680 ComputeTargetConnectedSet);
681
682 // Run the user's callback on the merged SCCs before we actually merge them.
683 if (MergeCB)
684 MergeCB(ArrayRef(MergeRange.begin(), MergeRange.end()));
685
686 // If the merge range is empty, then adding the edge didn't actually form any
687 // new cycles. We're done.
688 if (MergeRange.empty()) {
689 // Now that the SCC structure is finalized, flip the kind to call.
690 SourceN->setEdgeKind(TargetN, Edge::Call);
691 return false; // No new cycle.
692 }
693
694#ifdef EXPENSIVE_CHECKS
695 // Before merging, check that the RefSCC remains valid after all the
696 // postorder updates.
697 verify();
698#endif
699
700 // Otherwise we need to merge all the SCCs in the cycle into a single result
701 // SCC.
702 //
703 // NB: We merge into the target because all of these functions were already
704 // reachable from the target, meaning any SCC-wide properties deduced about it
705 // other than the set of functions within it will not have changed.
706 for (SCC *C : MergeRange) {
707 assert(C != &TargetSCC &&
708 "We merge *into* the target and shouldn't process it here!");
709 SCCIndices.erase(C);
710 TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
711 for (Node *N : C->Nodes)
712 G->SCCMap[N] = &TargetSCC;
713 C->clear();
714 DeletedSCCs.push_back(C);
715 }
716
717 // Erase the merged SCCs from the list and update the indices of the
718 // remaining SCCs.
719 int IndexOffset = MergeRange.end() - MergeRange.begin();
720 auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
721 for (SCC *C : make_range(EraseEnd, SCCs.end()))
722 SCCIndices[C] -= IndexOffset;
723
724 // Now that the SCC structure is finalized, flip the kind to call.
725 SourceN->setEdgeKind(TargetN, Edge::Call);
726
727 // And we're done, but we did form a new cycle.
728 return true;
729}
730
731void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
732 Node &TargetN) {
733 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
734
735#ifdef EXPENSIVE_CHECKS
736 verify();
737 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
738#endif
739
740 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
741 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
742 assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
743 "Source and Target must be in separate SCCs for this to be trivial!");
744
745 // Set the edge kind.
746 SourceN->setEdgeKind(TargetN, Edge::Ref);
747}
748
749iterator_range<LazyCallGraph::RefSCC::iterator>
750LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
751 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
752
753#ifdef EXPENSIVE_CHECKS
754 verify();
755 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
756#endif
757
758 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
759 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
760
761 SCC &TargetSCC = *G->lookupSCC(TargetN);
762 assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
763 "the same SCC to require the "
764 "full CG update.");
765
766 // Set the edge kind.
767 SourceN->setEdgeKind(TargetN, Edge::Ref);
768
769 // Otherwise we are removing a call edge from a single SCC. This may break
770 // the cycle. In order to compute the new set of SCCs, we need to do a small
771 // DFS over the nodes within the SCC to form any sub-cycles that remain as
772 // distinct SCCs and compute a postorder over the resulting SCCs.
773 //
774 // However, we specially handle the target node. The target node is known to
775 // reach all other nodes in the original SCC by definition. This means that
776 // we want the old SCC to be replaced with an SCC containing that node as it
777 // will be the root of whatever SCC DAG results from the DFS. Assumptions
778 // about an SCC such as the set of functions called will continue to hold,
779 // etc.
780
781 SCC &OldSCC = TargetSCC;
782 SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
783 SmallVector<Node *, 16> PendingSCCStack;
784 SmallVector<SCC *, 4> NewSCCs;
785
786 // Prepare the nodes for a fresh DFS.
787 SmallVector<Node *, 16> Worklist;
788 Worklist.swap(OldSCC.Nodes);
789 for (Node *N : Worklist) {
790 N->DFSNumber = N->LowLink = 0;
791 G->SCCMap.erase(N);
792 }
793
794 // Force the target node to be in the old SCC. This also enables us to take
795 // a very significant short-cut in the standard Tarjan walk to re-form SCCs
796 // below: whenever we build an edge that reaches the target node, we know
797 // that the target node eventually connects back to all other nodes in our
798 // walk. As a consequence, we can detect and handle participants in that
799 // cycle without walking all the edges that form this connection, and instead
800 // by relying on the fundamental guarantee coming into this operation (all
801 // nodes are reachable from the target due to previously forming an SCC).
802 TargetN.DFSNumber = TargetN.LowLink = -1;
803 OldSCC.Nodes.push_back(&TargetN);
804 G->SCCMap[&TargetN] = &OldSCC;
805
806 // Scan down the stack and DFS across the call edges.
807 for (Node *RootN : Worklist) {
808 assert(DFSStack.empty() &&
809 "Cannot begin a new root with a non-empty DFS stack!");
810 assert(PendingSCCStack.empty() &&
811 "Cannot begin a new root with pending nodes for an SCC!");
812
813 // Skip any nodes we've already reached in the DFS.
814 if (RootN->DFSNumber != 0) {
815 assert(RootN->DFSNumber == -1 &&
816 "Shouldn't have any mid-DFS root nodes!");
817 continue;
818 }
819
820 RootN->DFSNumber = RootN->LowLink = 1;
821 int NextDFSNumber = 2;
822
823 DFSStack.emplace_back(RootN, (*RootN)->call_begin());
824 do {
825 auto [N, I] = DFSStack.pop_back_val();
826 auto E = (*N)->call_end();
827 while (I != E) {
828 Node &ChildN = I->getNode();
829 if (ChildN.DFSNumber == 0) {
830 // We haven't yet visited this child, so descend, pushing the current
831 // node onto the stack.
832 DFSStack.emplace_back(N, I);
833
834 assert(!G->SCCMap.count(&ChildN) &&
835 "Found a node with 0 DFS number but already in an SCC!");
836 ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
837 N = &ChildN;
838 I = (*N)->call_begin();
839 E = (*N)->call_end();
840 continue;
841 }
842
843 // Check for the child already being part of some component.
844 if (ChildN.DFSNumber == -1) {
845 if (G->lookupSCC(ChildN) == &OldSCC) {
846 // If the child is part of the old SCC, we know that it can reach
847 // every other node, so we have formed a cycle. Pull the entire DFS
848 // and pending stacks into it. See the comment above about setting
849 // up the old SCC for why we do this.
850 int OldSize = OldSCC.size();
851 OldSCC.Nodes.push_back(N);
852 OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
853 PendingSCCStack.clear();
854 while (!DFSStack.empty())
855 OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
856 for (Node &N : drop_begin(OldSCC, OldSize)) {
857 N.DFSNumber = N.LowLink = -1;
858 G->SCCMap[&N] = &OldSCC;
859 }
860 N = nullptr;
861 break;
862 }
863
864 // If the child has already been added to some child component, it
865 // couldn't impact the low-link of this parent because it isn't
866 // connected, and thus its low-link isn't relevant so skip it.
867 ++I;
868 continue;
869 }
870
871 // Track the lowest linked child as the lowest link for this node.
872 assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
873 if (ChildN.LowLink < N->LowLink)
874 N->LowLink = ChildN.LowLink;
875
876 // Move to the next edge.
877 ++I;
878 }
879 if (!N)
880 // Cleared the DFS early, start another round.
881 break;
882
883 // We've finished processing N and its descendants, put it on our pending
884 // SCC stack to eventually get merged into an SCC of nodes.
885 PendingSCCStack.push_back(N);
886
887 // If this node is linked to some lower entry, continue walking up the
888 // stack.
889 if (N->LowLink != N->DFSNumber)
890 continue;
891
892 // Otherwise, we've completed an SCC. Append it to our post order list of
893 // SCCs.
894 int RootDFSNumber = N->DFSNumber;
895 // Find the range of the node stack by walking down until we pass the
896 // root DFS number.
897 auto SCCNodes = make_range(
898 PendingSCCStack.rbegin(),
899 find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
900 return N->DFSNumber < RootDFSNumber;
901 }));
902
903 // Form a new SCC out of these nodes and then clear them off our pending
904 // stack.
905 NewSCCs.push_back(G->createSCC(*this, SCCNodes));
906 for (Node &N : *NewSCCs.back()) {
907 N.DFSNumber = N.LowLink = -1;
908 G->SCCMap[&N] = NewSCCs.back();
909 }
910 PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
911 } while (!DFSStack.empty());
912 }
913
914 // Insert the remaining SCCs before the old one. The old SCC can reach all
915 // other SCCs we form because it contains the target node of the removed edge
916 // of the old SCC. This means that we will have edges into all the new SCCs,
917 // which means the old one must come last for postorder.
918 int OldIdx = SCCIndices[&OldSCC];
919 SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());
920
921 // Update the mapping from SCC* to index to use the new SCC*s, and remove the
922 // old SCC from the mapping.
923 for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
924 SCCIndices[SCCs[Idx]] = Idx;
925
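  // Return the range of newly formed SCCs; the old SCC (which now holds
  // TargetN) sits immediately after this range in the postorder sequence.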
926 return make_range(SCCs.begin() + OldIdx,
927 SCCs.begin() + OldIdx + NewSCCs.size());
928}
929
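// Switch an outgoing ref edge that leaves this RefSCC into a call edge. Call
// and ref edges are equivalent in the RefSCC graph, so no SCC or RefSCC
// structure needs to be updated.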
930void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
931 Node &TargetN) {
932 assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
933
934 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
935 assert(G->lookupRefSCC(TargetN) != this &&
936 "Target must not be in this RefSCC.");
937#ifdef EXPENSIVE_CHECKS
938 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
939 "Target must be a descendant of the Source.");
940#endif
941
942 // Edges between RefSCCs are the same regardless of call or ref, so we can
943 // just flip the edge here.
944 SourceN->setEdgeKind(TargetN, Edge::Call);
945
946#ifdef EXPENSIVE_CHECKS
947 verify();
948#endif
949}
950
951void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
952 Node &TargetN) {
953 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
954
955 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
956 assert(G->lookupRefSCC(TargetN) != this &&
957 "Target must not be in this RefSCC.");
958#ifdef EXPENSIVE_CHECKS
959 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
960 "Target must be a descendant of the Source.");
961#endif
962
963 // Edges between RefSCCs are the same regardless of call or ref, so we can
964 // just flip the edge here.
965 SourceN->setEdgeKind(TargetN, Edge::Ref);
966
967#ifdef EXPENSIVE_CHECKS
968 verify();
969#endif
970}
971
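// Insert a brand new ref edge between two nodes that are already members of
// this RefSCC. Ref connectivity is only strengthened, so nothing beyond the
// source node's edge list needs to change.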
972void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
973 Node &TargetN) {
974 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
975 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
976
977 SourceN->insertEdgeInternal(TargetN, Edge::Ref);
978
979#ifdef EXPENSIVE_CHECKS
980 verify();
981#endif
982}
983
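// Insert a new edge (call or ref) from a node in this RefSCC to a node in a
// descendant RefSCC. The postorder of RefSCCs already accounts for such an
// edge, so only the source node's edge list is updated.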
984void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
985 Edge::Kind EK) {
986 // First insert it into the caller.
987 SourceN->insertEdgeInternal(TargetN, EK);
988
989 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
990
991 assert(G->lookupRefSCC(TargetN) != this &&
992 "Target must not be in this RefSCC.");
993#ifdef EXPENSIVE_CHECKS
994 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
995 "Target must be a descendant of the Source.");
996#endif
997
998#ifdef EXPENSIVE_CHECKS
999 verify();
1000#endif
1001}
1002
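// Insert a ref edge coming from a descendant RefSCC into this RefSCC. This can
// collapse a chain of RefSCCs into this one; the RefSCCs merged away (now
// empty, but with stable addresses) are returned so callers can invalidate any
// cached data keyed on them.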
1003SmallVector<LazyCallGraph::RefSCC *, 1>
1004LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
1005 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
1006 RefSCC &SourceC = *G->lookupRefSCC(SourceN);
1007 assert(&SourceC != this && "Source must not be in this RefSCC.");
1008#ifdef EXPENSIVE_CHECKS
1009 assert(SourceC.isDescendantOf(*this) &&
1010 "Source must be a descendant of the Target.");
1011#endif
1012
1013 SmallVector<RefSCC *, 1> DeletedRefSCCs;
1014
1015#ifdef EXPENSIVE_CHECKS
1016 verify();
1017 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1018#endif
1019
1020 int SourceIdx = G->RefSCCIndices[&SourceC];
1021 int TargetIdx = G->RefSCCIndices[this];
1022 assert(SourceIdx < TargetIdx &&
1023 "Postorder list doesn't see edge as incoming!");
1024
1025 // Compute the RefSCCs which (transitively) reach the source. We do this by
1026 // working backwards from the source using the parent set in each RefSCC,
1027 // skipping any RefSCCs that don't fall in the postorder range. This has the
1028 // advantage of walking the sparser parent edge (in high fan-out graphs) but
1029 // more importantly this removes examining all forward edges in all RefSCCs
1030 // within the postorder range which aren't in fact connected. Only connected
1031 // RefSCCs (and their edges) are visited here.
1032 auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1033 Set.insert(&SourceC);
1034 auto IsConnected = [&](RefSCC &RC) {
1035 for (SCC &C : RC)
1036 for (Node &N : C)
1037 for (Edge &E : *N)
1038 if (Set.count(G->lookupRefSCC(E.getNode())))
1039 return true;
1040
1041 return false;
1042 };
1043
1044 for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
1045 G->PostOrderRefSCCs.begin() + TargetIdx + 1))
1046 if (IsConnected(*C))
1047 Set.insert(C);
1048 };
1049
1050 // Use a normal worklist to find which SCCs the target connects to. We still
1051 // bound the search based on the range in the postorder list we care about,
1052 // but because this is forward connectivity we just "recurse" through the
1053 // edges.
1054 auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1055 Set.insert(this);
1056 SmallVector<RefSCC *, 4> Worklist;
1057 Worklist.push_back(this);
1058 do {
1059 RefSCC &RC = *Worklist.pop_back_val();
1060 for (SCC &C : RC)
1061 for (Node &N : C)
1062 for (Edge &E : *N) {
1063 RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
1064 if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
1065 // Not in the postorder sequence between source and target.
1066 continue;
1067
1068 if (Set.insert(&EdgeRC).second)
1069 Worklist.push_back(&EdgeRC);
1070 }
1071 } while (!Worklist.empty());
1072 };
1073
1074 // Use a generic helper to update the postorder sequence of RefSCCs and return
1075 // a range of any RefSCCs connected into a cycle by inserting this edge. This
1076 // routine will also take care of updating the indices into the postorder
1077 // sequence.
1078 iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
1079 updatePostorderSequenceForEdgeInsertion(
1080 SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
1081 ComputeSourceConnectedSet, ComputeTargetConnectedSet);
1082
1083 // Build a set, so we can do fast tests for whether a RefSCC will end up as
1084 // part of the merged RefSCC.
1085 SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());
1086
1087 // This RefSCC will always be part of that set, so just insert it here.
1088 MergeSet.insert(this);
1089
1090 // Now that we have identified all the SCCs which need to be merged into
1091 // a connected set with the inserted edge, merge all of them into this SCC.
1092 SmallVector<SCC *, 16> MergedSCCs;
1093 int SCCIndex = 0;
1094 for (RefSCC *RC : MergeRange) {
1095 assert(RC != this && "We're merging into the target RefSCC, so it "
1096 "shouldn't be in the range.");
1097
1098 // Walk the inner SCCs to update their up-pointer and walk all the edges to
1099 // update any parent sets.
1100 // FIXME: We should try to find a way to avoid this (rather expensive) edge
1101 // walk by updating the parent sets in some other manner.
1102 for (SCC &InnerC : *RC) {
1103 InnerC.OuterRefSCC = this;
1104 SCCIndices[&InnerC] = SCCIndex++;
1105 for (Node &N : InnerC)
1106 G->SCCMap[&N] = &InnerC;
1107 }
1108
1109 // Now merge in the SCCs. We can actually move here so try to reuse storage
1110 // the first time through.
1111 if (MergedSCCs.empty())
1112 MergedSCCs = std::move(RC->SCCs);
1113 else
1114 MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
1115 RC->SCCs.clear();
1116 DeletedRefSCCs.push_back(RC);
1117 }
1118
1119 // Append our original SCCs to the merged list and move it into place.
1120 for (SCC &InnerC : *this)
1121 SCCIndices[&InnerC] = SCCIndex++;
1122 MergedSCCs.append(SCCs.begin(), SCCs.end());
1123 SCCs = std::move(MergedSCCs);
1124
1125 // Remove the merged away RefSCCs from the post order sequence.
1126 for (RefSCC *RC : MergeRange)
1127 G->RefSCCIndices.erase(RC);
1128 int IndexOffset = MergeRange.end() - MergeRange.begin();
1129 auto EraseEnd =
1130 G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
1131 for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
1132 G->RefSCCIndices[RC] -= IndexOffset;
1133
1134 // At this point we have a merged RefSCC with a post-order SCCs list, just
1135 // connect the nodes to form the new edge.
1136 SourceN->insertEdgeInternal(TargetN, Edge::Ref);
1137
1138 // We return the list of SCCs which were merged so that callers can
1139 // invalidate any data they have associated with those SCCs. Note that these
1140 // SCCs are no longer in an interesting state (they are totally empty) but
1141 // the pointers will remain stable for the life of the graph itself.
1142 return DeletedRefSCCs;
1143}
1144
1145void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
1146 assert(G->lookupRefSCC(SourceN) == this &&
1147 "The source must be a member of this RefSCC.");
1148 assert(G->lookupRefSCC(TargetN) != this &&
1149 "The target must not be a member of this RefSCC");
1150
1151#ifdef EXPENSIVE_CHECKS
1152 verify();
1153 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1154#endif
1155
1156 // First remove it from the node.
1157 bool Removed = SourceN->removeEdgeInternal(TargetN);
1158 (void)Removed;
1159 assert(Removed && "Target not in the edge set for this caller?");
1160}
1161
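// Remove a batch of internal ref edges. If the removals split this RefSCC, the
// resulting new RefSCCs are returned in postorder and this RefSCC is emptied;
// otherwise an empty list is returned and this RefSCC remains valid.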
1162SmallVector<LazyCallGraph::RefSCC *, 1>
1163LazyCallGraph::RefSCC::removeInternalRefEdges(
1164 ArrayRef<std::pair<Node *, Node *>> Edges) {
1165 // We return a list of the resulting *new* RefSCCs in post-order.
1166 SmallVector<RefSCC *, 1> Result;
1167
1168#ifdef EXPENSIVE_CHECKS
1169 // Verify the RefSCC is valid to start with and that either we return an empty
1170 // list of result RefSCCs and this RefSCC remains valid, or we return new
1171 // RefSCCs and this RefSCC is dead.
1172 verify();
1173 auto VerifyOnExit = make_scope_exit([&]() {
1174 // If we didn't replace our RefSCC with new ones, check that this one
1175 // remains valid.
1176 if (G)
1177 verify();
1178 });
1179#endif
1180
1181 // First remove the actual edges.
1182 for (auto [SourceN, TargetN] : Edges) {
1183 assert(!(**SourceN)[*TargetN].isCall() &&
1184 "Cannot remove a call edge, it must first be made a ref edge");
1185
1186 bool Removed = (*SourceN)->removeEdgeInternal(*TargetN);
1187 (void)Removed;
1188 assert(Removed && "Target not in the edge set for this caller?");
1189 }
1190
1191 // Direct self references don't impact the ref graph at all.
1192 // If all targets are in the same SCC as the source then, because no call
1193 // edges were removed, there is no RefSCC structure change.
1194 if (llvm::all_of(Edges, [&](std::pair<Node *, Node *> E) {
1195 return E.first == E.second ||
1196 G->lookupSCC(*E.first) == G->lookupSCC(*E.second);
1197 }))
1198 return Result;
1199
1200 // We build somewhat synthetic new RefSCCs by providing a postorder mapping
1201 // for each inner SCC. We store these inside the low-link field of the nodes
1202 // rather than associated with SCCs because this saves a round-trip through
1203 // the node->SCC map and in the common case, SCCs are small. We will verify
1204 // that we always give the same number to every node in the SCC such that
1205 // these are equivalent.
1206 int PostOrderNumber = 0;
1207
1208 // Reset all the other nodes to prepare for a DFS over them, and add them to
1209 // our worklist.
1210 SmallVector<Node *, 8> Worklist;
1211 for (SCC *C : SCCs) {
1212 for (Node &N : *C)
1213 N.DFSNumber = N.LowLink = 0;
1214
1215 Worklist.append(C->Nodes.begin(), C->Nodes.end());
1216 }
1217
1218 // Track the number of nodes in this RefSCC so that we can quickly recognize
1219 // an important special case of the edge removal not breaking the cycle of
1220 // this RefSCC.
1221 const int NumRefSCCNodes = Worklist.size();
1222
1222
1223 SmallVector<std::pair<Node *, EdgeSequence::iterator>, 4> DFSStack;
1224 SmallVector<Node *, 4> PendingRefSCCStack;
1225 do {
1226 assert(DFSStack.empty() &&
1227 "Cannot begin a new root with a non-empty DFS stack!");
1228 assert(PendingRefSCCStack.empty() &&
1229 "Cannot begin a new root with pending nodes for an SCC!");
1230
1231 Node *RootN = Worklist.pop_back_val();
1232 // Skip any nodes we've already reached in the DFS.
1233 if (RootN->DFSNumber != 0) {
1234 assert(RootN->DFSNumber == -1 &&
1235 "Shouldn't have any mid-DFS root nodes!");
1236 continue;
1237 }
1238
1239 RootN->DFSNumber = RootN->LowLink = 1;
1240 int NextDFSNumber = 2;
1241
1242 DFSStack.emplace_back(RootN, (*RootN)->begin());
1243 do {
1244 auto [N, I] = DFSStack.pop_back_val();
1245 auto E = (*N)->end();
1246
1247 assert(N->DFSNumber != 0 && "We should always assign a DFS number "
1248 "before processing a node.");
1249
1250 while (I != E) {
1251 Node &ChildN = I->getNode();
1252 if (ChildN.DFSNumber == 0) {
1253 // Mark that we should start at this child when next this node is the
1254 // top of the stack. We don't start at the next child to ensure this
1255 // child's lowlink is reflected.
1256 DFSStack.emplace_back(N, I);
1257
1258 // Continue, resetting to the child node.
1259 ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
1260 N = &ChildN;
1261 I = ChildN->begin();
1262 E = ChildN->end();
1263 continue;
1264 }
1265 if (ChildN.DFSNumber == -1) {
1266 // If this child isn't currently in this RefSCC, no need to process
1267 // it.
1268 ++I;
1269 continue;
1270 }
1271
1272 // Track the lowest link of the children, if any are still in the stack.
1273 // Any child not on the stack will have a LowLink of -1.
1274 assert(ChildN.LowLink != 0 &&
1275 "Low-link must not be zero with a non-zero DFS number.");
1276 if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
1277 N->LowLink = ChildN.LowLink;
1278 ++I;
1279 }
1280
1281 // We've finished processing N and its descendants, put it on our pending
1282 // stack to eventually get merged into a RefSCC.
1283 PendingRefSCCStack.push_back(N);
1284
1285 // If this node is linked to some lower entry, continue walking up the
1286 // stack.
1287 if (N->LowLink != N->DFSNumber) {
1288 assert(!DFSStack.empty() &&
1289 "We never found a viable root for a RefSCC to pop off!");
1290 continue;
1291 }
1292
1293 // Otherwise, form a new RefSCC from the top of the pending node stack.
1294 int RefSCCNumber = PostOrderNumber++;
1295 int RootDFSNumber = N->DFSNumber;
1296
1297 // Find the range of the node stack by walking down until we pass the
1298 // root DFS number. Update the DFS numbers and low link numbers in the
1299 // process to avoid re-walking this list where possible.
1300 auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
1301 if (N->DFSNumber < RootDFSNumber)
1302 // We've found the bottom.
1303 return true;
1304
1305 // Update this node and keep scanning.
1306 N->DFSNumber = -1;
1307 // Save the post-order number in the lowlink field so that we can use
1308 // it to map SCCs into new RefSCCs after we finish the DFS.
1309 N->LowLink = RefSCCNumber;
1310 return false;
1311 });
1312 auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());
1313
1314 // If we find a cycle containing all nodes originally in this RefSCC then
1315 // the removal hasn't changed the structure at all. This is an important
1316 // special case, and we can directly exit the entire routine more
1317 // efficiently as soon as we discover it.
1318 if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
1319 // Clear out the low link field as we won't need it.
1320 for (Node *N : RefSCCNodes)
1321 N->LowLink = -1;
1322 // Return the empty result immediately.
1323 return Result;
1324 }
1325
1326 // We've already marked the nodes internally with the RefSCC number so
1327 // just clear them off the stack and continue.
1328 PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
1329 } while (!DFSStack.empty());
1330
1331 assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
1332 assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
1333 } while (!Worklist.empty());
1334
1335 assert(PostOrderNumber > 1 &&
1336 "Should never finish the DFS when the existing RefSCC remains valid!");
1337
1338 // Otherwise we create a collection of new RefSCC nodes and build
1339 // a radix-sort style map from postorder number to these new RefSCCs. We then
1340 // append SCCs to each of these RefSCCs in the order they occurred in the
1341 // original SCCs container.
1342 for (int I = 0; I < PostOrderNumber; ++I)
1343 Result.push_back(G->createRefSCC(*G));
1344
1345 // Insert the resulting postorder sequence into the global graph postorder
1346 // sequence before the current RefSCC in that sequence, and then remove the
1347 // current one.
1348 //
1349 // FIXME: It'd be nice to change the APIs so that we returned an iterator
1350 // range over the global postorder sequence and generally use that sequence
1351 // rather than building a separate result vector here.
1352 int Idx = G->getRefSCCIndex(*this);
1353 G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
1354 G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
1355 Result.end());
1356 for (int I : seq<int>(Idx, G->PostOrderRefSCCs.size()))
1357 G->RefSCCIndices[G->PostOrderRefSCCs[I]] = I;
1358
1359 for (SCC *C : SCCs) {
1360 // We store the SCC number in the node's low-link field above.
1361 int SCCNumber = C->begin()->LowLink;
1362 // Clear out all the SCC's node's low-link fields now that we're done
1363 // using them as side-storage.
1364 for (Node &N : *C) {
1365 assert(N.LowLink == SCCNumber &&
1366 "Cannot have different numbers for nodes in the same SCC!");
1367 N.LowLink = -1;
1368 }
1369
1370 RefSCC &RC = *Result[SCCNumber];
1371 int SCCIndex = RC.SCCs.size();
1372 RC.SCCs.push_back(C);
1373 RC.SCCIndices[C] = SCCIndex;
1374 C->OuterRefSCC = &RC;
1375 }
1376
1377 // Now that we've moved things into the new RefSCCs, clear out our current
1378 // one.
1379 G = nullptr;
1380 SCCs.clear();
1381 SCCIndices.clear();
1382
1383#ifdef EXPENSIVE_CHECKS
1384 // Verify the new RefSCCs we've built.
1385 for (RefSCC *RC : Result)
1386 RC->verify();
1387#endif
1388
1389 // Return the new list of SCCs.
1390 return Result;
1391}
1392
1393void LazyCallGraph::insertTrivialCallEdge(Node &SourceN,
1394 Node &TargetN) {
1395#ifdef EXPENSIVE_CHECKS
1396 auto ExitVerifier = make_scope_exit([this] { verify(); });
1397
1398 // Check that we aren't breaking some invariants of the SCC graph. Note that
1399 // this is quadratic in the number of edges in the call graph!
1400 SCC &SourceC = *G->lookupSCC(SourceN);
1401 SCC &TargetC = *G->lookupSCC(TargetN);
1402 if (&SourceC != &TargetC)
1403 assert(SourceC.isAncestorOf(TargetC) &&
1404 "Call edge is not trivial in the SCC graph!");
1405#endif
1406
1407 // First insert it into the source or find the existing edge.
1408 auto [Iterator, Inserted] =
1409 SourceN->EdgeIndexMap.try_emplace(&TargetN, SourceN->Edges.size());
1410 if (!Inserted) {
1411 // Already an edge, just update it.
1412 Edge &E = SourceN->Edges[Iterator->second];
1413 if (E.isCall())
1414 return; // Nothing to do!
1415 E.setKind(Edge::Call);
1416 } else {
1417 // Create the new edge.
1418 SourceN->Edges.emplace_back(TargetN, Edge::Call);
1419 }
1420}
1421
1422void LazyCallGraph::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
1423#ifdef EXPENSIVE_CHECKS
1424 auto ExitVerifier = make_scope_exit([this] { verify(); });
1425
1426 // Check that we aren't breaking some invariants of the RefSCC graph.
1427 RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
1428 RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
1429 if (&SourceRC != &TargetRC)
1430 assert(SourceRC.isAncestorOf(TargetRC) &&
1431 "Ref edge is not trivial in the RefSCC graph!");
1432#endif
1433
1434 // First insert it into the source or find the existing edge.
1435 auto [Iterator, Inserted] =
1436 SourceN->EdgeIndexMap.try_emplace(&TargetN, SourceN->Edges.size());
1437 (void)Iterator;
1438 if (!Inserted)
1439 // Already an edge, we're done.
1440 return;
1441
1442 // Create the new edge.
1443 SourceN->Edges.emplace_back(TargetN, Edge::Ref);
1444}
1445
1446void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
1447 Function &OldF = N.getFunction();
1448
1449#ifdef EXPENSIVE_CHECKS
1450 auto ExitVerifier = make_scope_exit([this] { verify(); });
1451
1452 assert(G->lookupRefSCC(N) == this &&
1453 "Cannot replace the function of a node outside this RefSCC.");
1454
1455 assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
1456 "Must not have already walked the new function!'");
1457
1458 // It is important that this replacement not introduce graph changes so we
1459 // insist that the caller has already removed every use of the original
1460 // function and that all uses of the new function correspond to existing
1461 // edges in the graph. The common and expected way to use this is when
1462 // replacing the function itself in the IR without changing the call graph
1463 // shape and just updating the analysis based on that.
1464 assert(&OldF != &NewF && "Cannot replace a function with itself!");
1465 assert(OldF.use_empty() &&
1466 "Must have moved all uses from the old function to the new!");
1467#endif
1468
1469 N.replaceFunction(NewF);
1470
1471 // Update various call graph maps.
1472 G->NodeMap.erase(&OldF);
1473 G->NodeMap[&NewF] = &N;
1474
1475 // Update lib functions.
1476 if (G->isLibFunction(OldF)) {
1477 G->LibFunctions.remove(&OldF);
1478 G->LibFunctions.insert(&NewF);
1479 }
1480}
1481
1482void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
1483 assert(SCCMap.empty() &&
1484 "This method cannot be called after SCCs have been formed!");
1485
1486 return SourceN->insertEdgeInternal(TargetN, EK);
1487}
1488
1489void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
1490 assert(SCCMap.empty() &&
1491 "This method cannot be called after SCCs have been formed!");
1492
1493 bool Removed = SourceN->removeEdgeInternal(TargetN);
1494 (void)Removed;
1495 assert(Removed && "Target not in the edge set for this caller?");
1496}
1497
1498void LazyCallGraph::markDeadFunction(Function &F) {
1499 // FIXME: This is unnecessarily restrictive. We should be able to remove
1500 // functions which recursively call themselves.
1501 assert(F.hasZeroLiveUses() &&
1502 "This routine should only be called on trivially dead functions!");
1503
1504 // We shouldn't remove library functions as they are never really dead while
1505 // the call graph is in use -- every function definition refers to them.
1507 "Must not remove lib functions from the call graph!");
1508
1509 auto NI = NodeMap.find(&F);
1510 assert(NI != NodeMap.end() && "Removed function should be known!");
1511
1512 Node &N = *NI->second;
1513
1514 // Remove all call edges out of dead function.
1515 for (Edge E : *N) {
1516 if (E.isCall())
1517 N->setEdgeKind(E.getNode(), Edge::Ref);
1518 }
1519}
1520
1521void LazyCallGraph::removeDeadFunctions(ArrayRef<Function *> DeadFs) {
1522 if (DeadFs.empty())
1523 return;
1524
1525 // Group dead functions by the RefSCC they're in.
1526 DenseMap<RefSCC *, SmallVector<Node *>> RCs;
1527 for (Function *DeadF : DeadFs) {
1528 Node *N = lookup(*DeadF);
1529#ifndef NDEBUG
1530 for (Edge &E : **N) {
1531 assert(!E.isCall() &&
1532 "dead function shouldn't have any outgoing call edges");
1533 }
1534#endif
1535 RefSCC *RC = lookupRefSCC(*N);
1536 RCs[RC].push_back(N);
1537 }
1538 // Remove outgoing edges from all dead functions. Dead functions should
1539 // already have had their call edges removed in markDeadFunction(), so we only
1540 // need to worry about spurious ref edges.
1541 for (auto [RC, DeadNs] : RCs) {
1542 SmallVector<std::pair<Node *, Node *>> InternalEdgesToRemove;
1543 for (Node *DeadN : DeadNs) {
1544 for (Edge &E : **DeadN) {
1545 if (lookupRefSCC(E.getNode()) == RC)
1546 InternalEdgesToRemove.push_back({DeadN, &E.getNode()});
1547 else
1548 RC->removeOutgoingEdge(*DeadN, E.getNode());
1549 }
1550 }
1551 // We ignore the returned RefSCCs since at this point we're done with CGSCC
1552 // iteration and don't need to add it to any worklists.
1553 (void)RC->removeInternalRefEdges(InternalEdgesToRemove);
1554 for (Node *DeadN : DeadNs) {
1555 RefSCC *DeadRC = lookupRefSCC(*DeadN);
1556 assert(DeadRC->size() == 1);
1557 assert(DeadRC->begin()->size() == 1);
1558 DeadRC->clear();
1559 DeadRC->G = nullptr;
1560 }
1561 }
1562 // Clean up data structures.
1563 for (Function *DeadF : DeadFs) {
1564 Node &N = *lookup(*DeadF);
1565
1566 EntryEdges.removeEdgeInternal(N);
1567 SCCMap.erase(SCCMap.find(&N));
1568 NodeMap.erase(NodeMap.find(DeadF));
1569
1570 N.clear();
1571 N.G = nullptr;
1572 N.F = nullptr;
1573 }
1574}
1575
1576// Gets the Edge::Kind from one function to another by looking at the function's
1577// instructions. Asserts if there is no edge.
1578// Useful for determining what type of edge should exist between functions when
1579// the edge hasn't been created yet.
1580static LazyCallGraph::Edge::Kind getEdgeKind(Function &OriginalFunction,
1581 Function &NewFunction) {
1582 // In release builds, assume that if there are no direct calls to the new
1583 // function, then there is a ref edge. In debug builds, keep track of
1584 // references to assert that there is actually a ref edge if there is no call
1585 // edge.
1586#ifndef NDEBUG
1587 SmallVector<Constant *, 16> Worklist;
1588 SmallPtrSet<Constant *, 16> Visited;
1589#endif
1590
1591 for (Instruction &I : instructions(OriginalFunction)) {
1592 if (auto *CB = dyn_cast<CallBase>(&I)) {
1593 if (Function *Callee = CB->getCalledFunction()) {
1594 if (Callee == &NewFunction)
1595 return LazyCallGraph::Edge::Kind::Call;
1596 }
1597 }
1598#ifndef NDEBUG
1599 for (Value *Op : I.operand_values()) {
1600 if (Constant *C = dyn_cast<Constant>(Op)) {
1601 if (Visited.insert(C).second)
1602 Worklist.push_back(C);
1603 }
1604 }
1605#endif
1606 }
1607
1608#ifndef NDEBUG
1609 bool FoundNewFunction = false;
1610 LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &F) {
1611 if (&F == &NewFunction)
1612 FoundNewFunction = true;
1613 });
1614 assert(FoundNewFunction && "No edge from original function to new function");
1615#endif
1616
1617 return LazyCallGraph::Edge::Kind::Ref;
1618}
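getEdgeKind() is file-local, but the same call-versus-ref distinction is visible through the public Node and Edge API once a node has been populated. A small sketch under that assumption (illustrative only, hypothetical helper name):

  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool callsDirectly(LazyCallGraph &G, Function &From, Function &To) {
    LazyCallGraph::Node &FromN = G.get(From);
    FromN.populate();                      // Ensure the edge sequence exists.
    auto *E = FromN->lookup(G.get(To));    // Null if there is no edge at all.
    return E && E->isCall();               // A ref edge yields false.
  }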
1619
1620 void LazyCallGraph::addSplitFunction(Function &OriginalFunction,
1621 Function &NewFunction) {
1622 assert(lookup(OriginalFunction) &&
1623 "Original function's node should already exist");
1624 Node &OriginalN = get(OriginalFunction);
1625 SCC *OriginalC = lookupSCC(OriginalN);
1626 RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1627
1628#ifdef EXPENSIVE_CHECKS
1629 OriginalRC->verify();
1630 auto VerifyOnExit = make_scope_exit([&]() { OriginalRC->verify(); });
1631#endif
1632
1633 assert(!lookup(NewFunction) &&
1634 "New function's node should not already exist");
1635 Node &NewN = initNode(NewFunction);
1636
1637 Edge::Kind EK = getEdgeKind(OriginalFunction, NewFunction);
1638
1639 SCC *NewC = nullptr;
1640 for (Edge &E : *NewN) {
1641 Node &EN = E.getNode();
1642 if (EK == Edge::Kind::Call && E.isCall() && lookupSCC(EN) == OriginalC) {
1643 // If the edge to the new function is a call edge and there is a call edge
1644 // from the new function to any function in the original function's SCC,
1645 // it is in the same SCC (and RefSCC) as the original function.
1646 NewC = OriginalC;
1647 NewC->Nodes.push_back(&NewN);
1648 break;
1649 }
1650 }
1651
1652 if (!NewC) {
1653 for (Edge &E : *NewN) {
1654 Node &EN = E.getNode();
1655 if (lookupRefSCC(EN) == OriginalRC) {
1656 // If there is any edge from the new function to any function in the
1657 // original function's RefSCC, it is in the same RefSCC as the original
1658 // function but a new SCC.
1659 RefSCC *NewRC = OriginalRC;
1660 NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1661
1662 // The new function's SCC is not the same as the original function's
1663 // SCC, since that case was handled earlier. If the edge from the
1664 // original function to the new function was a call edge, then we need
1665 // to insert the newly created function's SCC before the original
1666 // function's SCC. Otherwise, either the new SCC comes after the
1667 // original function's SCC, or it doesn't matter, and in both cases we
1668 // can add it to the very end.
1669 int InsertIndex = EK == Edge::Kind::Call ? NewRC->SCCIndices[OriginalC]
1670 : NewRC->SCCIndices.size();
1671 NewRC->SCCs.insert(NewRC->SCCs.begin() + InsertIndex, NewC);
1672 for (int I = InsertIndex, Size = NewRC->SCCs.size(); I < Size; ++I)
1673 NewRC->SCCIndices[NewRC->SCCs[I]] = I;
1674
1675 break;
1676 }
1677 }
1678 }
1679
1680 if (!NewC) {
1681 // We didn't find any edges back to the original function's RefSCC, so the
1682 // new function belongs in a new RefSCC. The new RefSCC goes before the
1683 // original function's RefSCC.
1684 RefSCC *NewRC = createRefSCC(*this);
1685 NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1686 NewRC->SCCIndices[NewC] = 0;
1687 NewRC->SCCs.push_back(NewC);
1688 auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1689 PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1690 for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1691 RefSCCIndices[PostOrderRefSCCs[I]] = I;
1692 }
1693
1694 SCCMap[&NewN] = NewC;
1695
1696 OriginalN->insertEdgeInternal(NewN, EK);
1697}
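A sketch of the calling convention the asserts above require, not part of this file: the original function must already have a node, the new function must not, and the original function's body must already call or reference the new one (hypothetical names):

  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  // Called after outlining part of Orig into Outlined and rewriting Orig to
  // call (or take the address of) Outlined.
  static void registerOutlinedFunction(LazyCallGraph &G, Function &Orig,
                                       Function &Outlined) {
    G.addSplitFunction(Orig, Outlined);
    // The new node now has an SCC and RefSCC assigned.
    LazyCallGraph::Node &NewN = *G.lookup(Outlined);
    (void)G.lookupSCC(NewN);
    (void)G.lookupRefSCC(NewN);
  }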
1698
1699 void LazyCallGraph::addSplitRefRecursiveFunctions(
1700 Function &OriginalFunction, ArrayRef<Function *> NewFunctions) {
1701 assert(!NewFunctions.empty() && "Can't add zero functions");
1702 assert(lookup(OriginalFunction) &&
1703 "Original function's node should already exist");
1704 Node &OriginalN = get(OriginalFunction);
1705 RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1706
1707#ifdef EXPENSIVE_CHECKS
1708 OriginalRC->verify();
1709 auto VerifyOnExit = make_scope_exit([&]() {
1710 OriginalRC->verify();
1711 for (Function *NewFunction : NewFunctions)
1712 lookupRefSCC(get(*NewFunction))->verify();
1713 });
1714#endif
1715
1716 bool ExistsRefToOriginalRefSCC = false;
1717
1718 for (Function *NewFunction : NewFunctions) {
1719 Node &NewN = initNode(*NewFunction);
1720
1721 OriginalN->insertEdgeInternal(NewN, Edge::Kind::Ref);
1722
1723 // Check if there is any edge from any new function back to any function in
1724 // the original function's RefSCC.
1725 for (Edge &E : *NewN) {
1726 if (lookupRefSCC(E.getNode()) == OriginalRC) {
1727 ExistsRefToOriginalRefSCC = true;
1728 break;
1729 }
1730 }
1731 }
1732
1733 RefSCC *NewRC;
1734 if (ExistsRefToOriginalRefSCC) {
1735 // If there is any edge from any new function to any function in the
1736 // original function's RefSCC, all new functions will be in the same RefSCC
1737 // as the original function.
1738 NewRC = OriginalRC;
1739 } else {
1740 // Otherwise the new functions are in their own RefSCC.
1741 NewRC = createRefSCC(*this);
1742 // The new RefSCC goes before the original function's RefSCC in postorder
1743 // since there are only edges from the original function's RefSCC to the new
1744 // RefSCC.
1745 auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1746 PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1747 for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1748 RefSCCIndices[PostOrderRefSCCs[I]] = I;
1749 }
1750
1751 for (Function *NewFunction : NewFunctions) {
1752 Node &NewN = get(*NewFunction);
1753 // Each new function is in its own new SCC. The original function can only
1754 // have a ref edge to new functions, and no other existing functions can
1755 // have references to new functions. Each new function only has a ref edge
1756 // to the other new functions.
1757 SCC *NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1758 // The new SCCs are either sibling SCCs or parent SCCs to all other existing
1759 // SCCs in the RefSCC. Either way, they can go at the back of the postorder
1760 // SCC list.
1761 auto Index = NewRC->SCCIndices.size();
1762 NewRC->SCCIndices[NewC] = Index;
1763 NewRC->SCCs.push_back(NewC);
1764 SCCMap[&NewN] = NewC;
1765 }
1766
1767#ifndef NDEBUG
1768 for (Function *F1 : NewFunctions) {
1769 assert(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref &&
1770 "Expected ref edges from original function to every new function");
1771 Node &N1 = get(*F1);
1772 for (Function *F2 : NewFunctions) {
1773 if (F1 == F2)
1774 continue;
1775 Node &N2 = get(*F2);
1776 assert(!N1->lookup(N2)->isCall() &&
1777 "Edges between new functions must be ref edges");
1778 }
1779 }
1780#endif
1781}
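The batched variant above assumes every new function is only referenced, never called, by the original function and by the other new functions (for example, funclets produced when splitting a function). A caller-side sketch under those assumptions (illustrative only, hypothetical names):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static void registerOutlinedGroup(LazyCallGraph &G, Function &Orig,
                                    ArrayRef<Function *> NewFns) {
    // All new functions end up in one RefSCC, each in its own singleton SCC.
    G.addSplitRefRecursiveFunctions(Orig, NewFns);
  }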
1782
1783LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
1784 return *new (MappedN = BPA.Allocate()) Node(*this, F);
1785}
1786
1787void LazyCallGraph::updateGraphPtrs() {
1788 // Walk the node map to update their graph pointers. While this iterates in
1789 // an unstable order, the order has no effect, so it remains correct.
1790 for (auto &FunctionNodePair : NodeMap)
1791 FunctionNodePair.second->G = this;
1792
1793 for (auto *RC : PostOrderRefSCCs)
1794 RC->G = this;
1795}
1796
1797LazyCallGraph::Node &LazyCallGraph::initNode(Function &F) {
1798 Node &N = get(F);
1799 N.DFSNumber = N.LowLink = -1;
1800 N.populate();
1801 NodeMap[&F] = &N;
1802 return N;
1803}
1804
1805template <typename RootsT, typename GetBeginT, typename GetEndT,
1806 typename GetNodeT, typename FormSCCCallbackT>
1807void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1808 GetEndT &&GetEnd, GetNodeT &&GetNode,
1809 FormSCCCallbackT &&FormSCC) {
1810 using EdgeItT = decltype(GetBegin(std::declval<Node &>()));
1811
1812 SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
1813 SmallVector<Node *, 16> PendingSCCStack;
1814
1815 // Scan down the stack and DFS across the call edges.
1816 for (Node *RootN : Roots) {
1817 assert(DFSStack.empty() &&
1818 "Cannot begin a new root with a non-empty DFS stack!");
1819 assert(PendingSCCStack.empty() &&
1820 "Cannot begin a new root with pending nodes for an SCC!");
1821
1822 // Skip any nodes we've already reached in the DFS.
1823 if (RootN->DFSNumber != 0) {
1824 assert(RootN->DFSNumber == -1 &&
1825 "Shouldn't have any mid-DFS root nodes!");
1826 continue;
1827 }
1828
1829 RootN->DFSNumber = RootN->LowLink = 1;
1830 int NextDFSNumber = 2;
1831
1832 DFSStack.emplace_back(RootN, GetBegin(*RootN));
1833 do {
1834 auto [N, I] = DFSStack.pop_back_val();
1835 auto E = GetEnd(*N);
1836 while (I != E) {
1837 Node &ChildN = GetNode(I);
1838 if (ChildN.DFSNumber == 0) {
1839 // We haven't yet visited this child, so descend, pushing the current
1840 // node onto the stack.
1841 DFSStack.emplace_back(N, I);
1842
1843 ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
1844 N = &ChildN;
1845 I = GetBegin(*N);
1846 E = GetEnd(*N);
1847 continue;
1848 }
1849
1850 // If the child has already been added to some child component, it
1851 // couldn't impact the low-link of this parent because it isn't
1852 // connected, and thus its low-link isn't relevant so skip it.
1853 if (ChildN.DFSNumber == -1) {
1854 ++I;
1855 continue;
1856 }
1857
1858 // Track the lowest linked child as the lowest link for this node.
1859 assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
1860 if (ChildN.LowLink < N->LowLink)
1861 N->LowLink = ChildN.LowLink;
1862
1863 // Move to the next edge.
1864 ++I;
1865 }
1866
1867 // We've finished processing N and its descendants, put it on our pending
1868 // SCC stack to eventually get merged into an SCC of nodes.
1869 PendingSCCStack.push_back(N);
1870
1871 // If this node is linked to some lower entry, continue walking up the
1872 // stack.
1873 if (N->LowLink != N->DFSNumber)
1874 continue;
1875
1876 // Otherwise, we've completed an SCC. Append it to our post order list of
1877 // SCCs.
1878 int RootDFSNumber = N->DFSNumber;
1879 // Find the range of the node stack by walking down until we pass the
1880 // root DFS number.
1881 auto SCCNodes = make_range(
1882 PendingSCCStack.rbegin(),
1883 find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
1884 return N->DFSNumber < RootDFSNumber;
1885 }));
1886 // Form a new SCC out of these nodes and then clear them off our pending
1887 // stack.
1888 FormSCC(SCCNodes);
1889 PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
1890 } while (!DFSStack.empty());
1891 }
1892}
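buildGenericSCCs() is an iterative, worklist-based formulation of Tarjan's strongly connected components algorithm, parameterized over which edge range to walk. For comparison, here is a minimal recursive sketch of the same low-link bookkeeping on a plain adjacency-list graph (illustrative only, not LLVM code); like the routine above, it propagates a child's low-link only while the child is still pending and marks finished nodes with -1:

  #include <algorithm>
  #include <vector>

  struct TarjanSCC {
    const std::vector<std::vector<int>> &Adj; // Adjacency list of the graph.
    std::vector<int> DFSNum, LowLink;         // 0 = unvisited, -1 = in an SCC.
    std::vector<int> Pending;                 // Visited, not yet in an SCC.
    std::vector<std::vector<int>> SCCs;       // Result, in postorder.
    int NextDFS = 1;

    explicit TarjanSCC(const std::vector<std::vector<int>> &A)
        : Adj(A), DFSNum(A.size(), 0), LowLink(A.size(), 0) {}

    void visit(int N) {
      DFSNum[N] = LowLink[N] = NextDFS++;
      for (int Child : Adj[N]) {
        if (DFSNum[Child] == 0)
          visit(Child);                       // Tree edge: descend first.
        if (DFSNum[Child] != -1)              // Child not yet assigned to an SCC.
          LowLink[N] = std::min(LowLink[N], LowLink[Child]);
      }
      Pending.push_back(N);
      if (LowLink[N] != DFSNum[N])
        return;                               // Not an SCC root yet.
      // N is an SCC root: pop every pending node whose DFS number is not
      // lower than N's, exactly like the PendingSCCStack scan above.
      std::vector<int> SCC;
      while (!Pending.empty() && DFSNum[Pending.back()] >= DFSNum[N]) {
        SCC.push_back(Pending.back());
        DFSNum[Pending.back()] = -1;
        Pending.pop_back();
      }
      SCCs.push_back(SCC);
    }

    void run() {
      for (int N = 0, E = (int)Adj.size(); N != E; ++N)
        if (DFSNum[N] == 0)
          visit(N);
    }
  };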
1893
1894/// Build the internal SCCs for a RefSCC from a sequence of nodes.
1895///
1896/// Appends the SCCs to the provided vector and updates the map with their
1897/// indices. Both the vector and map must be empty when passed into this
1898/// routine.
1899void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
1900 assert(RC.SCCs.empty() && "Already built SCCs!");
1901 assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");
1902
1903 for (Node *N : Nodes) {
1904 assert(N->LowLink >= (*Nodes.begin())->LowLink &&
1905 "We cannot have a low link in an SCC lower than its root on the "
1906 "stack!");
1907
1908 // This node will go into the next RefSCC, clear out its DFS and low link
1909 // as we scan.
1910 N->DFSNumber = N->LowLink = 0;
1911 }
1912
1913 // Each RefSCC contains a DAG of the call SCCs. To build these, we do
1914 // a direct walk of the call edges using Tarjan's algorithm. We reuse the
1915 // internal storage as we won't need it for the outer graph's DFS any longer.
1916 buildGenericSCCs(
1917 Nodes, [](Node &N) { return N->call_begin(); },
1918 [](Node &N) { return N->call_end(); },
1919 [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
1920 [this, &RC](node_stack_range Nodes) {
1921 RC.SCCs.push_back(createSCC(RC, Nodes));
1922 for (Node &N : *RC.SCCs.back()) {
1923 N.DFSNumber = N.LowLink = -1;
1924 SCCMap[&N] = RC.SCCs.back();
1925 }
1926 });
1927
1928 // Wire up the SCC indices.
1929 for (int I = 0, Size = RC.SCCs.size(); I < Size; ++I)
1930 RC.SCCIndices[RC.SCCs[I]] = I;
1931}
1932
1933 void LazyCallGraph::buildRefSCCs() {
1934 if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
1935 // RefSCCs are either non-existent or already built!
1936 return;
1937
1938 assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");
1939
1940 SmallVector<Node *, 16> Roots;
1941 for (Edge &E : *this)
1942 Roots.push_back(&E.getNode());
1943
1944 // The roots will be iterated in order.
1945 buildGenericSCCs(
1946 Roots,
1947 [](Node &N) {
1948 // We need to populate each node as we begin to walk its edges.
1949 N.populate();
1950 return N->begin();
1951 },
1952 [](Node &N) { return N->end(); },
1953 [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
1954 [this](node_stack_range Nodes) {
1955 RefSCC *NewRC = createRefSCC(*this);
1956 buildSCCs(*NewRC, Nodes);
1957
1958 // Push the new node into the postorder list and remember its position
1959 // in the index map.
1960 bool Inserted =
1961 RefSCCIndices.try_emplace(NewRC, PostOrderRefSCCs.size()).second;
1962 (void)Inserted;
1963 assert(Inserted && "Cannot already have this RefSCC in the index map!");
1964 PostOrderRefSCCs.push_back(NewRC);
1965#ifdef EXPENSIVE_CHECKS
1966 NewRC->verify();
1967#endif
1968 });
1969}
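Once buildRefSCCs() has run, the reachable functions are organized into a postorder sequence of RefSCCs, each containing a postorder sequence of call-edge SCCs. A short sketch of walking that two-level structure through the public API (illustrative only, hypothetical helper name):

  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void dumpStructure(LazyCallGraph &G) {
    G.buildRefSCCs();
    for (LazyCallGraph::RefSCC &RC : G.postorder_ref_sccs()) // ref-edge components
      for (LazyCallGraph::SCC &C : RC)                       // call-edge components
        for (LazyCallGraph::Node &N : C)
          outs() << N.getFunction().getName() << "\n";
  }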
1970
1971 void LazyCallGraph::visitReferences(SmallVectorImpl<Constant *> &Worklist,
1972 SmallPtrSetImpl<Constant *> &Visited,
1973 function_ref<void(Function &)> Callback) {
1974 while (!Worklist.empty()) {
1975 Constant *C = Worklist.pop_back_val();
1976
1977 if (Function *F = dyn_cast<Function>(C)) {
1978 if (!F->isDeclaration())
1979 Callback(*F);
1980 continue;
1981 }
1982
1983 // blockaddresses are weird and don't participate in the call graph anyway,
1984 // skip them.
1985 if (isa<BlockAddress>(C))
1986 continue;
1987
1988 for (Value *Op : C->operand_values())
1989 if (Visited.insert(cast<Constant>(Op)).second)
1990 Worklist.push_back(cast<Constant>(Op));
1991 }
1992}
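visitReferences() performs the generic constant-graph walk that the edge-population code and getEdgeKind() above rely on. A self-contained sketch of seeding it from one function's instruction operands, mirroring those callers (illustrative only, hypothetical helper name):

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/IR/Constant.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/InstIterator.h"
  using namespace llvm;

  static SmallVector<Function *, 4> referencedFunctions(Function &F) {
    SmallVector<Constant *, 16> Worklist;
    SmallPtrSet<Constant *, 16> Visited;
    for (Instruction &I : instructions(F))
      for (Value *Op : I.operand_values())
        if (Constant *C = dyn_cast<Constant>(Op))
          if (Visited.insert(C).second)
            Worklist.push_back(C);

    SmallVector<Function *, 4> DefinedTargets;
    LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Target) {
      DefinedTargets.push_back(&Target); // Only defined functions are reported.
    });
    return DefinedTargets;
  }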
1993
1994AnalysisKey LazyCallGraphAnalysis::Key;
1995
1996 LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
1997
1998 static void printNode(raw_ostream &OS, LazyCallGraph::Node &N) {
1999 OS << " Edges in function: " << N.getFunction().getName() << "\n";
2000 for (LazyCallGraph::Edge &E : N.populate())
2001 OS << " " << (E.isCall() ? "call" : "ref ") << " -> "
2002 << E.getFunction().getName() << "\n";
2003
2004 OS << "\n";
2005}
2006
2007 static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &C) {
2008 OS << " SCC with " << C.size() << " functions:\n";
2009
2010 for (LazyCallGraph::Node &N : C)
2011 OS << " " << N.getFunction().getName() << "\n";
2012}
2013
2014 static void printRefSCC(raw_ostream &OS, LazyCallGraph::RefSCC &C) {
2015 OS << " RefSCC with " << C.size() << " call SCCs:\n";
2016
2017 for (LazyCallGraph::SCC &InnerC : C)
2018 printSCC(OS, InnerC);
2019
2020 OS << "\n";
2021}
2022
2023 PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M,
2024 ModuleAnalysisManager &AM) {
2025 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2026
2027 OS << "Printing the call graph for module: " << M.getModuleIdentifier()
2028 << "\n\n";
2029
2030 for (Function &F : M)
2031 printNode(OS, G.get(F));
2032
2033 G.buildRefSCCs();
2034 for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs())
2035 printRefSCC(OS, C);
2036
2037 return PreservedAnalyses::all();
2038}
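A sketch of driving this printer outside of a prebuilt pass pipeline, using the usual new-pass-manager analysis registration (illustrative only; the setup boilerplate is standard PassBuilder usage, not something defined in this file):

  #include "llvm/Analysis/CGSCCPassManager.h"
  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void printCallGraph(Module &M) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM;
    MPM.addPass(LazyCallGraphPrinterPass(errs()));
    MPM.run(M, MAM);
  }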
2039
2040 LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS)
2041 : OS(OS) {}
2042
2043 static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) {
2044 std::string Name =
2045 "\"" + DOT::EscapeString(std::string(N.getFunction().getName())) + "\"";
2046
2047 for (LazyCallGraph::Edge &E : N.populate()) {
2048 OS << " " << Name << " -> \""
2049 << DOT::EscapeString(std::string(E.getFunction().getName())) << "\"";
2050 if (!E.isCall()) // It is a ref edge.
2051 OS << " [style=dashed,label=\"ref\"]";
2052 OS << ";\n";
2053 }
2054
2055 OS << "\n";
2056}
2057
2058 PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M,
2059 ModuleAnalysisManager &AM) {
2060 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2061
2062 OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n";
2063
2064 for (Function &F : M)
2065 printNodeDOT(OS, G.get(F));
2066
2067 OS << "}\n";
2068
2069 return PreservedAnalyses::all();
2070}