LazyCallGraph.cpp (LLVM 14.0.0git)
1 //===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/LazyCallGraph.h"
10 #include "llvm/ADT/ArrayRef.h"
11 #include "llvm/ADT/STLExtras.h"
12 #include "llvm/ADT/ScopeExit.h"
13 #include "llvm/ADT/Sequence.h"
14 #include "llvm/ADT/SmallPtrSet.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/iterator_range.h"
17 #include "llvm/Analysis/TargetLibraryInfo.h"
18 #include "llvm/Analysis/VectorUtils.h"
19 #include "llvm/Config/llvm-config.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/GlobalVariable.h"
22 #include "llvm/IR/InstIterator.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/IR/PassManager.h"
26 #include "llvm/Support/Casting.h"
27 #include "llvm/Support/Compiler.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/GraphWriter.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include <algorithm>
32 #include <cassert>
33 #include <cstddef>
34 #include <iterator>
35 #include <string>
36 #include <tuple>
37 #include <utility>
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "lcg"
42 
43 void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
44  Edge::Kind EK) {
45  EdgeIndexMap.insert({&TargetN, Edges.size()});
46  Edges.emplace_back(TargetN, EK);
47 }
48 
49 void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
50  Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
51 }
52 
53 bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
54  auto IndexMapI = EdgeIndexMap.find(&TargetN);
55  if (IndexMapI == EdgeIndexMap.end())
56  return false;
57 
58  Edges[IndexMapI->second] = Edge();
59  EdgeIndexMap.erase(IndexMapI);
60  return true;
61 }
62 
63 static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
64  DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
65  LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
66  if (!EdgeIndexMap.insert({&N, Edges.size()}).second)
67  return;
68 
69  LLVM_DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
70  Edges.emplace_back(LazyCallGraph::Edge(N, EK));
71 }
72 
73 LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
74  assert(!Edges && "Must not have already populated the edges for this node!");
75 
76  LLVM_DEBUG(dbgs() << " Adding functions called by '" << getName()
77  << "' to the graph.\n");
78 
79  Edges = EdgeSequence();
80 
81  SmallVector<Constant *, 16> Worklist;
82  SmallPtrSet<Function *, 4> Callees;
83  SmallPtrSet<Constant *, 16> Visited;
84 
85  // Find all the potential call graph edges in this function. We track both
86  // actual call edges and indirect references to functions. The direct calls
87  // are trivially added, but to accumulate the latter we walk the instructions
88  // and add every operand which is a constant to the worklist to process
89  // afterward.
90  //
91  // Note that we consider *any* function with a definition to be a viable
92  // edge. Even if the function's definition is subject to replacement by
93  // some other module (say, a weak definition) there may still be
94  // optimizations which essentially speculate based on the definition and
95  // a way to check that the specific definition is in fact the one being
96  // used. For example, this could be done by moving the weak definition to
97  // a strong (internal) definition and making the weak definition be an
98  // alias. Then a test of the address of the weak function against the new
99  // strong definition's address would be an effective way to determine the
100  // safety of optimizing a direct call edge.
101  for (BasicBlock &BB : *F)
102  for (Instruction &I : BB) {
103  if (auto *CB = dyn_cast<CallBase>(&I))
104  if (Function *Callee = CB->getCalledFunction())
105  if (!Callee->isDeclaration())
106  if (Callees.insert(Callee).second) {
107  Visited.insert(Callee);
108  addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
109  LazyCallGraph::Edge::Call);
110  }
111 
112  for (Value *Op : I.operand_values())
113  if (Constant *C = dyn_cast<Constant>(Op))
114  if (Visited.insert(C).second)
115  Worklist.push_back(C);
116  }
117 
118  // We've collected all the constant (and thus potentially function or
119  // function containing) operands to all of the instructions in the function.
120  // Process them (recursively) collecting every function found.
121  visitReferences(Worklist, Visited, [&](Function &F) {
122  addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
123  LazyCallGraph::Edge::Ref);
124  });
125 
126  // Add implicit reference edges to any defined libcall functions (if we
127  // haven't found an explicit edge).
128  for (auto *F : G->LibFunctions)
129  if (!Visited.count(F))
130  addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
131  LazyCallGraph::Edge::Ref);
132 
133  return *Edges;
134 }
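// A minimal usage sketch (illustrative only; CG, M, GetTLI, and SomeFunction
// are assumed names in the caller and are not defined in this file):
// population is lazy, so a client calls Node::populate() before walking the
// node's edge sequence, which routes through populateSlow() above on first
// use.
//
//   LazyCallGraph CG(M, GetTLI);
//   LazyCallGraph::Node &N = CG.get(SomeFunction);
//   N.populate();
//   for (LazyCallGraph::Edge &E : *N)
//     dbgs() << (E.isCall() ? "call" : "ref") << " edge to "
//            << E.getFunction().getName() << "\n";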
135 
136 void LazyCallGraph::Node::replaceFunction(Function &NewF) {
137  assert(F != &NewF && "Must not replace a function with itself!");
138  F = &NewF;
139 }
140 
141 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
142 LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
143  dbgs() << *this << '\n';
144 }
145 #endif
146 
147 static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
148  LibFunc LF;
149 
150  // Either this is a normal library function or a "vectorizable"
151  // function. Not using the VFDatabase here because this query
152  // is related only to libraries handled via the TLI.
153  return TLI.getLibFunc(F, LF) ||
154  TLI.isKnownVectorFunctionInLibrary(F.getName());
155 }
156 
157 LazyCallGraph::LazyCallGraph(
158  Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
159  LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
160  << "\n");
161  for (Function &F : M) {
162  if (F.isDeclaration())
163  continue;
164  // If this function is a known lib function to LLVM then we want to
165  // synthesize reference edges to it to model the fact that LLVM can turn
166  // arbitrary code into a library function call.
167  if (isKnownLibFunction(F, GetTLI(F)))
168  LibFunctions.insert(&F);
169 
170  if (F.hasLocalLinkage())
171  continue;
172 
173  // External linkage defined functions have edges to them from other
174  // modules.
175  LLVM_DEBUG(dbgs() << " Adding '" << F.getName()
176  << "' to entry set of the graph.\n");
177  addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
178  }
179 
180  // Externally visible aliases of internal functions are also viable entry
181  // edges to the module.
182  for (auto &A : M.aliases()) {
183  if (A.hasLocalLinkage())
184  continue;
185  if (Function* F = dyn_cast<Function>(A.getAliasee())) {
186  LLVM_DEBUG(dbgs() << " Adding '" << F->getName()
187  << "' with alias '" << A.getName()
188  << "' to entry set of the graph.\n");
189  addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(*F), Edge::Ref);
190  }
191  }
192 
193  // Now add entry nodes for functions reachable via initializers to globals.
194  SmallVector<Constant *, 16> Worklist;
195  SmallPtrSet<Constant *, 16> Visited;
196  for (GlobalVariable &GV : M.globals())
197  if (GV.hasInitializer())
198  if (Visited.insert(GV.getInitializer()).second)
199  Worklist.push_back(GV.getInitializer());
200 
201  LLVM_DEBUG(
202  dbgs() << " Adding functions referenced by global initializers to the "
203  "entry set.\n");
204  visitReferences(Worklist, Visited, [&](Function &F) {
205  addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
206  LazyCallGraph::Edge::Ref);
207  });
208 }
209 
210 LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
211  : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
212  EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
213  SCCMap(std::move(G.SCCMap)),
214  LibFunctions(std::move(G.LibFunctions)) {
215  updateGraphPtrs();
216 }
217 
218 bool LazyCallGraph::invalidate(Module &, const PreservedAnalyses &PA,
219  ModuleAnalysisManager::Invalidator &) {
220  // Check whether the analysis, all analyses on functions, or the function's
221  // CFG have been preserved.
222  auto PAC = PA.getChecker<llvm::LazyCallGraphAnalysis>();
223  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>());
224 }
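// For context, a sketch of a module pass that keeps this analysis cached
// (hypothetical pass body, not part of this file): because it marks the
// analysis preserved, invalidate() above returns false and the graph survives
// the pass.
//
//   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
//     LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
//     // ... read CG without changing the call graph shape ...
//     PreservedAnalyses PA = PreservedAnalyses::none();
//     PA.preserve<LazyCallGraphAnalysis>();
//     return PA;
//   }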
225 
226 LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
227  BPA = std::move(G.BPA);
228  NodeMap = std::move(G.NodeMap);
229  EntryEdges = std::move(G.EntryEdges);
230  SCCBPA = std::move(G.SCCBPA);
231  SCCMap = std::move(G.SCCMap);
232  LibFunctions = std::move(G.LibFunctions);
233  updateGraphPtrs();
234  return *this;
235 }
236 
237 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
238 LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
239  dbgs() << *this << '\n';
240 }
241 #endif
242 
243 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
244 void LazyCallGraph::SCC::verify() {
245  assert(OuterRefSCC && "Can't have a null RefSCC!");
246  assert(!Nodes.empty() && "Can't have an empty SCC!");
247 
248  for (Node *N : Nodes) {
249  assert(N && "Can't have a null node!");
250  assert(OuterRefSCC->G->lookupSCC(*N) == this &&
251  "Node does not map to this SCC!");
252  assert(N->DFSNumber == -1 &&
253  "Must set DFS numbers to -1 when adding a node to an SCC!");
254  assert(N->LowLink == -1 &&
255  "Must set low link to -1 when adding a node to an SCC!");
256  for (Edge &E : **N)
257  assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
258 
259 #ifdef EXPENSIVE_CHECKS
260  // Verify that all nodes in this SCC can reach all other nodes.
261  SmallVector<Node *, 4> Worklist;
262  SmallPtrSet<Node *, 4> Visited;
263  Worklist.push_back(N);
264  while (!Worklist.empty()) {
265  Node *VisitingNode = Worklist.pop_back_val();
266  if (!Visited.insert(VisitingNode).second)
267  continue;
268  for (Edge &E : (*VisitingNode)->calls())
269  Worklist.push_back(&E.getNode());
270  }
271  for (Node *NodeToVisit : Nodes) {
272  assert(Visited.contains(NodeToVisit) &&
273  "Cannot reach all nodes within SCC");
274  }
275 #endif
276  }
277 }
278 #endif
279 
280 bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
281  if (this == &C)
282  return false;
283 
284  for (Node &N : *this)
285  for (Edge &E : N->calls())
286  if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
287  return true;
288 
289  // No edges found.
290  return false;
291 }
292 
293 bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
294  if (this == &TargetC)
295  return false;
296 
297  LazyCallGraph &G = *OuterRefSCC->G;
298 
299  // Start with this SCC.
300  SmallPtrSet<const SCC *, 16> Visited = {this};
301  SmallVector<const SCC *, 16> Worklist = {this};
302 
303  // Walk down the graph until we run out of edges or find a path to TargetC.
304  do {
305  const SCC &C = *Worklist.pop_back_val();
306  for (Node &N : C)
307  for (Edge &E : N->calls()) {
308  SCC *CalleeC = G.lookupSCC(E.getNode());
309  if (!CalleeC)
310  continue;
311 
312  // If the callee's SCC is the TargetC, we're done.
313  if (CalleeC == &TargetC)
314  return true;
315 
316  // If this is the first time we've reached this SCC, put it on the
317  // worklist to recurse through.
318  if (Visited.insert(CalleeC).second)
319  Worklist.push_back(CalleeC);
320  }
321  } while (!Worklist.empty());
322 
323  // No paths found.
324  return false;
325 }
326 
327 LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}
328 
329 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
330 LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
331  dbgs() << *this << '\n';
332 }
333 #endif
334 
335 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
336 void LazyCallGraph::RefSCC::verify() {
337  assert(G && "Can't have a null graph!");
338  assert(!SCCs.empty() && "Can't have an empty SCC!");
339 
340  // Verify basic properties of the SCCs.
341  SmallPtrSet<SCC *, 4> SCCSet;
342  for (SCC *C : SCCs) {
343  assert(C && "Can't have a null SCC!");
344  C->verify();
345  assert(&C->getOuterRefSCC() == this &&
346  "SCC doesn't think it is inside this RefSCC!");
347  bool Inserted = SCCSet.insert(C).second;
348  assert(Inserted && "Found a duplicate SCC!");
349  auto IndexIt = SCCIndices.find(C);
350  assert(IndexIt != SCCIndices.end() &&
351  "Found an SCC that doesn't have an index!");
352  }
353 
354  // Check that our indices map correctly.
355  for (auto &SCCIndexPair : SCCIndices) {
356  SCC *C = SCCIndexPair.first;
357  int i = SCCIndexPair.second;
358  assert(C && "Can't have a null SCC in the indices!");
359  assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
360  assert(SCCs[i] == C && "Index doesn't point to SCC!");
361  }
362 
363  // Check that the SCCs are in fact in post-order.
364  for (int i = 0, Size = SCCs.size(); i < Size; ++i) {
365  SCC &SourceSCC = *SCCs[i];
366  for (Node &N : SourceSCC)
367  for (Edge &E : *N) {
368  if (!E.isCall())
369  continue;
370  SCC &TargetSCC = *G->lookupSCC(E.getNode());
371  if (&TargetSCC.getOuterRefSCC() == this) {
372  assert(SCCIndices.find(&TargetSCC)->second <= i &&
373  "Edge between SCCs violates post-order relationship.");
374  continue;
375  }
376  }
377  }
378 
379 #ifdef EXPENSIVE_CHECKS
380  // Verify that all nodes in this RefSCC can reach all other nodes.
381  SmallVector<Node *> Nodes;
382  for (SCC *C : SCCs) {
383  for (Node &N : *C)
384  Nodes.push_back(&N);
385  }
386  for (Node *N : Nodes) {
387  SmallVector<Node *, 4> Worklist;
388  SmallPtrSet<Node *, 4> Visited;
389  Worklist.push_back(N);
390  while (!Worklist.empty()) {
391  Node *VisitingNode = Worklist.pop_back_val();
392  if (!Visited.insert(VisitingNode).second)
393  continue;
394  for (Edge &E : **VisitingNode)
395  Worklist.push_back(&E.getNode());
396  }
397  for (Node *NodeToVisit : Nodes) {
398  assert(Visited.contains(NodeToVisit) &&
399  "Cannot reach all nodes within RefSCC");
400  }
401  }
402 #endif
403 }
404 #endif
405 
406 bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
407  if (&RC == this)
408  return false;
409 
410  // Search all edges to see if this is a parent.
411  for (SCC &C : *this)
412  for (Node &N : C)
413  for (Edge &E : *N)
414  if (G->lookupRefSCC(E.getNode()) == &RC)
415  return true;
416 
417  return false;
418 }
419 
420 bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
421  if (&RC == this)
422  return false;
423 
424  // For each descendant of this RefSCC, see if one of its children is the
425  // argument. If not, add that descendant to the worklist and continue
426  // searching.
427  SmallVector<const RefSCC *, 4> Worklist;
428  SmallPtrSet<const RefSCC *, 4> Visited;
429  Worklist.push_back(this);
430  Visited.insert(this);
431  do {
432  const RefSCC &DescendantRC = *Worklist.pop_back_val();
433  for (SCC &C : DescendantRC)
434  for (Node &N : C)
435  for (Edge &E : *N) {
436  auto *ChildRC = G->lookupRefSCC(E.getNode());
437  if (ChildRC == &RC)
438  return true;
439  if (!ChildRC || !Visited.insert(ChildRC).second)
440  continue;
441  Worklist.push_back(ChildRC);
442  }
443  } while (!Worklist.empty());
444 
445  return false;
446 }
447 
448 /// Generic helper that updates a postorder sequence of SCCs for a potentially
449 /// cycle-introducing edge insertion.
450 ///
451 /// A postorder sequence of SCCs of a directed graph has one fundamental
452 /// property: all edges in the DAG of SCCs point "up" the sequence. That is,
453 /// all edges in the SCC DAG point to prior SCCs in the sequence.
454 ///
455 /// This routine both updates a postorder sequence and uses that sequence to
456 /// compute the set of SCCs connected into a cycle. It should only be called to
457 /// insert a "downward" edge which will require changing the sequence to
458 /// restore it to a postorder.
459 ///
460 /// When inserting an edge from an earlier SCC to a later SCC in some postorder
461 /// sequence, all of the SCCs which may be impacted are in the closed range of
462 /// those two within the postorder sequence. The algorithm used here to restore
463 /// the state is as follows:
464 ///
465 /// 1) Starting from the source SCC, construct a set of SCCs which reach the
466 /// source SCC consisting of just the source SCC. Then scan toward the
467 /// target SCC in postorder and for each SCC, if it has an edge to an SCC
468 /// in the set, add it to the set. Otherwise, the scanned SCC does not reach
469 /// the source, so move it in the postorder sequence to immediately before
470 /// the source SCC, shifting the source SCC and all SCCs in the set one
471 /// position toward the target SCC. Stop scanning after processing the
472 /// target SCC.
473 /// 2) If the source SCC is now past the target SCC in the postorder sequence,
474 /// and thus the new edge will flow toward the start, we are done.
475 /// 3) Otherwise, starting from the target SCC, walk all edges which reach an
476 /// SCC between the source and the target, and add them to the set of
477 /// connected SCCs, then recurse through them. Once a complete set of the
478 /// SCCs the target connects to is known, hoist the remaining SCCs between
479 /// the source and the target to be above the target. Note that there is no
480 /// need to process the source SCC, it is already known to connect.
481 /// 4) At this point, all of the SCCs in the closed range between the source
482 /// SCC and the target SCC in the postorder sequence are connected,
483 /// including the target SCC and the source SCC. Inserting the edge from
484 /// the source SCC to the target SCC will form a cycle out of precisely
485 /// these SCCs. Thus we can merge all of the SCCs in this closed range into
486 /// a single SCC.
487 ///
488 /// This process has various important properties:
489 /// - Only mutates the SCCs when adding the edge actually changes the SCC
490 /// structure.
491 /// - Never mutates SCCs which are unaffected by the change.
492 /// - Updates the postorder sequence to correctly satisfy the postorder
493 /// constraint after the edge is inserted.
494 /// - Only reorders SCCs in the closed postorder sequence from the source to
495 /// the target, so easy to bound how much has changed even in the ordering.
496 /// - Big-O is the number of edges in the closed postorder range of SCCs from
497 /// source to target.
498 ///
499 /// This helper routine, in addition to updating the postorder sequence itself
500 /// will also update a map from SCCs to indices within that sequence.
501 ///
502 /// The sequence and the map must operate on pointers to the SCC type.
503 ///
504 /// Two callbacks must be provided. The first computes the subset of SCCs in
505 /// the postorder closed range from the source to the target which connect to
506 /// the source SCC via some (transitive) set of edges. The second computes the
507 /// subset of the same range which the target SCC connects to via some
508 /// (transitive) set of edges. Both callbacks should populate the set argument
509 /// provided.
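///
/// A small worked example (illustrative, not part of the algorithm statement):
/// given the postorder sequence [S, A, B, T] and a new edge from S to T, step
/// 1 asks which of A, B, and T reach S. If A does not, it is shifted to just
/// before S, giving [A, S, B, T]. If T does reach S (say through B), then S,
/// B, and T form a cycle with the new edge and the range of SCCs to merge into
/// T is returned. If T does not reach S, T ends up immediately before S, the
/// postorder property is restored, and an empty range is returned.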
510 template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
511  typename ComputeSourceConnectedSetCallableT,
512  typename ComputeTargetConnectedSetCallableT>
513 static iterator_range<typename PostorderSequenceT::iterator>
514 updatePostorderSequenceForEdgeInsertion(
515  SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
516  SCCIndexMapT &SCCIndices,
517  ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
518  ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
519  int SourceIdx = SCCIndices[&SourceSCC];
520  int TargetIdx = SCCIndices[&TargetSCC];
521  assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");
522 
523  SmallPtrSet<SCCT *, 4> ConnectedSet;
524 
525  // Compute the SCCs which (transitively) reach the source.
526  ComputeSourceConnectedSet(ConnectedSet);
527 
528  // Partition the SCCs in this part of the post-order sequence so only SCCs
529  // connecting to the source remain between it and the target. This is
530  // a benign partition as it preserves postorder.
531  auto SourceI = std::stable_partition(
532  SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
533  [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
534  for (int i = SourceIdx, e = TargetIdx + 1; i < e; ++i)
535  SCCIndices.find(SCCs[i])->second = i;
536 
537  // If the target doesn't connect to the source, then we've corrected the
538  // post-order and there are no cycles formed.
539  if (!ConnectedSet.count(&TargetSCC)) {
540  assert(SourceI > (SCCs.begin() + SourceIdx) &&
541  "Must have moved the source to fix the post-order.");
542  assert(*std::prev(SourceI) == &TargetSCC &&
543  "Last SCC to move should have been the target.");
544 
545  // Return an empty range at the target SCC indicating there is nothing to
546  // merge.
547  return make_range(std::prev(SourceI), std::prev(SourceI));
548  }
549 
550  assert(SCCs[TargetIdx] == &TargetSCC &&
551  "Should not have moved target if connected!");
552  SourceIdx = SourceI - SCCs.begin();
553  assert(SCCs[SourceIdx] == &SourceSCC &&
554  "Bad updated index computation for the source SCC!");
555 
556 
557  // See whether there are any remaining intervening SCCs between the source
558  // and target. If so we need to make sure they all are reachable from the
559  // target.
560  if (SourceIdx + 1 < TargetIdx) {
561  ConnectedSet.clear();
562  ComputeTargetConnectedSet(ConnectedSet);
563 
564  // Partition SCCs so that only SCCs reached from the target remain between
565  // the source and the target. This preserves postorder.
566  auto TargetI = std::stable_partition(
567  SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
568  [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
569  for (int i = SourceIdx + 1, e = TargetIdx + 1; i < e; ++i)
570  SCCIndices.find(SCCs[i])->second = i;
571  TargetIdx = std::prev(TargetI) - SCCs.begin();
572  assert(SCCs[TargetIdx] == &TargetSCC &&
573  "Should always end with the target!");
574  }
575 
576  // At this point, we know that connecting source to target forms a cycle
577  // because target connects back to source, and we know that all of the SCCs
578  // between the source and target in the postorder sequence participate in that
579  // cycle.
580  return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
581 }
582 
583 bool
584 LazyCallGraph::RefSCC::switchInternalEdgeToCall(
585  Node &SourceN, Node &TargetN,
586  function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
587  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
588  SmallVector<SCC *, 1> DeletedSCCs;
589 
590 #ifdef EXPENSIVE_CHECKS
591  verify();
592  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
593 #endif
594 
595  SCC &SourceSCC = *G->lookupSCC(SourceN);
596  SCC &TargetSCC = *G->lookupSCC(TargetN);
597 
598  // If the two nodes are already part of the same SCC, we're also done as
599  // we've just added more connectivity.
600  if (&SourceSCC == &TargetSCC) {
601  SourceN->setEdgeKind(TargetN, Edge::Call);
602  return false; // No new cycle.
603  }
604 
605  // At this point we leverage the postorder list of SCCs to detect when the
606  // insertion of an edge changes the SCC structure in any way.
607  //
608  // First and foremost, we can eliminate the need for any changes when the
609  // edge is toward the beginning of the postorder sequence because all edges
610  // flow in that direction already. Thus adding a new one cannot form a cycle.
611  int SourceIdx = SCCIndices[&SourceSCC];
612  int TargetIdx = SCCIndices[&TargetSCC];
613  if (TargetIdx < SourceIdx) {
614  SourceN->setEdgeKind(TargetN, Edge::Call);
615  return false; // No new cycle.
616  }
617 
618  // Compute the SCCs which (transitively) reach the source.
619  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
620 #ifdef EXPENSIVE_CHECKS
621  // Check that the RefSCC is still valid before computing this as the
622  // results will be nonsensical if we've broken its invariants.
623  verify();
624 #endif
625  ConnectedSet.insert(&SourceSCC);
626  auto IsConnected = [&](SCC &C) {
627  for (Node &N : C)
628  for (Edge &E : N->calls())
629  if (ConnectedSet.count(G->lookupSCC(E.getNode())))
630  return true;
631 
632  return false;
633  };
634 
635  for (SCC *C :
636  make_range(SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1))
637  if (IsConnected(*C))
638  ConnectedSet.insert(C);
639  };
640 
641  // Use a normal worklist to find which SCCs the target connects to. We still
642  // bound the search based on the range in the postorder list we care about,
643  // but because this is forward connectivity we just "recurse" through the
644  // edges.
645  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
646 #ifdef EXPENSIVE_CHECKS
647  // Check that the RefSCC is still valid before computing this as the
648  // results will be nonsensical if we've broken its invariants.
649  verify();
650 #endif
651  ConnectedSet.insert(&TargetSCC);
652  SmallVector<SCC *, 4> Worklist;
653  Worklist.push_back(&TargetSCC);
654  do {
655  SCC &C = *Worklist.pop_back_val();
656  for (Node &N : C)
657  for (Edge &E : *N) {
658  if (!E.isCall())
659  continue;
660  SCC &EdgeC = *G->lookupSCC(E.getNode());
661  if (&EdgeC.getOuterRefSCC() != this)
662  // Not in this RefSCC...
663  continue;
664  if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
665  // Not in the postorder sequence between source and target.
666  continue;
667 
668  if (ConnectedSet.insert(&EdgeC).second)
669  Worklist.push_back(&EdgeC);
670  }
671  } while (!Worklist.empty());
672  };
673 
674  // Use a generic helper to update the postorder sequence of SCCs and return
675  // a range of any SCCs connected into a cycle by inserting this edge. This
676  // routine will also take care of updating the indices into the postorder
677  // sequence.
678  auto MergeRange = updatePostorderSequenceForEdgeInsertion(
679  SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
680  ComputeTargetConnectedSet);
681 
682  // Run the user's callback on the merged SCCs before we actually merge them.
683  if (MergeCB)
684  MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));
685 
686  // If the merge range is empty, then adding the edge didn't actually form any
687  // new cycles. We're done.
688  if (MergeRange.empty()) {
689  // Now that the SCC structure is finalized, flip the kind to call.
690  SourceN->setEdgeKind(TargetN, Edge::Call);
691  return false; // No new cycle.
692  }
693 
694 #ifdef EXPENSIVE_CHECKS
695  // Before merging, check that the RefSCC remains valid after all the
696  // postorder updates.
697  verify();
698 #endif
699 
700  // Otherwise we need to merge all of the SCCs in the cycle into a single
701  // result SCC.
702  //
703  // NB: We merge into the target because all of these functions were already
704  // reachable from the target, meaning any SCC-wide properties deduced about it
705  // other than the set of functions within it will not have changed.
706  for (SCC *C : MergeRange) {
707  assert(C != &TargetSCC &&
708  "We merge *into* the target and shouldn't process it here!");
709  SCCIndices.erase(C);
710  TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
711  for (Node *N : C->Nodes)
712  G->SCCMap[N] = &TargetSCC;
713  C->clear();
714  DeletedSCCs.push_back(C);
715  }
716 
717  // Erase the merged SCCs from the list and update the indices of the
718  // remaining SCCs.
719  int IndexOffset = MergeRange.end() - MergeRange.begin();
720  auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
721  for (SCC *C : make_range(EraseEnd, SCCs.end()))
722  SCCIndices[C] -= IndexOffset;
723 
724  // Now that the SCC structure is finalized, flip the kind to call.
725  SourceN->setEdgeKind(TargetN, Edge::Call);
726 
727  // And we're done, but we did form a new cycle.
728  return true;
729 }
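// A hedged sketch of a call site (RC, SourceN, TargetN, and the invalidation
// step are assumed names, not taken from this file): a caller that has turned
// a reference into a direct call flips the edge and reacts to any SCC merges
// through the callback before the merged SCCs are emptied.
//
//   bool FormedCycle = RC.switchInternalEdgeToCall(
//       SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> MergedSCCs) {
//         for (LazyCallGraph::SCC *C : MergedSCCs)
//           ; // drop any per-SCC cached results keyed on C here
//       });
//   (void)FormedCycle; // true iff the new call edge formed a cycle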
730 
731 void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
732  Node &TargetN) {
733  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
734 
735 #ifdef EXPENSIVE_CHECKS
736  verify();
737  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
738 #endif
739 
740  assert(G->lookupRefSCC(SourceN) == this &&
741  "Source must be in this RefSCC.");
742  assert(G->lookupRefSCC(TargetN) == this &&
743  "Target must be in this RefSCC.");
744  assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
745  "Source and Target must be in separate SCCs for this to be trivial!");
746 
747  // Set the edge kind.
748  SourceN->setEdgeKind(TargetN, Edge::Ref);
749 }
750 
751 iterator_range<LazyCallGraph::RefSCC::iterator>
752 LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
753  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
754 
755 #ifdef EXPENSIVE_CHECKS
756  verify();
757  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
758 #endif
759 
760  assert(G->lookupRefSCC(SourceN) == this &&
761  "Source must be in this RefSCC.");
762  assert(G->lookupRefSCC(TargetN) == this &&
763  "Target must be in this RefSCC.");
764 
765  SCC &TargetSCC = *G->lookupSCC(TargetN);
766  assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
767  "the same SCC to require the "
768  "full CG update.");
769 
770  // Set the edge kind.
771  SourceN->setEdgeKind(TargetN, Edge::Ref);
772 
773  // Otherwise we are removing a call edge from a single SCC. This may break
774  // the cycle. In order to compute the new set of SCCs, we need to do a small
775  // DFS over the nodes within the SCC to form any sub-cycles that remain as
776  // distinct SCCs and compute a postorder over the resulting SCCs.
777  //
778  // However, we specially handle the target node. The target node is known to
779  // reach all other nodes in the original SCC by definition. This means that
780  // we want the old SCC to be replaced with an SCC containing that node as it
781  // will be the root of whatever SCC DAG results from the DFS. Assumptions
782  // about an SCC such as the set of functions called will continue to hold,
783  // etc.
784 
785  SCC &OldSCC = TargetSCC;
786  SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
787  SmallVector<Node *, 16> PendingSCCStack;
788  SmallVector<SCC *, 4> NewSCCs;
789 
790  // Prepare the nodes for a fresh DFS.
791  SmallVector<Node *, 16> Worklist;
792  Worklist.swap(OldSCC.Nodes);
793  for (Node *N : Worklist) {
794  N->DFSNumber = N->LowLink = 0;
795  G->SCCMap.erase(N);
796  }
797 
798  // Force the target node to be in the old SCC. This also enables us to take
799  // a very significant short-cut in the standard Tarjan walk to re-form SCCs
800  // below: whenever we build an edge that reaches the target node, we know
801  // that the target node eventually connects back to all other nodes in our
802  // walk. As a consequence, we can detect and handle participants in that
803  // cycle without walking all the edges that form this connection, and instead
804  // by relying on the fundamental guarantee coming into this operation (all
805  // nodes are reachable from the target due to previously forming an SCC).
806  TargetN.DFSNumber = TargetN.LowLink = -1;
807  OldSCC.Nodes.push_back(&TargetN);
808  G->SCCMap[&TargetN] = &OldSCC;
809 
810  // Scan down the stack and DFS across the call edges.
811  for (Node *RootN : Worklist) {
812  assert(DFSStack.empty() &&
813  "Cannot begin a new root with a non-empty DFS stack!");
814  assert(PendingSCCStack.empty() &&
815  "Cannot begin a new root with pending nodes for an SCC!");
816 
817  // Skip any nodes we've already reached in the DFS.
818  if (RootN->DFSNumber != 0) {
819  assert(RootN->DFSNumber == -1 &&
820  "Shouldn't have any mid-DFS root nodes!");
821  continue;
822  }
823 
824  RootN->DFSNumber = RootN->LowLink = 1;
825  int NextDFSNumber = 2;
826 
827  DFSStack.push_back({RootN, (*RootN)->call_begin()});
828  do {
829  Node *N;
830  EdgeSequence::call_iterator I;
831  std::tie(N, I) = DFSStack.pop_back_val();
832  auto E = (*N)->call_end();
833  while (I != E) {
834  Node &ChildN = I->getNode();
835  if (ChildN.DFSNumber == 0) {
836  // We haven't yet visited this child, so descend, pushing the current
837  // node onto the stack.
838  DFSStack.push_back({N, I});
839 
840  assert(!G->SCCMap.count(&ChildN) &&
841  "Found a node with 0 DFS number but already in an SCC!");
842  ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
843  N = &ChildN;
844  I = (*N)->call_begin();
845  E = (*N)->call_end();
846  continue;
847  }
848 
849  // Check for the child already being part of some component.
850  if (ChildN.DFSNumber == -1) {
851  if (G->lookupSCC(ChildN) == &OldSCC) {
852  // If the child is part of the old SCC, we know that it can reach
853  // every other node, so we have formed a cycle. Pull the entire DFS
854  // and pending stacks into it. See the comment above about setting
855  // up the old SCC for why we do this.
856  int OldSize = OldSCC.size();
857  OldSCC.Nodes.push_back(N);
858  OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
859  PendingSCCStack.clear();
860  while (!DFSStack.empty())
861  OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
862  for (Node &N : drop_begin(OldSCC, OldSize)) {
863  N.DFSNumber = N.LowLink = -1;
864  G->SCCMap[&N] = &OldSCC;
865  }
866  N = nullptr;
867  break;
868  }
869 
870  // If the child has already been added to some child component, it
871  // couldn't impact the low-link of this parent because it isn't
872  // connected, and thus its low-link isn't relevant so skip it.
873  ++I;
874  continue;
875  }
876 
877  // Track the lowest linked child as the lowest link for this node.
878  assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
879  if (ChildN.LowLink < N->LowLink)
880  N->LowLink = ChildN.LowLink;
881 
882  // Move to the next edge.
883  ++I;
884  }
885  if (!N)
886  // Cleared the DFS early, start another round.
887  break;
888 
889  // We've finished processing N and its descendants, put it on our pending
890  // SCC stack to eventually get merged into an SCC of nodes.
891  PendingSCCStack.push_back(N);
892 
893  // If this node is linked to some lower entry, continue walking up the
894  // stack.
895  if (N->LowLink != N->DFSNumber)
896  continue;
897 
898  // Otherwise, we've completed an SCC. Append it to our post order list of
899  // SCCs.
900  int RootDFSNumber = N->DFSNumber;
901  // Find the range of the node stack by walking down until we pass the
902  // root DFS number.
903  auto SCCNodes = make_range(
904  PendingSCCStack.rbegin(),
905  find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
906  return N->DFSNumber < RootDFSNumber;
907  }));
908 
909  // Form a new SCC out of these nodes and then clear them off our pending
910  // stack.
911  NewSCCs.push_back(G->createSCC(*this, SCCNodes));
912  for (Node &N : *NewSCCs.back()) {
913  N.DFSNumber = N.LowLink = -1;
914  G->SCCMap[&N] = NewSCCs.back();
915  }
916  PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
917  } while (!DFSStack.empty());
918  }
919 
920  // Insert the remaining SCCs before the old one. The old SCC can reach all
921  // other SCCs we form because it contains the target node of the removed edge
922  // of the old SCC. This means that we will have edges into all of the new
923  // SCCs, which means the old one must come last for postorder.
924  int OldIdx = SCCIndices[&OldSCC];
925  SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());
926 
927  // Update the mapping from SCC* to index to use the new SCC*s, and remove the
928  // old SCC from the mapping.
929  for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
930  SCCIndices[SCCs[Idx]] = Idx;
931 
932  return make_range(SCCs.begin() + OldIdx,
933  SCCs.begin() + OldIdx + NewSCCs.size());
934 }
935 
936 void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
937  Node &TargetN) {
938  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
939 
940  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
941  assert(G->lookupRefSCC(TargetN) != this &&
942  "Target must not be in this RefSCC.");
943 #ifdef EXPENSIVE_CHECKS
944  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
945  "Target must be a descendant of the Source.");
946 #endif
947 
948  // Edges between RefSCCs are the same regardless of call or ref, so we can
949  // just flip the edge here.
950  SourceN->setEdgeKind(TargetN, Edge::Call);
951 
952 #ifdef EXPENSIVE_CHECKS
953  verify();
954 #endif
955 }
956 
957 void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
958  Node &TargetN) {
959  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
960 
961  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
962  assert(G->lookupRefSCC(TargetN) != this &&
963  "Target must not be in this RefSCC.");
964 #ifdef EXPENSIVE_CHECKS
965  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
966  "Target must be a descendant of the Source.");
967 #endif
968 
969  // Edges between RefSCCs are the same regardless of call or ref, so we can
970  // just flip the edge here.
971  SourceN->setEdgeKind(TargetN, Edge::Ref);
972 
973 #ifdef EXPENSIVE_CHECKS
974  verify();
975 #endif
976 }
977 
978 void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
979  Node &TargetN) {
980  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
981  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
982 
983  SourceN->insertEdgeInternal(TargetN, Edge::Ref);
984 
985 #ifdef EXPENSIVE_CHECKS
986  verify();
987 #endif
988 }
989 
990 void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
991  Edge::Kind EK) {
992  // First insert it into the caller.
993  SourceN->insertEdgeInternal(TargetN, EK);
994 
995  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
996 
997  assert(G->lookupRefSCC(TargetN) != this &&
998  "Target must not be in this RefSCC.");
999 #ifdef EXPENSIVE_CHECKS
1000  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
1001  "Target must be a descendant of the Source.");
1002 #endif
1003 
1004 #ifdef EXPENSIVE_CHECKS
1005  verify();
1006 #endif
1007 }
1008 
1009 SmallVector<LazyCallGraph::RefSCC *, 1>
1010 LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
1011  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
1012  RefSCC &SourceC = *G->lookupRefSCC(SourceN);
1013  assert(&SourceC != this && "Source must not be in this RefSCC.");
1014 #ifdef EXPENSIVE_CHECKS
1015  assert(SourceC.isDescendantOf(*this) &&
1016  "Source must be a descendant of the Target.");
1017 #endif
1018 
1019  SmallVector<RefSCC *, 1> DeletedRefSCCs;
1020 
1021 #ifdef EXPENSIVE_CHECKS
1022  verify();
1023  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1024 #endif
1025 
1026  int SourceIdx = G->RefSCCIndices[&SourceC];
1027  int TargetIdx = G->RefSCCIndices[this];
1028  assert(SourceIdx < TargetIdx &&
1029  "Postorder list doesn't see edge as incoming!");
1030 
1031  // Compute the RefSCCs which (transitively) reach the source. We do this by
1032  // working backwards from the source using the parent set in each RefSCC,
1033  // skipping any RefSCCs that don't fall in the postorder range. This has the
1034  // advantage of walking the sparser parent edge (in high fan-out graphs) but
1035  // more importantly this removes examining all forward edges in all RefSCCs
1036  // within the postorder range which aren't in fact connected. Only connected
1037  // RefSCCs (and their edges) are visited here.
1038  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1039  Set.insert(&SourceC);
1040  auto IsConnected = [&](RefSCC &RC) {
1041  for (SCC &C : RC)
1042  for (Node &N : C)
1043  for (Edge &E : *N)
1044  if (Set.count(G->lookupRefSCC(E.getNode())))
1045  return true;
1046 
1047  return false;
1048  };
1049 
1050  for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
1051  G->PostOrderRefSCCs.begin() + TargetIdx + 1))
1052  if (IsConnected(*C))
1053  Set.insert(C);
1054  };
1055 
1056  // Use a normal worklist to find which SCCs the target connects to. We still
1057  // bound the search based on the range in the postorder list we care about,
1058  // but because this is forward connectivity we just "recurse" through the
1059  // edges.
1060  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1061  Set.insert(this);
1062  SmallVector<RefSCC *, 4> Worklist;
1063  Worklist.push_back(this);
1064  do {
1065  RefSCC &RC = *Worklist.pop_back_val();
1066  for (SCC &C : RC)
1067  for (Node &N : C)
1068  for (Edge &E : *N) {
1069  RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
1070  if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
1071  // Not in the postorder sequence between source and target.
1072  continue;
1073 
1074  if (Set.insert(&EdgeRC).second)
1075  Worklist.push_back(&EdgeRC);
1076  }
1077  } while (!Worklist.empty());
1078  };
1079 
1080  // Use a generic helper to update the postorder sequence of RefSCCs and return
1081  // a range of any RefSCCs connected into a cycle by inserting this edge. This
1082  // routine will also take care of updating the indices into the postorder
1083  // sequence.
1084  iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
1085  updatePostorderSequenceForEdgeInsertion(
1086  SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
1087  ComputeSourceConnectedSet, ComputeTargetConnectedSet);
1088 
1089  // Build a set so we can do fast tests for whether a RefSCC will end up as
1090  // part of the merged RefSCC.
1091  SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());
1092 
1093  // This RefSCC will always be part of that set, so just insert it here.
1094  MergeSet.insert(this);
1095 
1096  // Now that we have identified all of the SCCs which need to be merged into
1097  // a connected set with the inserted edge, merge all of them into this SCC.
1098  SmallVector<SCC *, 16> MergedSCCs;
1099  int SCCIndex = 0;
1100  for (RefSCC *RC : MergeRange) {
1101  assert(RC != this && "We're merging into the target RefSCC, so it "
1102  "shouldn't be in the range.");
1103 
1104  // Walk the inner SCCs to update their up-pointer and walk all the edges to
1105  // update any parent sets.
1106  // FIXME: We should try to find a way to avoid this (rather expensive) edge
1107  // walk by updating the parent sets in some other manner.
1108  for (SCC &InnerC : *RC) {
1109  InnerC.OuterRefSCC = this;
1110  SCCIndices[&InnerC] = SCCIndex++;
1111  for (Node &N : InnerC)
1112  G->SCCMap[&N] = &InnerC;
1113  }
1114 
1115  // Now merge in the SCCs. We can actually move here so try to reuse storage
1116  // the first time through.
1117  if (MergedSCCs.empty())
1118  MergedSCCs = std::move(RC->SCCs);
1119  else
1120  MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
1121  RC->SCCs.clear();
1122  DeletedRefSCCs.push_back(RC);
1123  }
1124 
1125  // Append our original SCCs to the merged list and move it into place.
1126  for (SCC &InnerC : *this)
1127  SCCIndices[&InnerC] = SCCIndex++;
1128  MergedSCCs.append(SCCs.begin(), SCCs.end());
1129  SCCs = std::move(MergedSCCs);
1130 
1131  // Remove the merged away RefSCCs from the post order sequence.
1132  for (RefSCC *RC : MergeRange)
1133  G->RefSCCIndices.erase(RC);
1134  int IndexOffset = MergeRange.end() - MergeRange.begin();
1135  auto EraseEnd =
1136  G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
1137  for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
1138  G->RefSCCIndices[RC] -= IndexOffset;
1139 
1140  // At this point we have a merged RefSCC with a post-order SCCs list, just
1141  // connect the nodes to form the new edge.
1142  SourceN->insertEdgeInternal(TargetN, Edge::Ref);
1143 
1144  // We return the list of SCCs which were merged so that callers can
1145  // invalidate any data they have associated with those SCCs. Note that these
1146  // SCCs are no longer in an interesting state (they are totally empty) but
1147  // the pointers will remain stable for the life of the graph itself.
1148  return DeletedRefSCCs;
1149 }
1150 
1151 void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
1152  assert(G->lookupRefSCC(SourceN) == this &&
1153  "The source must be a member of this RefSCC.");
1154  assert(G->lookupRefSCC(TargetN) != this &&
1155  "The target must not be a member of this RefSCC");
1156 
1157 #ifdef EXPENSIVE_CHECKS
1158  verify();
1159  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1160 #endif
1161 
1162  // First remove it from the node.
1163  bool Removed = SourceN->removeEdgeInternal(TargetN);
1164  (void)Removed;
1165  assert(Removed && "Target not in the edge set for this caller?");
1166 }
1167 
1168 SmallVector<LazyCallGraph::RefSCC *, 1>
1169 LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
1170  ArrayRef<Node *> TargetNs) {
1171  // We return a list of the resulting *new* RefSCCs in post-order.
1172  SmallVector<RefSCC *, 1> Result;
1173 
1174 #ifdef EXPENSIVE_CHECKS
1175  // Verify the RefSCC is valid to start with and that either we return an empty
1176  // list of result RefSCCs and this RefSCC remains valid, or we return new
1177  // RefSCCs and this RefSCC is dead.
1178  verify();
1179  auto VerifyOnExit = make_scope_exit([&]() {
1180  // If we didn't replace our RefSCC with new ones, check that this one
1181  // remains valid.
1182  if (G)
1183  verify();
1184  });
1185 #endif
1186 
1187  // First remove the actual edges.
1188  for (Node *TargetN : TargetNs) {
1189  assert(!(*SourceN)[*TargetN].isCall() &&
1190  "Cannot remove a call edge, it must first be made a ref edge");
1191 
1192  bool Removed = SourceN->removeEdgeInternal(*TargetN);
1193  (void)Removed;
1194  assert(Removed && "Target not in the edge set for this caller?");
1195  }
1196 
1197  // Direct self references don't impact the ref graph at all.
1198  if (llvm::all_of(TargetNs,
1199  [&](Node *TargetN) { return &SourceN == TargetN; }))
1200  return Result;
1201 
1202  // If all targets are in the same SCC as the source, because no call edges
1203  // were removed there is no RefSCC structure change.
1204  SCC &SourceC = *G->lookupSCC(SourceN);
1205  if (llvm::all_of(TargetNs, [&](Node *TargetN) {
1206  return G->lookupSCC(*TargetN) == &SourceC;
1207  }))
1208  return Result;
1209 
1210  // We build somewhat synthetic new RefSCCs by providing a postorder mapping
1211  // for each inner SCC. We store these inside the low-link field of the nodes
1212  // rather than associated with SCCs because this saves a round-trip through
1213  // the node->SCC map and in the common case, SCCs are small. We will verify
1214  // that we always give the same number to every node in the SCC such that
1215  // these are equivalent.
1216  int PostOrderNumber = 0;
1217 
1218  // Reset all the other nodes to prepare for a DFS over them, and add them to
1219  // our worklist.
1220  SmallVector<Node *, 8> Worklist;
1221  for (SCC *C : SCCs) {
1222  for (Node &N : *C)
1223  N.DFSNumber = N.LowLink = 0;
1224 
1225  Worklist.append(C->Nodes.begin(), C->Nodes.end());
1226  }
1227 
1228  // Track the number of nodes in this RefSCC so that we can quickly recognize
1229  // an important special case of the edge removal not breaking the cycle of
1230  // this RefSCC.
1231  const int NumRefSCCNodes = Worklist.size();
1232 
1233  SmallVector<std::pair<Node *, EdgeSequence::iterator>, 4> DFSStack;
1234  SmallVector<Node *, 4> PendingRefSCCStack;
1235  do {
1236  assert(DFSStack.empty() &&
1237  "Cannot begin a new root with a non-empty DFS stack!");
1238  assert(PendingRefSCCStack.empty() &&
1239  "Cannot begin a new root with pending nodes for an SCC!");
1240 
1241  Node *RootN = Worklist.pop_back_val();
1242  // Skip any nodes we've already reached in the DFS.
1243  if (RootN->DFSNumber != 0) {
1244  assert(RootN->DFSNumber == -1 &&
1245  "Shouldn't have any mid-DFS root nodes!");
1246  continue;
1247  }
1248 
1249  RootN->DFSNumber = RootN->LowLink = 1;
1250  int NextDFSNumber = 2;
1251 
1252  DFSStack.push_back({RootN, (*RootN)->begin()});
1253  do {
1254  Node *N;
1255  EdgeSequence::iterator I;
1256  std::tie(N, I) = DFSStack.pop_back_val();
1257  auto E = (*N)->end();
1258 
1259  assert(N->DFSNumber != 0 && "We should always assign a DFS number "
1260  "before processing a node.");
1261 
1262  while (I != E) {
1263  Node &ChildN = I->getNode();
1264  if (ChildN.DFSNumber == 0) {
1265  // Mark that we should start at this child when next this node is the
1266  // top of the stack. We don't start at the next child to ensure this
1267  // child's lowlink is reflected.
1268  DFSStack.push_back({N, I});
1269 
1270  // Continue, resetting to the child node.
1271  ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
1272  N = &ChildN;
1273  I = ChildN->begin();
1274  E = ChildN->end();
1275  continue;
1276  }
1277  if (ChildN.DFSNumber == -1) {
1278  // If this child isn't currently in this RefSCC, no need to process
1279  // it.
1280  ++I;
1281  continue;
1282  }
1283 
1284  // Track the lowest link of the children, if any are still in the stack.
1285  // Any child not on the stack will have a LowLink of -1.
1286  assert(ChildN.LowLink != 0 &&
1287  "Low-link must not be zero with a non-zero DFS number.");
1288  if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
1289  N->LowLink = ChildN.LowLink;
1290  ++I;
1291  }
1292 
1293  // We've finished processing N and its descendants, put it on our pending
1294  // stack to eventually get merged into a RefSCC.
1295  PendingRefSCCStack.push_back(N);
1296 
1297  // If this node is linked to some lower entry, continue walking up the
1298  // stack.
1299  if (N->LowLink != N->DFSNumber) {
1300  assert(!DFSStack.empty() &&
1301  "We never found a viable root for a RefSCC to pop off!");
1302  continue;
1303  }
1304 
1305  // Otherwise, form a new RefSCC from the top of the pending node stack.
1306  int RefSCCNumber = PostOrderNumber++;
1307  int RootDFSNumber = N->DFSNumber;
1308 
1309  // Find the range of the node stack by walking down until we pass the
1310  // root DFS number. Update the DFS numbers and low link numbers in the
1311  // process to avoid re-walking this list where possible.
1312  auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
1313  if (N->DFSNumber < RootDFSNumber)
1314  // We've found the bottom.
1315  return true;
1316 
1317  // Update this node and keep scanning.
1318  N->DFSNumber = -1;
1319  // Save the post-order number in the lowlink field so that we can use
1320  // it to map SCCs into new RefSCCs after we finish the DFS.
1321  N->LowLink = RefSCCNumber;
1322  return false;
1323  });
1324  auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());
1325 
1326  // If we find a cycle containing all nodes originally in this RefSCC then
1327  // the removal hasn't changed the structure at all. This is an important
1328  // special case and we can directly exit the entire routine more
1329  // efficiently as soon as we discover it.
1330  if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
1331  // Clear out the low link field as we won't need it.
1332  for (Node *N : RefSCCNodes)
1333  N->LowLink = -1;
1334  // Return the empty result immediately.
1335  return Result;
1336  }
1337 
1338  // We've already marked the nodes internally with the RefSCC number so
1339  // just clear them off the stack and continue.
1340  PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
1341  } while (!DFSStack.empty());
1342 
1343  assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
1344  assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
1345  } while (!Worklist.empty());
1346 
1347  assert(PostOrderNumber > 1 &&
1348  "Should never finish the DFS when the existing RefSCC remains valid!");
1349 
1350  // Otherwise we create a collection of new RefSCC nodes and build
1351  // a radix-sort style map from postorder number to these new RefSCCs. We then
1352  // append SCCs to each of these RefSCCs in the order they occurred in the
1353  // original SCCs container.
1354  for (int i = 0; i < PostOrderNumber; ++i)
1355  Result.push_back(G->createRefSCC(*G));
1356 
1357  // Insert the resulting postorder sequence into the global graph postorder
1358  // sequence before the current RefSCC in that sequence, and then remove the
1359  // current one.
1360  //
1361  // FIXME: It'd be nice to change the APIs so that we returned an iterator
1362  // range over the global postorder sequence and generally use that sequence
1363  // rather than building a separate result vector here.
1364  int Idx = G->getRefSCCIndex(*this);
1365  G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
1366  G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
1367  Result.end());
1368  for (int i : seq<int>(Idx, G->PostOrderRefSCCs.size()))
1369  G->RefSCCIndices[G->PostOrderRefSCCs[i]] = i;
1370 
1371  for (SCC *C : SCCs) {
1372  // We store the SCC number in the node's low-link field above.
1373  int SCCNumber = C->begin()->LowLink;
1374  // Clear out all of the SCC's node's low-link fields now that we're done
1375  // using them as side-storage.
1376  for (Node &N : *C) {
1377  assert(N.LowLink == SCCNumber &&
1378  "Cannot have different numbers for nodes in the same SCC!");
1379  N.LowLink = -1;
1380  }
1381 
1382  RefSCC &RC = *Result[SCCNumber];
1383  int SCCIndex = RC.SCCs.size();
1384  RC.SCCs.push_back(C);
1385  RC.SCCIndices[C] = SCCIndex;
1386  C->OuterRefSCC = &RC;
1387  }
1388 
1389  // Now that we've moved things into the new RefSCCs, clear out our current
1390  // one.
1391  G = nullptr;
1392  SCCs.clear();
1393  SCCIndices.clear();
1394 
1395 #ifdef EXPENSIVE_CHECKS
1396  // Verify the new RefSCCs we've built.
1397  for (RefSCC *RC : Result)
1398  RC->verify();
1399 #endif
1400 
1401  // Return the new list of SCCs.
1402  return Result;
1403 }
1404 
1405 void LazyCallGraph::RefSCC::insertTrivialCallEdge(Node &SourceN,
1406  Node &TargetN) {
1407 #ifdef EXPENSIVE_CHECKS
1408  auto ExitVerifier = make_scope_exit([this] { verify(); });
1409 
1410  // Check that we aren't breaking some invariants of the SCC graph. Note that
1411  // this is quadratic in the number of edges in the call graph!
1412  SCC &SourceC = *G->lookupSCC(SourceN);
1413  SCC &TargetC = *G->lookupSCC(TargetN);
1414  if (&SourceC != &TargetC)
1415  assert(SourceC.isAncestorOf(TargetC) &&
1416  "Call edge is not trivial in the SCC graph!");
1417 #endif
1418 
1419  // First insert it into the source or find the existing edge.
1420  auto InsertResult =
1421  SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
1422  if (!InsertResult.second) {
1423  // Already an edge, just update it.
1424  Edge &E = SourceN->Edges[InsertResult.first->second];
1425  if (E.isCall())
1426  return; // Nothing to do!
1427  E.setKind(Edge::Call);
1428  } else {
1429  // Create the new edge.
1430  SourceN->Edges.emplace_back(TargetN, Edge::Call);
1431  }
1432 }
1433 
1434 void LazyCallGraph::RefSCC::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
1435 #ifdef EXPENSIVE_CHECKS
1436  auto ExitVerifier = make_scope_exit([this] { verify(); });
1437 
1438  // Check that we aren't breaking some invariants of the RefSCC graph.
1439  RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
1440  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
1441  if (&SourceRC != &TargetRC)
1442  assert(SourceRC.isAncestorOf(TargetRC) &&
1443  "Ref edge is not trivial in the RefSCC graph!");
1444 #endif
1445 
1446  // First insert it into the source or find the existing edge.
1447  auto InsertResult =
1448  SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
1449  if (!InsertResult.second)
1450  // Already an edge, we're done.
1451  return;
1452 
1453  // Create the new edge.
1454  SourceN->Edges.emplace_back(TargetN, Edge::Ref);
1455 }
1456 
1457 void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
1458  Function &OldF = N.getFunction();
1459 
1460 #ifdef EXPENSIVE_CHECKS
1461  auto ExitVerifier = make_scope_exit([this] { verify(); });
1462 
1463  assert(G->lookupRefSCC(N) == this &&
1464  "Cannot replace the function of a node outside this RefSCC.");
1465 
1466  assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
1467  "Must not have already walked the new function!");
1468 
1469  // It is important that this replacement not introduce graph changes so we
1470  // insist that the caller has already removed every use of the original
1471  // function and that all uses of the new function correspond to existing
1472  // edges in the graph. The common and expected way to use this is when
1473  // replacing the function itself in the IR without changing the call graph
1474  // shape and just updating the analysis based on that.
1475  assert(&OldF != &NewF && "Cannot replace a function with itself!");
1476  assert(OldF.use_empty() &&
1477  "Must have moved all uses from the old function to the new!");
1478 #endif
1479 
1480  N.replaceFunction(NewF);
1481 
1482  // Update various call graph maps.
1483  G->NodeMap.erase(&OldF);
1484  G->NodeMap[&NewF] = &N;
1485 }
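// A minimal sketch of the intended use (hypothetical transform, not from this
// file): create NewF with the desired signature, move OldF's body over,
// rewrite every use of OldF to NewF, and only then update the node in place so
// that no edges or SCC structure change.
//
//   RC.replaceNodeFunction(N, NewF); // N previously wrapped OldF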
1486 
1487 void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
1488  assert(SCCMap.empty() &&
1489  "This method cannot be called after SCCs have been formed!");
1490 
1491  return SourceN->insertEdgeInternal(TargetN, EK);
1492 }
1493 
1494 void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
1495  assert(SCCMap.empty() &&
1496  "This method cannot be called after SCCs have been formed!");
1497 
1498  bool Removed = SourceN->removeEdgeInternal(TargetN);
1499  (void)Removed;
1500  assert(Removed && "Target not in the edge set for this caller?");
1501 }
1502 
1503 void LazyCallGraph::removeDeadFunction(Function &F) {
1504  // FIXME: This is unnecessarily restrictive. We should be able to remove
1505  // functions which recursively call themselves.
1506  assert(F.use_empty() &&
1507  "This routine should only be called on trivially dead functions!");
1508 
1509  // We shouldn't remove library functions as they are never really dead while
1510  // the call graph is in use -- every function definition refers to them.
1511  assert(!isLibFunction(F) &&
1512  "Must not remove lib functions from the call graph!");
1513 
1514  auto NI = NodeMap.find(&F);
1515  if (NI == NodeMap.end())
1516  // Not in the graph at all!
1517  return;
1518 
1519  Node &N = *NI->second;
1520  NodeMap.erase(NI);
1521 
1522  // Remove this from the entry edges if present.
1523  EntryEdges.removeEdgeInternal(N);
1524 
1525  if (SCCMap.empty()) {
1526  // No SCCs have been formed, so removing this is fine and there is nothing
1527  // else necessary at this point but clearing out the node.
1528  N.clear();
1529  return;
1530  }
1531 
1532  // Cannot remove a function which has yet to be visited in the DFS walk, so
1533  // if we have a node at all then we must have an SCC and RefSCC.
1534  auto CI = SCCMap.find(&N);
1535  assert(CI != SCCMap.end() &&
1536  "Tried to remove a node without an SCC after DFS walk started!");
1537  SCC &C = *CI->second;
1538  SCCMap.erase(CI);
1539  RefSCC &RC = C.getOuterRefSCC();
1540 
1541  // This node must be the only member of its SCC as it has no callers, and
1542  // that SCC must be the only member of a RefSCC as it has no references.
1543  // Validate these properties first.
1544  assert(C.size() == 1 && "Dead functions must be in a singular SCC");
1545  assert(RC.size() == 1 && "Dead functions must be in a singular RefSCC");
1546 
1547  auto RCIndexI = RefSCCIndices.find(&RC);
1548  int RCIndex = RCIndexI->second;
1549  PostOrderRefSCCs.erase(PostOrderRefSCCs.begin() + RCIndex);
1550  RefSCCIndices.erase(RCIndexI);
1551  for (int i = RCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i)
1552  RefSCCIndices[PostOrderRefSCCs[i]] = i;
1553 
1554  // Finally clear out all the data structures from the node down through the
1555  // components.
1556  N.clear();
1557  N.G = nullptr;
1558  N.F = nullptr;
1559  C.clear();
1560  RC.clear();
1561  RC.G = nullptr;
1562 
1563  // Nothing to delete as all the objects are allocated in stable bump pointer
1564  // allocators.
1565 }
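
// A minimal sketch (illustrative only): the expected pattern is to remove
// every use of the dead function first, detach it from the analysis, and only
// then erase the IR. CG and DeadF are assumed to come from the caller.
static void exampleDeleteDeadFunction(LazyCallGraph &CG, Function &DeadF) {
  assert(DeadF.use_empty() && "Callers must already have been removed");
  CG.removeDeadFunction(DeadF);
  DeadF.eraseFromParent();
}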
1566 
1567 // Gets the Edge::Kind from one function to another by looking at the original
1568 // function's instructions. Asserts if there is no edge.
1569 // Useful for determining what type of edge should exist between functions when
1570 // the edge hasn't been created yet.
1571 static LazyCallGraph::Edge::Kind getEdgeKind(Function &OriginalFunction,
1572  Function &NewFunction) {
1573  // In release builds, assume that if there are no direct calls to the new
1574  // function, then there is a ref edge. In debug builds, keep track of
1575  // references to assert that there is actually a ref edge if there is no call
1576  // edge.
1577 #ifndef NDEBUG
1578  SmallVector<Constant *, 16> Worklist;
1579  SmallPtrSet<Constant *, 16> Visited;
1580 #endif
1581 
1582  for (Instruction &I : instructions(OriginalFunction)) {
1583  if (auto *CB = dyn_cast<CallBase>(&I)) {
1584  if (Function *Callee = CB->getCalledFunction()) {
1585  if (Callee == &NewFunction)
1586  return LazyCallGraph::Edge::Kind::Call;
1587  }
1588  }
1589 #ifndef NDEBUG
1590  for (Value *Op : I.operand_values()) {
1591  if (Constant *C = dyn_cast<Constant>(Op)) {
1592  if (Visited.insert(C).second)
1593  Worklist.push_back(C);
1594  }
1595  }
1596 #endif
1597  }
1598 
1599 #ifndef NDEBUG
1600  bool FoundNewFunction = false;
1601  LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &F) {
1602  if (&F == &NewFunction)
1603  FoundNewFunction = true;
1604  });
1605  assert(FoundNewFunction && "No edge from original function to new function");
1606 #endif
1607 
1608  return LazyCallGraph::Edge::Kind::Ref;
1609 }
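
// A small illustrative sketch (not from this file) of the same Call/Ref
// distinction seen from the consumer side: walk a node's populated edge
// sequence and count direct calls versus address-taken references. CG and F
// are assumed inputs; only headers this file already includes are needed.
static std::pair<unsigned, unsigned> exampleCountEdgeKinds(LazyCallGraph &CG,
                                                           Function &F) {
  unsigned Calls = 0, Refs = 0;
  for (LazyCallGraph::Edge &E : CG.get(F).populate())
    E.isCall() ? ++Calls : ++Refs;
  return {Calls, Refs};
}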
1610 
1611 void LazyCallGraph::addSplitFunction(Function &OriginalFunction,
1612  Function &NewFunction) {
1613  assert(lookup(OriginalFunction) &&
1614  "Original function's node should already exist");
1615  Node &OriginalN = get(OriginalFunction);
1616  SCC *OriginalC = lookupSCC(OriginalN);
1617  RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1618 
1619 #ifdef EXPENSIVE_CHECKS
1620  OriginalRC->verify();
1621  auto VerifyOnExit = make_scope_exit([&]() { OriginalRC->verify(); });
1622 #endif
1623 
1624  assert(!lookup(NewFunction) &&
1625  "New function's node should not already exist");
1626  Node &NewN = initNode(NewFunction);
1627 
1628  Edge::Kind EK = getEdgeKind(OriginalFunction, NewFunction);
1629 
1630  SCC *NewC = nullptr;
1631  for (Edge &E : *NewN) {
1632  Node &EN = E.getNode();
1633  if (EK == Edge::Kind::Call && E.isCall() && lookupSCC(EN) == OriginalC) {
1634  // If the edge to the new function is a call edge and there is a call edge
1635  // from the new function to any function in the original function's SCC,
1636  // it is in the same SCC (and RefSCC) as the original function.
1637  NewC = OriginalC;
1638  NewC->Nodes.push_back(&NewN);
1639  break;
1640  }
1641  }
1642 
1643  if (!NewC) {
1644  for (Edge &E : *NewN) {
1645  Node &EN = E.getNode();
1646  if (lookupRefSCC(EN) == OriginalRC) {
1647  // If there is any edge from the new function to any function in the
1648  // original function's RefSCC, it is in the same RefSCC as the original
1649  // function but a new SCC.
1650  RefSCC *NewRC = OriginalRC;
1651  NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1652 
1653  // The new function's SCC is not the same as the original function's
1654  // SCC, since that case was handled earlier. If the edge from the
1655  // original function to the new function was a call edge, then we need
1656  // to insert the newly created function's SCC before the original
1657  // function's SCC. Otherwise either the new SCC comes after the original
1658  // function's SCC, or it doesn't matter, and in both cases we can add it
1659  // to the very end.
1660  int InsertIndex = EK == Edge::Kind::Call ? NewRC->SCCIndices[OriginalC]
1661  : NewRC->SCCIndices.size();
1662  NewRC->SCCs.insert(NewRC->SCCs.begin() + InsertIndex, NewC);
1663  for (int I = InsertIndex, Size = NewRC->SCCs.size(); I < Size; ++I)
1664  NewRC->SCCIndices[NewRC->SCCs[I]] = I;
1665 
1666  break;
1667  }
1668  }
1669  }
1670 
1671  if (!NewC) {
1672  // We didn't find any edges back to the original function's RefSCC, so the
1673  // new function belongs in a new RefSCC. The new RefSCC goes before the
1674  // original function's RefSCC.
1675  RefSCC *NewRC = createRefSCC(*this);
1676  NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1677  NewRC->SCCIndices[NewC] = 0;
1678  NewRC->SCCs.push_back(NewC);
1679  auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1680  PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1681  for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1682  RefSCCIndices[PostOrderRefSCCs[I]] = I;
1683  }
1684 
1685  SCCMap[&NewN] = NewC;
1686 
1687  OriginalN->insertEdgeInternal(NewN, EK);
1688 }
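
// A minimal sketch (illustrative only): an outlining transform that has carved
// a region of OrigF into a brand-new OutlinedF, and has already emitted the
// call or reference from OrigF to OutlinedF in the IR, announces the new
// function like this. The helper and variable names are assumptions.
static void exampleRegisterOutlinedFunction(LazyCallGraph &CG, Function &OrigF,
                                            Function &OutlinedF) {
  CG.addSplitFunction(OrigF, OutlinedF);
  assert(CG.lookup(OutlinedF) && "The new function now has a graph node");
}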
1689 
1690 void LazyCallGraph::addSplitRefRecursiveFunctions(
1691  Function &OriginalFunction, ArrayRef<Function *> NewFunctions) {
1692  assert(!NewFunctions.empty() && "Can't add zero functions");
1693  assert(lookup(OriginalFunction) &&
1694  "Original function's node should already exist");
1695  Node &OriginalN = get(OriginalFunction);
1696  RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1697 
1698 #ifdef EXPENSIVE_CHECKS
1699  OriginalRC->verify();
1700  auto VerifyOnExit = make_scope_exit([&]() {
1701  OriginalRC->verify();
1702  for (Function *NewFunction : NewFunctions)
1703  lookupRefSCC(get(*NewFunction))->verify();
1704  });
1705 #endif
1706 
1707  bool ExistsRefToOriginalRefSCC = false;
1708 
1709  for (Function *NewFunction : NewFunctions) {
1710  Node &NewN = initNode(*NewFunction);
1711 
1712  OriginalN->insertEdgeInternal(NewN, Edge::Kind::Ref);
1713 
1714  // Check if there is any edge from any new function back to any function in
1715  // the original function's RefSCC.
1716  for (Edge &E : *NewN) {
1717  if (lookupRefSCC(E.getNode()) == OriginalRC) {
1718  ExistsRefToOriginalRefSCC = true;
1719  break;
1720  }
1721  }
1722  }
1723 
1724  RefSCC *NewRC;
1725  if (ExistsRefToOriginalRefSCC) {
1726  // If there is any edge from any new function to any function in the
1727  // original function's RefSCC, all new functions will be in the same RefSCC
1728  // as the original function.
1729  NewRC = OriginalRC;
1730  } else {
1731  // Otherwise the new functions are in their own RefSCC.
1732  NewRC = createRefSCC(*this);
1733  // The new RefSCC goes before the original function's RefSCC in postorder
1734  // since there are only edges from the original function's RefSCC to the new
1735  // RefSCC.
1736  auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1737  PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1738  for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1739  RefSCCIndices[PostOrderRefSCCs[I]] = I;
1740  }
1741 
1742  for (Function *NewFunction : NewFunctions) {
1743  Node &NewN = get(*NewFunction);
1744  // Each new function is in its own new SCC. The original function can only
1745  // have a ref edge to new functions, and no other existing functions can
1746  // have references to new functions. Each new function only has a ref edge
1747  // to the other new functions.
1748  SCC *NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1749  // The new SCCs are either sibling SCCs or parent SCCs to all other existing
1750  // SCCs in the RefSCC. Either way, they can go at the back of the postorder
1751  // SCC list.
1752  auto Index = NewRC->SCCIndices.size();
1753  NewRC->SCCIndices[NewC] = Index;
1754  NewRC->SCCs.push_back(NewC);
1755  SCCMap[&NewN] = NewC;
1756  }
1757 
1758 #ifndef NDEBUG
1759  for (Function *F1 : NewFunctions) {
1760  assert(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref &&
1761  "Expected ref edges from original function to every new function");
1762  Node &N1 = get(*F1);
1763  for (Function *F2 : NewFunctions) {
1764  if (F1 == F2)
1765  continue;
1766  Node &N2 = get(*F2);
1767  assert(!N1->lookup(N2)->isCall() &&
1768  "Edges between new functions must be ref edges");
1769  }
1770  }
1771 #endif
1772 }
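
// A minimal sketch (illustrative only) of the intended shape: one original
// function is split into several new functions that may reference each other
// and are referenced only from the original, as in coroutine-style splitting.
// The clone names are assumptions.
static void exampleRegisterSplitClones(LazyCallGraph &CG, Function &Original,
                                       Function &CloneA, Function &CloneB) {
  Function *Clones[] = {&CloneA, &CloneB};
  CG.addSplitRefRecursiveFunctions(Original, Clones);
}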
1773 
1774 LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
1775  return *new (MappedN = BPA.Allocate()) Node(*this, F);
1776 }
1777 
1778 void LazyCallGraph::updateGraphPtrs() {
1779  // Walk the node map to update each node's graph pointer. While this iterates
1780  // in an unstable order, the order has no effect, so the result is correct.
1781  for (auto &FunctionNodePair : NodeMap)
1782  FunctionNodePair.second->G = this;
1783 
1784  for (auto *RC : PostOrderRefSCCs)
1785  RC->G = this;
1786 }
1787 
1788 LazyCallGraph::Node &LazyCallGraph::initNode(Function &F) {
1789  Node &N = get(F);
1790  N.DFSNumber = N.LowLink = -1;
1791  N.populate();
1792  NodeMap[&F] = &N;
1793  return N;
1794 }
1795 
1796 template <typename RootsT, typename GetBeginT, typename GetEndT,
1797  typename GetNodeT, typename FormSCCCallbackT>
1798 void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1799  GetEndT &&GetEnd, GetNodeT &&GetNode,
1800  FormSCCCallbackT &&FormSCC) {
1801  using EdgeItT = decltype(GetBegin(std::declval<Node &>()));
1802 
1803  SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
1804  SmallVector<Node *, 16> PendingSCCStack;
1805 
1806  // Scan down the stack and DFS across the call edges.
1807  for (Node *RootN : Roots) {
1808  assert(DFSStack.empty() &&
1809  "Cannot begin a new root with a non-empty DFS stack!");
1810  assert(PendingSCCStack.empty() &&
1811  "Cannot begin a new root with pending nodes for an SCC!");
1812 
1813  // Skip any nodes we've already reached in the DFS.
1814  if (RootN->DFSNumber != 0) {
1815  assert(RootN->DFSNumber == -1 &&
1816  "Shouldn't have any mid-DFS root nodes!");
1817  continue;
1818  }
1819 
1820  RootN->DFSNumber = RootN->LowLink = 1;
1821  int NextDFSNumber = 2;
1822 
1823  DFSStack.push_back({RootN, GetBegin(*RootN)});
1824  do {
1825  Node *N;
1826  EdgeItT I;
1827  std::tie(N, I) = DFSStack.pop_back_val();
1828  auto E = GetEnd(*N);
1829  while (I != E) {
1830  Node &ChildN = GetNode(I);
1831  if (ChildN.DFSNumber == 0) {
1832  // We haven't yet visited this child, so descend, pushing the current
1833  // node onto the stack.
1834  DFSStack.push_back({N, I});
1835 
1836  ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
1837  N = &ChildN;
1838  I = GetBegin(*N);
1839  E = GetEnd(*N);
1840  continue;
1841  }
1842 
1843  // If the child has already been added to some child component, it
1844  // couldn't impact the low-link of this parent because it isn't
1845  // connected, and thus its low-link isn't relevant so skip it.
1846  if (ChildN.DFSNumber == -1) {
1847  ++I;
1848  continue;
1849  }
1850 
1851  // Track the lowest linked child as the lowest link for this node.
1852  assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
1853  if (ChildN.LowLink < N->LowLink)
1854  N->LowLink = ChildN.LowLink;
1855 
1856  // Move to the next edge.
1857  ++I;
1858  }
1859 
1860  // We've finished processing N and its descendants, put it on our pending
1861  // SCC stack to eventually get merged into an SCC of nodes.
1862  PendingSCCStack.push_back(N);
1863 
1864  // If this node is linked to some lower entry, continue walking up the
1865  // stack.
1866  if (N->LowLink != N->DFSNumber)
1867  continue;
1868 
1869  // Otherwise, we've completed an SCC. Append it to our post order list of
1870  // SCCs.
1871  int RootDFSNumber = N->DFSNumber;
1872  // Find the range of the node stack by walking down until we pass the
1873  // root DFS number.
1874  auto SCCNodes = make_range(
1875  PendingSCCStack.rbegin(),
1876  find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
1877  return N->DFSNumber < RootDFSNumber;
1878  }));
1879  // Form a new SCC out of these nodes and then clear them off our pending
1880  // stack.
1881  FormSCC(SCCNodes);
1882  PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
1883  } while (!DFSStack.empty());
1884  }
1885 }
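
// A simplified, self-contained illustration (not part of LLVM) of the same
// Tarjan SCC computation over a plain adjacency list. The routine above runs
// the DFS iteratively with an explicit DFSStack and marks finished nodes with
// a DFS number of -1; this sketch uses recursion and an on-stack flag for
// brevity, but emits SCCs in the same bottom-up (post-order) order.
#include <algorithm>
#include <vector>
struct ExampleTarjan {
  const std::vector<std::vector<int>> &Adj;
  std::vector<int> DFSNum, LowLink, Stack;
  std::vector<bool> OnStack;
  std::vector<std::vector<int>> SCCs;
  int NextDFSNum = 1;

  explicit ExampleTarjan(const std::vector<std::vector<int>> &AdjList)
      : Adj(AdjList), DFSNum(AdjList.size(), 0), LowLink(AdjList.size(), 0),
        OnStack(AdjList.size(), false) {
    for (int N = 0, E = (int)Adj.size(); N != E; ++N)
      if (DFSNum[N] == 0)
        visit(N);
  }

  void visit(int N) {
    DFSNum[N] = LowLink[N] = NextDFSNum++;
    Stack.push_back(N);
    OnStack[N] = true;
    for (int Child : Adj[N]) {
      if (DFSNum[Child] == 0) {
        // Unvisited child: descend and propagate its low-link upward.
        visit(Child);
        LowLink[N] = std::min(LowLink[N], LowLink[Child]);
      } else if (OnStack[Child]) {
        // Child is still pending, so it can lower this node's low-link.
        LowLink[N] = std::min(LowLink[N], DFSNum[Child]);
      }
    }
    // N is the root of an SCC: pop the pending nodes down to and including N.
    if (LowLink[N] == DFSNum[N]) {
      SCCs.emplace_back();
      int M;
      do {
        M = Stack.back();
        Stack.pop_back();
        OnStack[M] = false;
        SCCs.back().push_back(M);
      } while (M != N);
    }
  }
};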
1886 
1887 /// Build the internal SCCs for a RefSCC from a sequence of nodes.
1888 ///
1889 /// Appends the SCCs to the provided vector and updates the map with their
1890 /// indices. Both the vector and map must be empty when passed into this
1891 /// routine.
1892 void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
1893  assert(RC.SCCs.empty() && "Already built SCCs!");
1894  assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");
1895 
1896  for (Node *N : Nodes) {
1897  assert(N->LowLink >= (*Nodes.begin())->LowLink &&
1898  "We cannot have a low link in an SCC lower than its root on the "
1899  "stack!");
1900 
1901  // This node will go into the next RefSCC, clear out its DFS and low link
1902  // as we scan.
1903  N->DFSNumber = N->LowLink = 0;
1904  }
1905 
1906  // Each RefSCC contains a DAG of the call SCCs. To build these, we do
1907  // a direct walk of the call edges using Tarjan's algorithm. We reuse the
1908  // internal storage as we won't need it for the outer graph's DFS any longer.
1909  buildGenericSCCs(
1910  Nodes, [](Node &N) { return N->call_begin(); },
1911  [](Node &N) { return N->call_end(); },
1912  [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
1913  [this, &RC](node_stack_range Nodes) {
1914  RC.SCCs.push_back(createSCC(RC, Nodes));
1915  for (Node &N : *RC.SCCs.back()) {
1916  N.DFSNumber = N.LowLink = -1;
1917  SCCMap[&N] = RC.SCCs.back();
1918  }
1919  });
1920 
1921  // Wire up the SCC indices.
1922  for (int i = 0, Size = RC.SCCs.size(); i < Size; ++i)
1923  RC.SCCIndices[RC.SCCs[i]] = i;
1924 }
1925 
1926 void LazyCallGraph::buildRefSCCs() {
1927  if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
1928  // RefSCCs are either non-existent or already built!
1929  return;
1930 
1931  assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");
1932 
1933  SmallVector<Node *, 16> Roots;
1934  for (Edge &E : *this)
1935  Roots.push_back(&E.getNode());
1936 
1937  // The roots will be iterated in order.
1938  buildGenericSCCs(
1939  Roots,
1940  [](Node &N) {
1941  // We need to populate each node as we begin to walk its edges.
1942  N.populate();
1943  return N->begin();
1944  },
1945  [](Node &N) { return N->end(); },
1946  [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
1947  [this](node_stack_range Nodes) {
1948  RefSCC *NewRC = createRefSCC(*this);
1949  buildSCCs(*NewRC, Nodes);
1950 
1951  // Push the new node into the postorder list and remember its position
1952  // in the index map.
1953  bool Inserted =
1954  RefSCCIndices.insert({NewRC, PostOrderRefSCCs.size()}).second;
1955  (void)Inserted;
1956  assert(Inserted && "Cannot already have this RefSCC in the index map!");
1957  PostOrderRefSCCs.push_back(NewRC);
1958 #ifdef EXPENSIVE_CHECKS
1959  NewRC->verify();
1960 #endif
1961  });
1962 }
1963 
1964 void LazyCallGraph::visitReferences(SmallVectorImpl<Constant *> &Worklist,
1965  SmallPtrSetImpl<Constant *> &Visited,
1966  function_ref<void(Function &)> Callback) {
1967  while (!Worklist.empty()) {
1968  Constant *C = Worklist.pop_back_val();
1969 
1970  if (Function *F = dyn_cast<Function>(C)) {
1971  if (!F->isDeclaration())
1972  Callback(*F);
1973  continue;
1974  }
1975 
1976  // Blockaddresses are weird and don't participate in the call graph anyway;
1977  // skip them.
1978  if (isa<BlockAddress>(C))
1979  continue;
1980 
1981  for (Value *Op : C->operand_values())
1982  if (Visited.insert(cast<Constant>(Op)).second)
1983  Worklist.push_back(cast<Constant>(Op));
1984  }
1985 }
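
// A minimal sketch (illustrative only): the same helper can find every defined
// function whose address is reachable from a constant outside the instruction
// stream, e.g. a global variable's initializer. GV and Out are assumed inputs;
// only headers this file already includes are needed.
static void exampleCollectFunctionsReferencedBy(GlobalVariable &GV,
                                                SmallVectorImpl<Function *> &Out) {
  if (!GV.hasInitializer())
    return;
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  Constant *Init = GV.getInitializer();
  Visited.insert(Init);
  Worklist.push_back(Init);
  LazyCallGraph::visitReferences(Worklist, Visited,
                                 [&](Function &F) { Out.push_back(&F); });
}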
1986 
1987 AnalysisKey LazyCallGraphAnalysis::Key;
1988 
1989 LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
1990 
1991 static void printNode(raw_ostream &OS, LazyCallGraph::Node &N) {
1992  OS << " Edges in function: " << N.getFunction().getName() << "\n";
1993  for (LazyCallGraph::Edge &E : N.populate())
1994  OS << " " << (E.isCall() ? "call" : "ref ") << " -> "
1995  << E.getFunction().getName() << "\n";
1996 
1997  OS << "\n";
1998 }
1999 
2000 static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &C) {
2001  OS << " SCC with " << C.size() << " functions:\n";
2002 
2003  for (LazyCallGraph::Node &N : C)
2004  OS << " " << N.getFunction().getName() << "\n";
2005 }
2006 
2007 static void printRefSCC(raw_ostream &OS, LazyCallGraph::RefSCC &C) {
2008  OS << " RefSCC with " << C.size() << " call SCCs:\n";
2009 
2010  for (LazyCallGraph::SCC &InnerC : C)
2011  printSCC(OS, InnerC);
2012 
2013  OS << "\n";
2014 }
2015 
2016 PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M,
2017  ModuleAnalysisManager &AM) {
2018  LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2019 
2020  OS << "Printing the call graph for module: " << M.getModuleIdentifier()
2021  << "\n\n";
2022 
2023  for (Function &F : M)
2024  printNode(OS, G.get(F));
2025 
2026  G.buildRefSCCs();
2027  for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs())
2028  printRefSCC(OS, C);
2029 
2030  return PreservedAnalyses::all();
2031 }
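
// A minimal sketch (illustrative only) of driving the printer pass from a
// standalone tool with the new pass manager. The PassBuilder boilerplate is
// generic, the extra includes go beyond what this file pulls in, and the
// helper name is hypothetical.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Passes/PassBuilder.h"
static void examplePrintCallGraph(Module &M) {
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
  ModulePassManager MPM;
  MPM.addPass(LazyCallGraphPrinterPass(errs()));
  MPM.run(M, MAM);
}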
2032 
2033 LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS)
2034  : OS(OS) {}
2035 
2036 static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) {
2037  std::string Name =
2038  "\"" + DOT::EscapeString(std::string(N.getFunction().getName())) + "\"";
2039 
2040  for (LazyCallGraph::Edge &E : N.populate()) {
2041  OS << " " << Name << " -> \""
2042  << DOT::EscapeString(std::string(E.getFunction().getName())) << "\"";
2043  if (!E.isCall()) // It is a ref edge.
2044  OS << " [style=dashed,label=\"ref\"]";
2045  OS << ";\n";
2046  }
2047 
2048  OS << "\n";
2049 }
2050 
2051 PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M,
2052  ModuleAnalysisManager &AM) {
2053  LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2054 
2055  OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n";
2056 
2057  for (Function &F : M)
2058  printNodeDOT(OS, G.get(F));
2059 
2060  OS << "}\n";
2061 
2062  return PreservedAnalyses::all();
2063 }
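
// A minimal sketch (illustrative only): writing the DOT form to a file so it
// can be rendered with Graphviz. The file name is an assumption, error
// handling is kept minimal, and M/MAM are assumed to be set up as in the
// previous sketch.
#include <system_error>
static void exampleWriteCallGraphDOT(Module &M, ModuleAnalysisManager &MAM) {
  std::error_code EC;
  raw_fd_ostream OutFile("lazy-callgraph.dot", EC);
  if (EC)
    return;
  LazyCallGraphDOTPrinterPass(OutFile).run(M, MAM);
}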