Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/lib/Analysis/CGSCCPassManager.cpp
Warning: line 230, column 11
Forming reference to null pointer
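
The path reported below reduces to a simple pattern: an SCC pointer popped from the worklist is compared against a null LastUpdatedC and, on the matching branch, streamed as *C inside an LLVM_DEBUG print before any null check. A minimal, self-contained C++ sketch of that pattern follows; the SCC struct, DebugFlag, and the worklist are hypothetical stand-ins for illustration, not the LLVM code itself.

#include <iostream>
#include <vector>

// Hypothetical stand-ins for LazyCallGraph::SCC and llvm::DebugFlag; the names
// are illustrative only and are not taken from the LLVM sources.
struct SCC { const char *Name = "scc"; };
std::ostream &operator<<(std::ostream &OS, const SCC &S) { return OS << S.Name; }
static bool DebugFlag = true;

int main() {
  std::vector<SCC *> CWorklist;  // plays the role of the SCC worklist
  CWorklist.push_back(nullptr);  // the analyzer assumes the popped value can be null
  SCC *LastUpdatedC = nullptr;   // mirrors "LazyCallGraph::SCC *LastUpdatedC = nullptr;"

  SCC *C = CWorklist.back();     // step 16: 'C' initialized here
  CWorklist.pop_back();

  if (LastUpdatedC == C) {       // step 19: both pointers are null, so the branch is taken
    // Step 24: streaming *C forms a reference to a null pointer inside the
    // debug print; this is the construct flagged at line 230, column 11.
    if (DebugFlag)
      std::cout << "Skipping redundant run on SCC: " << *C << "\n";
  }
  return 0;
}

In the actual pass manager the worklist only ever holds addresses of existing SCCs, so the path hinges on the analyzer's assumption (steps 8-16) that pop_back_val() can yield a null pointer; the sketch only reproduces the path as described, not a confirmed crash.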

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGSCCPassManager.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-03-20-232535-108605-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/lib/Analysis/CGSCCPassManager.cpp

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/lib/Analysis/CGSCCPassManager.cpp

1//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Analysis/CGSCCPassManager.h"
10#include "llvm/ADT/ArrayRef.h"
11#include "llvm/ADT/Optional.h"
12#include "llvm/ADT/PriorityWorklist.h"
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/SetVector.h"
15#include "llvm/ADT/SmallPtrSet.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/ADT/iterator_range.h"
18#include "llvm/Analysis/LazyCallGraph.h"
19#include "llvm/IR/Constant.h"
20#include "llvm/IR/InstIterator.h"
21#include "llvm/IR/Instruction.h"
22#include "llvm/IR/PassManager.h"
23#include "llvm/IR/PassManagerImpl.h"
24#include "llvm/IR/ValueHandle.h"
25#include "llvm/Support/Casting.h"
26#include "llvm/Support/CommandLine.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/TimeProfiler.h"
30#include "llvm/Support/raw_ostream.h"
31#include <cassert>
32#include <iterator>
33
34#define DEBUG_TYPE "cgscc"
35
36using namespace llvm;
37
38// Explicit template instantiations and specialization definitions for core
39// template typedefs.
40namespace llvm {
41static cl::opt<bool> AbortOnMaxDevirtIterationsReached(
42 "abort-on-max-devirt-iterations-reached",
43 cl::desc("Abort when the max iterations for devirtualization CGSCC repeat "
44 "pass is reached"));
45
46AnalysisKey ShouldNotRunFunctionPassesAnalysis::Key;
47
48// Explicit instantiations for the core proxy templates.
49template class AllAnalysesOn<LazyCallGraph::SCC>;
50template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
51template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
52 LazyCallGraph &, CGSCCUpdateResult &>;
53template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
54template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
55 LazyCallGraph::SCC, LazyCallGraph &>;
56template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
57
58/// Explicitly specialize the pass manager run method to handle call graph
59/// updates.
60template <>
61PreservedAnalyses
62PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
63 CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
64 CGSCCAnalysisManager &AM,
65 LazyCallGraph &G, CGSCCUpdateResult &UR) {
66 // Request PassInstrumentation from analysis manager, will use it to run
67 // instrumenting callbacks for the passes later.
68 PassInstrumentation PI =
69 AM.getResult<PassInstrumentationAnalysis>(InitialC, G);
70
71 PreservedAnalyses PA = PreservedAnalyses::all();
72
73 // The SCC may be refined while we are running passes over it, so set up
74 // a pointer that we can update.
75 LazyCallGraph::SCC *C = &InitialC;
76
77 // Get Function analysis manager from its proxy.
78 FunctionAnalysisManager &FAM =
79 AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*C)->getManager();
80
81 for (auto &Pass : Passes) {
82 // Check the PassInstrumentation's BeforePass callbacks before running the
83 // pass, skip its execution completely if asked to (callback returns false).
84 if (!PI.runBeforePass(*Pass, *C))
85 continue;
86
87 PreservedAnalyses PassPA;
88 {
89 TimeTraceScope TimeScope(Pass->name());
90 PassPA = Pass->run(*C, AM, G, UR);
91 }
92
93 if (UR.InvalidatedSCCs.count(C))
94 PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
95 else
96 PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);
97
98 // Update the SCC if necessary.
99 C = UR.UpdatedC ? UR.UpdatedC : C;
100 if (UR.UpdatedC) {
101 // If C is updated, also create a proxy and update FAM inside the result.
102 auto *ResultFAMCP =
103 &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
104 ResultFAMCP->updateFAM(FAM);
105 }
106
107 // If the CGSCC pass wasn't able to provide a valid updated SCC, the
108 // current SCC may simply need to be skipped if invalid.
109 if (UR.InvalidatedSCCs.count(C)) {
110 LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
111 break;
112 }
113 // Check that we didn't miss any update scenario.
114 assert(C->begin() != C->end() && "Cannot have an empty SCC!");
115
116 // Update the analysis manager as each pass runs and potentially
117 // invalidates analyses.
118 AM.invalidate(*C, PassPA);
119
120 // Finally, we intersect the final preserved analyses to compute the
121 // aggregate preserved set for this pass manager.
122 PA.intersect(std::move(PassPA));
123 }
124
125 // Before we mark all of *this* SCC's analyses as preserved below, intersect
126 // this with the cross-SCC preserved analysis set. This is used to allow
127 // CGSCC passes to mutate ancestor SCCs and still trigger proper invalidation
128 // for them.
129 UR.CrossSCCPA.intersect(PA);
130
131 // Invalidation was handled after each pass in the above loop for the current
132 // SCC. Therefore, the remaining analysis results in the AnalysisManager are
133 // preserved. We mark this with a set so that we don't need to inspect each
134 // one individually.
135 PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
136
137 return PA;
138}
139
140PreservedAnalyses
141ModuleToPostOrderCGSCCPassAdaptor::run(Module &M, ModuleAnalysisManager &AM) {
142 // Setup the CGSCC analysis manager from its proxy.
143 CGSCCAnalysisManager &CGAM =
144 AM.getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();
145
146 // Get the call graph for this module.
147 LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
148
149 // Get Function analysis manager from its proxy.
150 FunctionAnalysisManager &FAM =
151 AM.getCachedResult<FunctionAnalysisManagerModuleProxy>(M)->getManager();
152
153 // We keep worklists to allow us to push more work onto the pass manager as
154 // the passes are run.
155 SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> RCWorklist;
156 SmallPriorityWorklist<LazyCallGraph::SCC *, 1> CWorklist;
157
158 // Keep sets for invalidated SCCs and RefSCCs that should be skipped when
159 // iterating off the worklists.
160 SmallPtrSet<LazyCallGraph::RefSCC *, 4> InvalidRefSCCSet;
161 SmallPtrSet<LazyCallGraph::SCC *, 4> InvalidSCCSet;
162
163 SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
164 InlinedInternalEdges;
165
166 CGSCCUpdateResult UR = {
167 RCWorklist, CWorklist, InvalidRefSCCSet,
168 InvalidSCCSet, nullptr, PreservedAnalyses::all(),
169 InlinedInternalEdges, {}};
170
171 // Request PassInstrumentation from analysis manager, will use it to run
172 // instrumenting callbacks for the passes later.
173 PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);
174
175 PreservedAnalyses PA = PreservedAnalyses::all();
176 CG.buildRefSCCs();
177 for (auto RCI = CG.postorder_ref_scc_begin(),
178 RCE = CG.postorder_ref_scc_end();
179 RCI != RCE;) {
180 assert(RCWorklist.empty() &&
1
Loop condition is true. Entering loop body
2
'?' condition is true
181 "Should always start with an empty RefSCC worklist");
182 // The postorder_ref_sccs range we are walking is lazily constructed, so
183 // we only push the first one onto the worklist. The worklist allows us
184 // to capture *new* RefSCCs created during transformations.
185 //
186 // We really want to form RefSCCs lazily because that makes them cheaper
187 // to update as the program is simplified and allows us to have greater
188 // cache locality as forming a RefSCC touches all the parts of all the
189 // functions within that RefSCC.
190 //
191 // We also eagerly increment the iterator to the next position because
192 // the CGSCC passes below may delete the current RefSCC.
193 RCWorklist.insert(&*RCI++);
194
195 do {
196 LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
197 if (InvalidRefSCCSet.count(RC)) {
3
Assuming the condition is false
198 LLVM_DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
199 continue;
200 }
201
202 assert(CWorklist.empty() &&
4
Taking false branch
5
'?' condition is true
203 "Should always start with an empty SCC worklist");
204
205 LLVM_DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
6
Assuming 'DebugFlag' is false
7
Loop condition is false. Exiting loop
206 << "\n");
207
208 // The top of the worklist may *also* be the same SCC we just ran over
209 // (and invalidated for). Keep track of that last SCC we processed due
210 // to SCC update to avoid redundant processing when an SCC is both just
211 // updated itself and at the top of the worklist.
212 LazyCallGraph::SCC *LastUpdatedC = nullptr;
213
214 // Push the initial SCCs in reverse post-order as we'll pop off the
215 // back and so see this in post-order.
216 for (LazyCallGraph::SCC &C : llvm::reverse(*RC))
217 CWorklist.insert(&C);
218
219 do {
220 LazyCallGraph::SCC *C = CWorklist.pop_back_val();
8
Calling 'PriorityWorklist::pop_back_val'
15
Returning from 'PriorityWorklist::pop_back_val'
16
'C' initialized here
221 // Due to call graph mutations, we may have invalid SCCs or SCCs from
222 // other RefSCCs in the worklist. The invalid ones are dead and the
223 // other RefSCCs should be queued above, so we just need to skip both
224 // scenarios here.
225 if (InvalidSCCSet.count(C)) {
17
Assuming the condition is false
18
Taking false branch
226 LLVM_DEBUG(dbgs() << "Skipping an invalid SCC...\n");
227 continue;
228 }
229 if (LastUpdatedC == C) {
19
Assuming 'LastUpdatedC' is equal to 'C'
230 LLVM_DEBUG(dbgs() << "Skipping redundant run on SCC: " << *C << "\n");
20
Taking true branch
21
Assuming 'DebugFlag' is true
22
Assuming the condition is true
23
Taking true branch
24
Forming reference to null pointer
231 continue;
232 }
233 // We used to also check if the current SCC is part of the current
234 // RefSCC and bail if it wasn't, since it should be in RCWorklist.
235 // However, this can cause compile time explosions in some cases on
236 // modules with a huge RefSCC. If a non-trivial amount of SCCs in the
237 // huge RefSCC can become their own child RefSCC, we create one child
238 // RefSCC, bail on the current RefSCC, visit the child RefSCC, revisit
239 // the huge RefSCC, and repeat. By visiting all SCCs in the original
240 // RefSCC we create all the child RefSCCs in one pass of the RefSCC,
241 // rather than one pass of the RefSCC creating one child RefSCC at a time.
242
243 // Ensure we can proxy analysis updates from the CGSCC analysis manager
244 // into the Function analysis manager by getting a proxy here.
245 // This also needs to update the FunctionAnalysisManager, as this may be
246 // the first time we see this SCC.
247 CGAM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).updateFAM(
248 FAM);
249
250 // Each time we visit a new SCC pulled off the worklist,
251 // a transformation of a child SCC may have also modified this parent
252 // and invalidated analyses. So we invalidate using the update record's
253 // cross-SCC preserved set. This preserved set is intersected by any
254 // CGSCC pass that handles invalidation (primarily pass managers) prior
255 // to marking its SCC as preserved. That lets us track everything that
256 // might need invalidation across SCCs without excessive invalidations
257 // on a single SCC.
258 //
259 // This essentially allows SCC passes to freely invalidate analyses
260 // of any ancestor SCC. If this becomes detrimental to successfully
261 // caching analyses, we could force each SCC pass to manually
262 // invalidate the analyses for any SCCs other than themselves which
263 // are mutated. However, that seems to lose the robustness of the
264 // pass-manager driven invalidation scheme.
265 CGAM.invalidate(*C, UR.CrossSCCPA);
266
267 do {
268 // Check that we didn't miss any update scenario.
269 assert(!InvalidSCCSet.count(C) && "Processing an invalid SCC!");
270 assert(C->begin() != C->end() && "Cannot have an empty SCC!");
271
272 LastUpdatedC = UR.UpdatedC;
273 UR.UpdatedC = nullptr;
274
275 // Check the PassInstrumentation's BeforePass callbacks before
276 // running the pass, skip its execution completely if asked to
277 // (callback returns false).
278 if (!PI.runBeforePass<LazyCallGraph::SCC>(*Pass, *C))
279 continue;
280
281 PreservedAnalyses PassPA;
282 {
283 TimeTraceScope TimeScope(Pass->name());
284 PassPA = Pass->run(*C, CGAM, CG, UR);
285 }
286
287 if (UR.InvalidatedSCCs.count(C))
288 PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
289 else
290 PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);
291
292 // Update the SCC and RefSCC if necessary.
293 C = UR.UpdatedC ? UR.UpdatedC : C;
294
295 if (UR.UpdatedC) {
296 // If we're updating the SCC, also update the FAM inside the proxy's
297 // result.
298 CGAM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).updateFAM(
299 FAM);
300 }
301
302 // If the CGSCC pass wasn't able to provide a valid updated SCC,
303 // the current SCC may simply need to be skipped if invalid.
304 if (UR.InvalidatedSCCs.count(C)) {
305 LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
306 break;
307 }
308 // Check that we didn't miss any update scenario.
309 assert(C->begin() != C->end() && "Cannot have an empty SCC!");
310
311 // We handle invalidating the CGSCC analysis manager's information
312 // for the (potentially updated) SCC here. Note that any other SCCs
313 // whose structure has changed should have been invalidated by
314 // whatever was updating the call graph. This SCC gets invalidated
315 // late as it contains the nodes that were actively being
316 // processed.
317 CGAM.invalidate(*C, PassPA);
318
319 // Then intersect the preserved set so that invalidation of module
320 // analyses will eventually occur when the module pass completes.
321 // Also intersect with the cross-SCC preserved set to capture any
322 // cross-SCC invalidation.
323 UR.CrossSCCPA.intersect(PassPA);
324 PA.intersect(std::move(PassPA));
325
326 // The pass may have restructured the call graph and refined the
327 // current SCC and/or RefSCC. We need to update our current SCC and
328 // RefSCC pointers to follow these. Also, when the current SCC is
329 // refined, re-run the SCC pass over the newly refined SCC in order
330 // to observe the most precise SCC model available. This inherently
331 // cannot cycle excessively as it only happens when we split SCCs
332 // apart, at most converging on a DAG of single nodes.
333 // FIXME: If we ever start having RefSCC passes, we'll want to
334 // iterate there too.
335 if (UR.UpdatedC)
336 LLVM_DEBUG(dbgs()
337 << "Re-running SCC passes after a refinement of the "
338 "current SCC: "
339 << *UR.UpdatedC << "\n");
340
341 // Note that both `C` and `RC` may at this point refer to deleted,
342 // invalid SCC and RefSCCs respectively. But we will short circuit
343 // the processing when we check them in the loop above.
344 } while (UR.UpdatedC);
345 } while (!CWorklist.empty());
346
347 // We only need to keep internal inlined edge information within
348 // a RefSCC, clear it to save on space and let the next time we visit
349 // any of these functions have a fresh start.
350 InlinedInternalEdges.clear();
351 } while (!RCWorklist.empty());
352 }
353
354 // By definition we preserve the call graph, all SCC analyses, and the
355 // analysis proxies by handling them above and in any nested pass managers.
356 PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
357 PA.preserve<LazyCallGraphAnalysis>();
358 PA.preserve<CGSCCAnalysisManagerModuleProxy>();
359 PA.preserve<FunctionAnalysisManagerModuleProxy>();
360 return PA;
361}
362
363PreservedAnalyses DevirtSCCRepeatedPass::run(LazyCallGraph::SCC &InitialC,
364 CGSCCAnalysisManager &AM,
365 LazyCallGraph &CG,
366 CGSCCUpdateResult &UR) {
367 PreservedAnalyses PA = PreservedAnalyses::all();
368 PassInstrumentation PI =
369 AM.getResult<PassInstrumentationAnalysis>(InitialC, CG);
370
371 // The SCC may be refined while we are running passes over it, so set up
372 // a pointer that we can update.
373 LazyCallGraph::SCC *C = &InitialC;
374
375 // Struct to track the counts of direct and indirect calls in each function
376 // of the SCC.
377 struct CallCount {
378 int Direct;
379 int Indirect;
380 };
381
382 // Put value handles on all of the indirect calls and return the number of
383 // direct calls for each function in the SCC.
384 auto ScanSCC = [](LazyCallGraph::SCC &C,
385 SmallMapVector<Value *, WeakTrackingVH, 16> &CallHandles) {
386 assert(CallHandles.empty() && "Must start with a clear set of handles.");
387
388 SmallDenseMap<Function *, CallCount> CallCounts;
389 CallCount CountLocal = {0, 0};
390 for (LazyCallGraph::Node &N : C) {
391 CallCount &Count =
392 CallCounts.insert(std::make_pair(&N.getFunction(), CountLocal))
393 .first->second;
394 for (Instruction &I : instructions(N.getFunction()))
395 if (auto *CB = dyn_cast<CallBase>(&I)) {
396 if (CB->getCalledFunction()) {
397 ++Count.Direct;
398 } else {
399 ++Count.Indirect;
400 CallHandles.insert({CB, WeakTrackingVH(CB)});
401 }
402 }
403 }
404
405 return CallCounts;
406 };
407
408 UR.IndirectVHs.clear();
409 // Populate the initial call handles and get the initial call counts.
410 auto CallCounts = ScanSCC(*C, UR.IndirectVHs);
411
412 for (int Iteration = 0;; ++Iteration) {
413 if (!PI.runBeforePass<LazyCallGraph::SCC>(*Pass, *C))
414 continue;
415
416 PreservedAnalyses PassPA = Pass->run(*C, AM, CG, UR);
417
418 if (UR.InvalidatedSCCs.count(C))
419 PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
420 else
421 PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);
422
423 // If the SCC structure has changed, bail immediately and let the outer
424 // CGSCC layer handle any iteration to reflect the refined structure.
425 if (UR.UpdatedC && UR.UpdatedC != C) {
426 PA.intersect(std::move(PassPA));
427 break;
428 }
429
430 // If the CGSCC pass wasn't able to provide a valid updated SCC, the
431 // current SCC may simply need to be skipped if invalid.
432 if (UR.InvalidatedSCCs.count(C)) {
433 LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
434 break;
435 }
436
437 assert(C->begin() != C->end() && "Cannot have an empty SCC!");
438
439 // Check whether any of the handles were devirtualized.
440 bool Devirt = llvm::any_of(UR.IndirectVHs, [](auto &P) -> bool {
441 if (P.second) {
442 if (CallBase *CB = dyn_cast<CallBase>(P.second)) {
443 if (CB->getCalledFunction()) {
444 LLVM_DEBUG(dbgs() << "Found devirtualized call: " << *CB << "\n");
445 return true;
446 }
447 }
448 }
449 return false;
450 });
451
452 // Rescan to build up a new set of handles and count how many direct
453 // calls remain. If we decide to iterate, this also sets up the input to
454 // the next iteration.
455 UR.IndirectVHs.clear();
456 auto NewCallCounts = ScanSCC(*C, UR.IndirectVHs);
457
458 // If we haven't found an explicit devirtualization already see if we
459 // have decreased the number of indirect calls and increased the number
460 // of direct calls for any function in the SCC. This can be fooled by all
461 // manner of transformations such as DCE and other things, but seems to
462 // work well in practice.
463 if (!Devirt)
464 // Iterate over the keys in NewCallCounts, if Function also exists in
465 // CallCounts, make the check below.
466 for (auto &Pair : NewCallCounts) {
467 auto &CallCountNew = Pair.second;
468 auto CountIt = CallCounts.find(Pair.first);
469 if (CountIt != CallCounts.end()) {
470 const auto &CallCountOld = CountIt->second;
471 if (CallCountOld.Indirect > CallCountNew.Indirect &&
472 CallCountOld.Direct < CallCountNew.Direct) {
473 Devirt = true;
474 break;
475 }
476 }
477 }
478
479 if (!Devirt) {
480 PA.intersect(std::move(PassPA));
481 break;
482 }
483
484 // Otherwise, if we've already hit our max, we're done.
485 if (Iteration >= MaxIterations) {
486 if (AbortOnMaxDevirtIterationsReached)
487 report_fatal_error("Max devirtualization iterations reached");
488 LLVM_DEBUG(
489 dbgs() << "Found another devirtualization after hitting the max "
490 "number of repetitions ("
491 << MaxIterations << ") on SCC: " << *C << "\n");
492 PA.intersect(std::move(PassPA));
493 break;
494 }
495
496 LLVM_DEBUG(
497 dbgs() << "Repeating an SCC pass after finding a devirtualization in: "
498 << *C << "\n");
499
500 // Move over the new call counts in preparation for iterating.
501 CallCounts = std::move(NewCallCounts);
502
503 // Update the analysis manager with each run and intersect the total set
504 // of preserved analyses so we're ready to iterate.
505 AM.invalidate(*C, PassPA);
506
507 PA.intersect(std::move(PassPA));
508 }
509
510 // Note that we don't add any preserved entries here unlike a more normal
511 // "pass manager" because we only handle invalidation *between* iterations,
512 // not after the last iteration.
513 return PA;
514}
515
516PreservedAnalyses CGSCCToFunctionPassAdaptor::run(LazyCallGraph::SCC &C,
517 CGSCCAnalysisManager &AM,
518 LazyCallGraph &CG,
519 CGSCCUpdateResult &UR) {
520 // Setup the function analysis manager from its proxy.
521 FunctionAnalysisManager &FAM =
522 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
523
524 SmallVector<LazyCallGraph::Node *, 4> Nodes;
525 for (LazyCallGraph::Node &N : C)
526 Nodes.push_back(&N);
527
528 // The SCC may get split while we are optimizing functions due to deleting
529 // edges. If this happens, the current SCC can shift, so keep track of
530 // a pointer we can overwrite.
531 LazyCallGraph::SCC *CurrentC = &C;
532
533 LLVM_DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");
534
535 PreservedAnalyses PA = PreservedAnalyses::all();
536 for (LazyCallGraph::Node *N : Nodes) {
537 // Skip nodes from other SCCs. These may have been split out during
538 // processing. We'll eventually visit those SCCs and pick up the nodes
539 // there.
540 if (CG.lookupSCC(*N) != CurrentC)
541 continue;
542
543 Function &F = N->getFunction();
544
545 if (NoRerun && FAM.getCachedResult<ShouldNotRunFunctionPassesAnalysis>(F))
546 continue;
547
548 PassInstrumentation PI = FAM.getResult<PassInstrumentationAnalysis>(F);
549 if (!PI.runBeforePass<Function>(*Pass, F))
550 continue;
551
552 PreservedAnalyses PassPA;
553 {
554 TimeTraceScope TimeScope(Pass->name());
555 PassPA = Pass->run(F, FAM);
556 }
557
558 PI.runAfterPass<Function>(*Pass, F, PassPA);
559
560 // We know that the function pass couldn't have invalidated any other
561 // function's analyses (that's the contract of a function pass), so
562 // directly handle the function analysis manager's invalidation here.
563 FAM.invalidate(F, EagerlyInvalidate ? PreservedAnalyses::none() : PassPA);
564 if (NoRerun)
565 (void)FAM.getResult<ShouldNotRunFunctionPassesAnalysis>(F);
566
567 // Then intersect the preserved set so that invalidation of module
568 // analyses will eventually occur when the module pass completes.
569 PA.intersect(std::move(PassPA));
570
571 // If the call graph hasn't been preserved, update it based on this
572 // function pass. This may also update the current SCC to point to
573 // a smaller, more refined SCC.
574 auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
575 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
576 CurrentC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentC, *N,
577 AM, UR, FAM);
578 assert(CG.lookupSCC(*N) == CurrentC &&
579 "Current SCC not updated to the SCC containing the current node!");
580 }
581 }
582
583 // By definition we preserve the proxy. And we preserve all analyses on
584 // Functions. This precludes *any* invalidation of function analyses by the
585 // proxy, but that's OK because we've taken care to invalidate analyses in
586 // the function analysis manager incrementally above.
587 PA.preserveSet<AllAnalysesOn<Function>>();
588 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
589
590 // We've also ensured that we updated the call graph along the way.
591 PA.preserve<LazyCallGraphAnalysis>();
592
593 return PA;
594}
595
596bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
597 Module &M, const PreservedAnalyses &PA,
598 ModuleAnalysisManager::Invalidator &Inv) {
599 // If literally everything is preserved, we're done.
600 if (PA.areAllPreserved())
601 return false; // This is still a valid proxy.
602
603 // If this proxy or the call graph is going to be invalidated, we also need
604 // to clear all the keys coming from that analysis.
605 //
606 // We also directly invalidate the FAM's module proxy if necessary, and if
607 // that proxy isn't preserved we can't preserve this proxy either. We rely on
608 // it to handle module -> function analysis invalidation in the face of
609 // structural changes and so if it's unavailable we conservatively clear the
610 // entire SCC layer as well rather than trying to do invalidation ourselves.
611 auto PAC = PA.getChecker<CGSCCAnalysisManagerModuleProxy>();
612 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>()) ||
613 Inv.invalidate<LazyCallGraphAnalysis>(M, PA) ||
614 Inv.invalidate<FunctionAnalysisManagerModuleProxy>(M, PA)) {
615 InnerAM->clear();
616
617 // And the proxy itself should be marked as invalid so that we can observe
618 // the new call graph. This isn't strictly necessary because we cheat
619 // above, but is still useful.
620 return true;
621 }
622
623 // Directly check if the relevant set is preserved so we can short circuit
624 // invalidating SCCs below.
625 bool AreSCCAnalysesPreserved =
626 PA.allAnalysesInSetPreserved<AllAnalysesOn<LazyCallGraph::SCC>>();
627
628 // Ok, we have a graph, so we can propagate the invalidation down into it.
629 G->buildRefSCCs();
630 for (auto &RC : G->postorder_ref_sccs())
631 for (auto &C : RC) {
632 Optional<PreservedAnalyses> InnerPA;
633
634 // Check to see whether the preserved set needs to be adjusted based on
635 // module-level analysis invalidation triggering deferred invalidation
636 // for this SCC.
637 if (auto *OuterProxy =
638 InnerAM->getCachedResult<ModuleAnalysisManagerCGSCCProxy>(C))
639 for (const auto &OuterInvalidationPair :
640 OuterProxy->getOuterInvalidations()) {
641 AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
642 const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
643 if (Inv.invalidate(OuterAnalysisID, M, PA)) {
644 if (!InnerPA)
645 InnerPA = PA;
646 for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
647 InnerPA->abandon(InnerAnalysisID);
648 }
649 }
650
651 // Check if we needed a custom PA set. If so we'll need to run the inner
652 // invalidation.
653 if (InnerPA) {
654 InnerAM->invalidate(C, *InnerPA);
655 continue;
656 }
657
658 // Otherwise we only need to do invalidation if the original PA set didn't
659 // preserve all SCC analyses.
660 if (!AreSCCAnalysesPreserved)
661 InnerAM->invalidate(C, PA);
662 }
663
664 // Return false to indicate that this result is still a valid proxy.
665 return false;
666}
667
668template <>
669CGSCCAnalysisManagerModuleProxy::Result
670CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM) {
671 // Force the Function analysis manager to also be available so that it can
672 // be accessed in an SCC analysis and proxied onward to function passes.
673 // FIXME: It is pretty awkward to just drop the result here and assert that
674 // we can find it again later.
675 (void)AM.getResult<FunctionAnalysisManagerModuleProxy>(M);
676
677 return Result(*InnerAM, AM.getResult<LazyCallGraphAnalysis>(M));
678}
679
680AnalysisKey FunctionAnalysisManagerCGSCCProxy::Key;
681
682FunctionAnalysisManagerCGSCCProxy::Result
683FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
684 CGSCCAnalysisManager &AM,
685 LazyCallGraph &CG) {
686 // Note: unconditionally getting checking that the proxy exists may get it at
687 // this point. There are cases when this is being run unnecessarily, but
688 // it is cheap and having the assertion in place is more valuable.
689 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG);
690 Module &M = *C.begin()->getFunction().getParent();
691 bool ProxyExists =
692 MAMProxy.cachedResultExists<FunctionAnalysisManagerModuleProxy>(M);
693 assert(ProxyExists &&
694 "The CGSCC pass manager requires that the FAM module proxy is run "
695 "on the module prior to entering the CGSCC walk");
696 (void)ProxyExists;
697
698 // We just return an empty result. The caller will use the updateFAM interface
699 // to correctly register the relevant FunctionAnalysisManager based on the
700 // context in which this proxy is run.
701 return Result();
702}
703
704bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
705 LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
706 CGSCCAnalysisManager::Invalidator &Inv) {
707 // If literally everything is preserved, we're done.
708 if (PA.areAllPreserved())
709 return false; // This is still a valid proxy.
710
711 // All updates to preserve valid results are done below, so we don't need to
712 // invalidate this proxy.
713 //
714 // Note that in order to preserve this proxy, a module pass must ensure that
715 // the FAM has been completely updated to handle the deletion of functions.
716 // Specifically, any FAM-cached results for those functions need to have been
717 // forcibly cleared. When preserved, this proxy will only invalidate results
718 // cached on functions *still in the module* at the end of the module pass.
719 auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
720 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
721 for (LazyCallGraph::Node &N : C)
722 FAM->invalidate(N.getFunction(), PA);
723
724 return false;
725 }
726
727 // Directly check if the relevant set is preserved.
728 bool AreFunctionAnalysesPreserved =
729 PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();
730
731 // Now walk all the functions to see if any inner analysis invalidation is
732 // necessary.
733 for (LazyCallGraph::Node &N : C) {
734 Function &F = N.getFunction();
735 Optional<PreservedAnalyses> FunctionPA;
736
737 // Check to see whether the preserved set needs to be pruned based on
738 // SCC-level analysis invalidation that triggers deferred invalidation
739 // registered with the outer analysis manager proxy for this function.
740 if (auto *OuterProxy =
741 FAM->getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F))
742 for (const auto &OuterInvalidationPair :
743 OuterProxy->getOuterInvalidations()) {
744 AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
745 const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
746 if (Inv.invalidate(OuterAnalysisID, C, PA)) {
747 if (!FunctionPA)
748 FunctionPA = PA;
749 for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
750 FunctionPA->abandon(InnerAnalysisID);
751 }
752 }
753
754 // Check if we needed a custom PA set, and if so we'll need to run the
755 // inner invalidation.
756 if (FunctionPA) {
757 FAM->invalidate(F, *FunctionPA);
758 continue;
759 }
760
761 // Otherwise we only need to do invalidation if the original PA set didn't
762 // preserve all function analyses.
763 if (!AreFunctionAnalysesPreserved)
764 FAM->invalidate(F, PA);
765 }
766
767 // Return false to indicate that this result is still a valid proxy.
768 return false;
769}
770
771} // end namespace llvm
772
773/// When a new SCC is created for the graph we first update the
774/// FunctionAnalysisManager in the Proxy's result.
775/// As there might be function analysis results cached for the functions now in
776/// that SCC, two forms of updates are required.
777///
778/// First, a proxy from the SCC to the FunctionAnalysisManager needs to be
779/// created so that any subsequent invalidation events to the SCC are
780/// propagated to the function analysis results cached for functions within it.
781///
782/// Second, if any of the functions within the SCC have analysis results with
783/// outer analysis dependencies, then those dependencies would point to the
784/// *wrong* SCC's analysis result. We forcibly invalidate the necessary
785/// function analyses so that they don't retain stale handles.
786static void updateNewSCCFunctionAnalyses(LazyCallGraph::SCC &C,
787 LazyCallGraph &G,
788 CGSCCAnalysisManager &AM,
789 FunctionAnalysisManager &FAM) {
790 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, G).updateFAM(FAM);
791
792 // Now walk the functions in this SCC and invalidate any function analysis
793 // results that might have outer dependencies on an SCC analysis.
794 for (LazyCallGraph::Node &N : C) {
795 Function &F = N.getFunction();
796
797 auto *OuterProxy =
798 FAM.getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F);
799 if (!OuterProxy)
800 // No outer analyses were queried, nothing to do.
801 continue;
802
803 // Forcibly abandon all the inner analyses with dependencies, but
804 // invalidate nothing else.
805 auto PA = PreservedAnalyses::all();
806 for (const auto &OuterInvalidationPair :
807 OuterProxy->getOuterInvalidations()) {
808 const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
809 for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
810 PA.abandon(InnerAnalysisID);
811 }
812
813 // Now invalidate anything we found.
814 FAM.invalidate(F, PA);
815 }
816}
817
818/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
819/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
820/// added SCCs.
821///
822/// The range of new SCCs must be in postorder already. The SCC they were split
823/// out of must be provided as \p C. The current node being mutated and
824/// triggering updates must be passed as \p N.
825///
826/// This function returns the SCC containing \p N. This will be either \p C if
827/// no new SCCs have been split out, or it will be the new SCC containing \p N.
828template <typename SCCRangeT>
829static LazyCallGraph::SCC *
830incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
831 LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
832 CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
833 using SCC = LazyCallGraph::SCC;
834
835 if (NewSCCRange.empty())
836 return C;
837
838 // Add the current SCC to the worklist as its shape has changed.
839 UR.CWorklist.insert(C);
840 LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist:" << *C
841 << "\n");
842
843 SCC *OldC = C;
844
845 // Update the current SCC. Note that if we have new SCCs, this must actually
846 // change the SCC.
847 assert(C != &*NewSCCRange.begin() &&
848 "Cannot insert new SCCs without changing current SCC!");
849 C = &*NewSCCRange.begin();
850 assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
851
852 // If we had a cached FAM proxy originally, we will want to create more of
853 // them for each SCC that was split off.
854 FunctionAnalysisManager *FAM = nullptr;
855 if (auto *FAMProxy =
856 AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*OldC))
857 FAM = &FAMProxy->getManager();
858
859 // We need to propagate an invalidation call to all but the newly current SCC
860 // because the outer pass manager won't do that for us after splitting them.
861 // FIXME: We should accept a PreservedAnalysis from the CG updater so that if
862 // there are preserved analysis we can avoid invalidating them here for
863 // split-off SCCs.
864 // We know however that this will preserve any FAM proxy so go ahead and mark
865 // that.
866 auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
867 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
868 AM.invalidate(*OldC, PA);
869
870 // Ensure the now-current SCC's function analyses are updated.
871 if (FAM)
872 updateNewSCCFunctionAnalyses(*C, G, AM, *FAM);
873
874 for (SCC &NewC : llvm::reverse(llvm::drop_begin(NewSCCRange))) {
875 assert(C != &NewC && "No need to re-visit the current SCC!");
876 assert(OldC != &NewC && "Already handled the original SCC!");
877 UR.CWorklist.insert(&NewC);
878 LLVM_DEBUG(dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n");
879
880 // Ensure new SCCs' function analyses are updated.
881 if (FAM)
882 updateNewSCCFunctionAnalyses(NewC, G, AM, *FAM);
883
884 // Also propagate a normal invalidation to the new SCC as only the current
885 // will get one from the pass manager infrastructure.
886 AM.invalidate(NewC, PA);
887 }
888 return C;
889}
890
891static LazyCallGraph::SCC &updateCGAndAnalysisManagerForPass(
892 LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
893 CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
894 FunctionAnalysisManager &FAM, bool FunctionPass) {
895 using Node = LazyCallGraph::Node;
896 using Edge = LazyCallGraph::Edge;
897 using SCC = LazyCallGraph::SCC;
898 using RefSCC = LazyCallGraph::RefSCC;
899
900 RefSCC &InitialRC = InitialC.getOuterRefSCC();
901 SCC *C = &InitialC;
902 RefSCC *RC = &InitialRC;
903 Function &F = N.getFunction();
904
905 // Walk the function body and build up the set of retained, promoted, and
906 // demoted edges.
907 SmallVector<Constant *, 16> Worklist;
908 SmallPtrSet<Constant *, 16> Visited;
909 SmallPtrSet<Node *, 16> RetainedEdges;
910 SmallSetVector<Node *, 4> PromotedRefTargets;
911 SmallSetVector<Node *, 4> DemotedCallTargets;
912 SmallSetVector<Node *, 4> NewCallEdges;
913 SmallSetVector<Node *, 4> NewRefEdges;
914
915 // First walk the function and handle all called functions. We do this first
916 // because if there is a single call edge, whether there are ref edges is
917 // irrelevant.
918 for (Instruction &I : instructions(F)) {
919 if (auto *CB = dyn_cast<CallBase>(&I)) {
920 if (Function *Callee = CB->getCalledFunction()) {
921 if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
922 Node *CalleeN = G.lookup(*Callee);
923 assert(CalleeN &&
924 "Visited function should already have an associated node");
925 Edge *E = N->lookup(*CalleeN);
926 assert((E || !FunctionPass) &&
927 "No function transformations should introduce *new* "
928 "call edges! Any new calls should be modeled as "
929 "promoted existing ref edges!");
930 bool Inserted = RetainedEdges.insert(CalleeN).second;
931 (void)Inserted;
932 assert(Inserted && "We should never visit a function twice.");
933 if (!E)
934 NewCallEdges.insert(CalleeN);
935 else if (!E->isCall())
936 PromotedRefTargets.insert(CalleeN);
937 }
938 } else {
939 // We can miss devirtualization if an indirect call is created then
940 // promoted before updateCGAndAnalysisManagerForPass runs.
941 auto *Entry = UR.IndirectVHs.find(CB);
942 if (Entry == UR.IndirectVHs.end())
943 UR.IndirectVHs.insert({CB, WeakTrackingVH(CB)});
944 else if (!Entry->second)
945 Entry->second = WeakTrackingVH(CB);
946 }
947 }
948 }
949
950 // Now walk all references.
951 for (Instruction &I : instructions(F))
952 for (Value *Op : I.operand_values())
953 if (auto *OpC = dyn_cast<Constant>(Op))
954 if (Visited.insert(OpC).second)
955 Worklist.push_back(OpC);
956
957 auto VisitRef = [&](Function &Referee) {
958 Node *RefereeN = G.lookup(Referee);
959 assert(RefereeN &&
960 "Visited function should already have an associated node");
961 Edge *E = N->lookup(*RefereeN);
962 assert((E || !FunctionPass) &&
963 "No function transformations should introduce *new* ref "
964 "edges! Any new ref edges would require IPO which "
965 "function passes aren't allowed to do!");
966 bool Inserted = RetainedEdges.insert(RefereeN).second;
967 (void)Inserted;
968 assert(Inserted && "We should never visit a function twice.")(static_cast <bool> (Inserted && "We should never visit a function twice."
) ? void (0) : __assert_fail ("Inserted && \"We should never visit a function twice.\""
, "llvm/lib/Analysis/CGSCCPassManager.cpp", 968, __extension__
__PRETTY_FUNCTION__))
;
969 if (!E)
970 NewRefEdges.insert(RefereeN);
971 else if (E->isCall())
972 DemotedCallTargets.insert(RefereeN);
973 };
974 LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);
975
976 // Handle new ref edges.
977 for (Node *RefTarget : NewRefEdges) {
978 SCC &TargetC = *G.lookupSCC(*RefTarget);
979 RefSCC &TargetRC = TargetC.getOuterRefSCC();
980 (void)TargetRC;
981 // TODO: This only allows trivial edges to be added for now.
982#ifdef EXPENSIVE_CHECKS
983 assert((RC == &TargetRC ||
984 RC->isAncestorOf(TargetRC)) && "New ref edge is not trivial!");
985#endif
986 RC->insertTrivialRefEdge(N, *RefTarget);
987 }
988
989 // Handle new call edges.
990 for (Node *CallTarget : NewCallEdges) {
991 SCC &TargetC = *G.lookupSCC(*CallTarget);
992 RefSCC &TargetRC = TargetC.getOuterRefSCC();
993 (void)TargetRC;
994 // TODO: This only allows trivial edges to be added for now.
995#ifdef EXPENSIVE_CHECKS
996 assert((RC == &TargetRC ||
997 RC->isAncestorOf(TargetRC)) && "New call edge is not trivial!");
998#endif
999 // Add a trivial ref edge to be promoted later on alongside
1000 // PromotedRefTargets.
1001 RC->insertTrivialRefEdge(N, *CallTarget);
1002 }
1003
1004 // Include synthetic reference edges to known, defined lib functions.
1005 for (auto *LibFn : G.getLibFunctions())
1006 // While the list of lib functions doesn't have repeats, don't re-visit
1007 // anything handled above.
1008 if (!Visited.count(LibFn))
1009 VisitRef(*LibFn);
1010
1011 // First remove all of the edges that are no longer present in this function.
1012 // The first step makes these edges uniformly ref edges and accumulates them
1013 // into a separate data structure so removal doesn't invalidate anything.
1014 SmallVector<Node *, 4> DeadTargets;
1015 for (Edge &E : *N) {
1016 if (RetainedEdges.count(&E.getNode()))
1017 continue;
1018
1019 SCC &TargetC = *G.lookupSCC(E.getNode());
1020 RefSCC &TargetRC = TargetC.getOuterRefSCC();
1021 if (&TargetRC == RC && E.isCall()) {
1022 if (C != &TargetC) {
1023 // For separate SCCs this is trivial.
1024 RC->switchTrivialInternalEdgeToRef(N, E.getNode());
1025 } else {
1026 // Now update the call graph.
1027 C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
1028 G, N, C, AM, UR);
1029 }
1030 }
1031
1032 // Now that this is ready for actual removal, put it into our list.
1033 DeadTargets.push_back(&E.getNode());
1034 }
1035 // Remove the easy cases quickly and actually pull them out of our list.
1036 llvm::erase_if(DeadTargets, [&](Node *TargetN) {
1037 SCC &TargetC = *G.lookupSCC(*TargetN);
1038 RefSCC &TargetRC = TargetC.getOuterRefSCC();
1039
1040 // We can't trivially remove internal targets, so skip
1041 // those.
1042 if (&TargetRC == RC)
1043 return false;
1044
1045 LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '" << N << "' to '"
1046 << *TargetN << "'\n");
1047 RC->removeOutgoingEdge(N, *TargetN);
1048 return true;
1049 });
1050
1051 // Now do a batch removal of the internal ref edges left.
1052 auto NewRefSCCs = RC->removeInternalRefEdge(N, DeadTargets);
1053 if (!NewRefSCCs.empty()) {
1054 // The old RefSCC is dead, mark it as such.
1055 UR.InvalidatedRefSCCs.insert(RC);
1056
1057 // Note that we don't bother to invalidate analyses as ref-edge
1058 // connectivity is not really observable in any way and is intended
1059 // exclusively to be used for ordering of transforms rather than for
1060 // analysis conclusions.
1061
1062 // Update RC to the "bottom".
1063 assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
1064 RC = &C->getOuterRefSCC();
1065 assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");
1066
1067 // The RC worklist is in reverse postorder, so we enqueue the new ones in
1068 // RPO except for the one which contains the source node as that is the
1069 // "bottom" we will continue processing in the bottom-up walk.
1070 assert(NewRefSCCs.front() == RC &&
1071 "New current RefSCC not first in the returned list!");
1072 for (RefSCC *NewRC : llvm::reverse(llvm::drop_begin(NewRefSCCs))) {
1073 assert(NewRC != RC && "Should not encounter the current RefSCC further "
1074 "in the postorder list of new RefSCCs.");
1075 UR.RCWorklist.insert(NewRC);
1076 LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
1077 << *NewRC << "\n");
1078 }
1079 }
1080
1081 // Next demote all the call edges that are now ref edges. This helps make
1082 // the SCCs small which should minimize the work below as we don't want to
1083 // form cycles that this would break.
1084 for (Node *RefTarget : DemotedCallTargets) {
1085 SCC &TargetC = *G.lookupSCC(*RefTarget);
1086 RefSCC &TargetRC = TargetC.getOuterRefSCC();
1087
1088 // The easy case is when the target RefSCC is not this RefSCC. This is
1089 // only supported when the target RefSCC is a child of this RefSCC.
1090 if (&TargetRC != RC) {
1091#ifdef EXPENSIVE_CHECKS
1092 assert(RC->isAncestorOf(TargetRC) &&
1093 "Cannot potentially form RefSCC cycles here!");
1094#endif
1095 RC->switchOutgoingEdgeToRef(N, *RefTarget);
1096 LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
1097 << "' to '" << *RefTarget << "'\n");
1098 continue;
1099 }
1100
1101 // We are switching an internal call edge to a ref edge. This may split up
1102 // some SCCs.
1103 if (C != &TargetC) {
1104 // For separate SCCs this is trivial.
1105 RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
1106 continue;
1107 }
1108
1109 // Now update the call graph.
1110 C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
1111 C, AM, UR);
1112 }
1113
1114 // We added a ref edge earlier for new call edges, promote those to call edges
1115 // alongside PromotedRefTargets.
1116 for (Node *E : NewCallEdges)
1117 PromotedRefTargets.insert(E);
1118
1119 // Now promote ref edges into call edges.
1120 for (Node *CallTarget : PromotedRefTargets) {
1121 SCC &TargetC = *G.lookupSCC(*CallTarget);
1122 RefSCC &TargetRC = TargetC.getOuterRefSCC();
1123
1124 // The easy case is when the target RefSCC is not this RefSCC. This is
1125 // only supported when the target RefSCC is a child of this RefSCC.
1126 if (&TargetRC != RC) {
1127#ifdef EXPENSIVE_CHECKS
1128 assert(RC->isAncestorOf(TargetRC) &&
1129 "Cannot potentially form RefSCC cycles here!");
1130#endif
1131 RC->switchOutgoingEdgeToCall(N, *CallTarget);
1132 LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
1133 << "' to '" << *CallTarget << "'\n");
1134 continue;
1135 }
1136 LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
1137 << N << "' to '" << *CallTarget << "'\n");
1138
1139 // Otherwise we are switching an internal ref edge to a call edge. This
1140 // may merge away some SCCs, and we add those to the UpdateResult. We also
1141 // need to make sure to update the worklist in the event SCCs have moved
1142 // before the current one in the post-order sequence.
1143 bool HasFunctionAnalysisProxy = false;
1144 auto InitialSCCIndex = RC->find(*C) - RC->begin();
1145 bool FormedCycle = RC->switchInternalEdgeToCall(
1146 N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
1147 for (SCC *MergedC : MergedSCCs) {
1148 assert(MergedC != &TargetC && "Cannot merge away the target SCC!");
1149
1150 HasFunctionAnalysisProxy |=
1151 AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
1152 *MergedC) != nullptr;
1153
1154 // Mark that this SCC will no longer be valid.
1155 UR.InvalidatedSCCs.insert(MergedC);
1156
1157 // FIXME: We should really do a 'clear' here to forcibly release
1158 // memory, but we don't have a good way of doing that and
1159 // preserving the function analyses.
1160 auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
1161 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
1162 AM.invalidate(*MergedC, PA);
1163 }
1164 });
1165
1166 // If we formed a cycle by creating this call, we need to update more data
1167 // structures.
1168 if (FormedCycle) {
1169 C = &TargetC;
1170 assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
1171
1172 // If one of the invalidated SCCs had a cached proxy to a function
1173 // analysis manager, we need to create a proxy in the new current SCC as
1174 // the invalidated SCCs had their functions moved.
1175 if (HasFunctionAnalysisProxy)
1176 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G).updateFAM(FAM);
1177
1178 // Any analyses cached for this SCC are no longer precise as the shape
1179 // has changed by introducing this cycle. However, we have taken care to
1180 // update the proxies so it remains valid.
1181 auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
1182 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
1183 AM.invalidate(*C, PA);
1184 }
1185 auto NewSCCIndex = RC->find(*C) - RC->begin();
1186 // If we have actually moved an SCC to be topologically "below" the current
1187 // one due to merging, we will need to revisit the current SCC after
1188 // visiting those moved SCCs.
1189 //
1190 // It is critical that we *do not* revisit the current SCC unless we
1191 // actually move SCCs in the process of merging because otherwise we may
1192 // form a cycle where an SCC is split apart, merged, split, merged and so
1193 // on infinitely.
1194 if (InitialSCCIndex < NewSCCIndex) {
1195 // Put our current SCC back onto the worklist as we'll visit other SCCs
1196 // that are now definitively ordered prior to the current one in the
1197 // post-order sequence, and may end up observing more precise context to
1198 // optimize the current SCC.
1199 UR.CWorklist.insert(C);
1200 LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
1201 << "\n");
1202 // Enqueue in reverse order as we pop off the back of the worklist.
1203 for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
1204 RC->begin() + NewSCCIndex))) {
1205 UR.CWorklist.insert(&MovedC);
1206 LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
1207 << MovedC << "\n");
1208 }
1209 }
1210 }
1211
1212 assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
1213 assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
1214 assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");
1215
1216 // Record the current SCC for higher layers of the CGSCC pass manager now that
1217 // all the updates have been applied.
1218 if (C != &InitialC)
1219 UR.UpdatedC = C;
1220
1221 return *C;
1222}
1223
1224LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
1225 LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
1226 CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1227 FunctionAnalysisManager &FAM) {
1228 return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
1229 /* FunctionPass */ true);
1230}
1231LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForCGSCCPass(
1232 LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
1233 CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1234 FunctionAnalysisManager &FAM) {
1235 return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
1236 /* FunctionPass */ false);
1237}
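
The two wrappers above differ only in the FunctionPass flag. As a rough, hypothetical sketch (not part of the analyzed source) of the intended calling pattern, the snippet below runs some function-level transformation and then hands the node to the update routine; "runMyFunctionTransform" is an assumed stand-in, and the sketch presumes compilation against the LLVM headers named in this report.

// Hypothetical sketch only: illustrates the calling pattern, not LLVM's
// actual pass adaptors.
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Run an (assumed) function transformation on N's function, then let the
// update routine repair call/ref edges, SCC membership, and analysis caches.
static LazyCallGraph::SCC &
transformAndUpdate(LazyCallGraph &G, LazyCallGraph::SCC &C,
                   LazyCallGraph::Node &N, CGSCCAnalysisManager &AM,
                   CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM,
                   function_ref<void(Function &)> runMyFunctionTransform) {
  runMyFunctionTransform(N.getFunction());
  // Function-pass flavor: any new call edge must be a promoted ref edge,
  // mirroring the asserts earlier in this file.
  return updateCGAndAnalysisManagerForFunctionPass(G, C, N, AM, UR, FAM);
}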

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include/llvm/ADT/PriorityWorklist.h

1//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10///
11/// This file provides a priority worklist. See the class comments for details.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_PRIORITYWORKLIST_H
16#define LLVM_ADT_PRIORITYWORKLIST_H
17
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/Support/Compiler.h"
22#include <cassert>
23#include <cstddef>
24#include <iterator>
25#include <type_traits>
26#include <vector>
27
28namespace llvm {
29
30/// A FILO worklist that prioritizes on re-insertion without duplication.
31///
32/// This is very similar to a \c SetVector with the primary difference that
33/// while re-insertion does not create a duplicate, it does adjust the
34/// visitation order to respect the last insertion point. This can be useful
35/// when the visit order needs to be prioritized based on insertion point
36/// without actually having duplicate visits.
37///
38/// Note that this doesn't prevent re-insertion of elements which have been
39/// visited -- if you need to break cycles, a set will still be necessary.
40///
41 /// The type \c T must be default constructible to a null value that will be
42/// ignored. It is an error to insert such a value, and popping elements will
43/// never produce such a value. It is expected to be used with common nullable
44/// types like pointers or optionals.
45///
46/// Internally this uses a vector to store the worklist and a map to identify
47/// existing elements in the worklist. Both of these may be customized, but the
48/// map must support the basic DenseMap API for mapping from a T to an integer
49/// index into the vector.
50///
51/// A partial specialization is provided to automatically select a SmallVector
52/// and a SmallDenseMap if custom data structures are not provided.
53template <typename T, typename VectorT = std::vector<T>,
54 typename MapT = DenseMap<T, ptrdiff_t>>
55class PriorityWorklist {
56public:
57 using value_type = T;
58 using key_type = T;
59 using reference = T&;
60 using const_reference = const T&;
61 using size_type = typename MapT::size_type;
62
63 /// Construct an empty PriorityWorklist
64 PriorityWorklist() = default;
65
66 /// Determine if the PriorityWorklist is empty or not.
67 bool empty() const {
68 return V.empty();
69 }
70
71 /// Returns the number of elements in the worklist.
72 size_type size() const {
73 return M.size();
74 }
75
76 /// Count the number of elements of a given key in the PriorityWorklist.
77 /// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
78 size_type count(const key_type &key) const {
79 return M.count(key);
80 }
81
82 /// Return the last element of the PriorityWorklist.
83 const T &back() const {
84 assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
[10] '?' condition is true
85 return V.back();
[11] Returning pointer
86 }
87
88 /// Insert a new element into the PriorityWorklist.
89 /// \returns true if the element was inserted into the PriorityWorklist.
90 bool insert(const T &X) {
91 assert(X != T() && "Cannot insert a null (default constructed) value!");
92 auto InsertResult = M.insert({X, V.size()});
93 if (InsertResult.second) {
94 // Fresh value, just append it to the vector.
95 V.push_back(X);
96 return true;
97 }
98
99 auto &Index = InsertResult.first->second;
100 assert(V[Index] == X && "Value not actually at index in map!");
101 if (Index != (ptrdiff_t)(V.size() - 1)) {
102 // If the element isn't at the back, null it out and append a fresh one.
103 V[Index] = T();
104 Index = (ptrdiff_t)V.size();
105 V.push_back(X);
106 }
107 return false;
108 }
109
110 /// Insert a sequence of new elements into the PriorityWorklist.
111 template <typename SequenceT>
112 std::enable_if_t<!std::is_convertible<SequenceT, T>::value>
113 insert(SequenceT &&Input) {
114 if (std::begin(Input) == std::end(Input))
115 // Nothing to do for an empty input sequence.
116 return;
117
118 // First pull the input sequence into the vector as a bulk append
119 // operation.
120 ptrdiff_t StartIndex = V.size();
121 V.insert(V.end(), std::begin(Input), std::end(Input));
122 // Now walk backwards fixing up the index map and deleting any duplicates.
123 for (ptrdiff_t i = V.size() - 1; i >= StartIndex; --i) {
124 auto InsertResult = M.insert({V[i], i});
125 if (InsertResult.second)
126 continue;
127
128 // If the existing index is before this insert's start, nuke that one and
129 // move it up.
130 ptrdiff_t &Index = InsertResult.first->second;
131 if (Index < StartIndex) {
132 V[Index] = T();
133 Index = i;
134 continue;
135 }
136
137 // Otherwise the existing one comes first so just clear out the value in
138 // this slot.
139 V[i] = T();
140 }
141 }
142
143 /// Remove the last element of the PriorityWorklist.
144 void pop_back() {
145 assert(!empty() && "Cannot remove an element when empty!");
146 assert(back() != T() && "Cannot have a null element at the back!");
147 M.erase(back());
148 do {
149 V.pop_back();
150 } while (!V.empty() && V.back() == T());
151 }
152
153 LLVM_NODISCARD T pop_back_val() {
154 T Ret = back();
[9] Calling 'PriorityWorklist::back'
[12] Returning from 'PriorityWorklist::back'
[13] 'Ret' initialized here
155 pop_back();
156 return Ret;
[14] Returning pointer (loaded from 'Ret')
157 }
158
159 /// Erase an item from the worklist.
160 ///
161 /// Note that this is constant time due to the nature of the worklist implementation.
162 bool erase(const T& X) {
163 auto I = M.find(X);
164 if (I == M.end())
165 return false;
166
167 assert(V[I->second] == X && "Value not actually at index in map!");
168 if (I->second == (ptrdiff_t)(V.size() - 1)) {
169 do {
170 V.pop_back();
171 } while (!V.empty() && V.back() == T());
172 } else {
173 V[I->second] = T();
174 }
175 M.erase(I);
176 return true;
177 }
178
179 /// Erase items from the set vector based on a predicate function.
180 ///
181 /// This is intended to be equivalent to the following code, if we could
182 /// write it:
183 ///
184 /// \code
185 /// V.erase(remove_if(V, P), V.end());
186 /// \endcode
187 ///
188 /// However, PriorityWorklist doesn't expose non-const iterators, making any
189 /// algorithm like remove_if impossible to use.
190 ///
191 /// \returns true if any element is removed.
192 template <typename UnaryPredicate>
193 bool erase_if(UnaryPredicate P) {
194 typename VectorT::iterator E =
195 remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
196 if (E == V.end())
197 return false;
198 for (auto I = V.begin(); I != E; ++I)
199 if (*I != T())
200 M[*I] = I - V.begin();
201 V.erase(E, V.end());
202 return true;
203 }
204
205 /// Reverse the items in the PriorityWorklist.
206 ///
207 /// This does an in-place reversal. Other kinds of reverse aren't easy to
208 /// support in the face of the worklist semantics.
209
210 /// Completely clear the PriorityWorklist
211 void clear() {
212 M.clear();
213 V.clear();
214 }
215
216private:
217 /// A wrapper predicate designed for use with std::remove_if.
218 ///
219 /// This predicate wraps a predicate suitable for use with std::remove_if to
220 /// call M.erase(x) on each element which is slated for removal. This just
221 /// allows the predicate to be move only which we can't do with lambdas
222 /// today.
223 template <typename UnaryPredicateT>
224 class TestAndEraseFromMap {
225 UnaryPredicateT P;
226 MapT &M;
227
228 public:
229 TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
230 : P(std::move(P)), M(M) {}
231
232 bool operator()(const T &Arg) {
233 if (Arg == T())
234 // Skip null values in the PriorityWorklist.
235 return false;
236
237 if (P(Arg)) {
238 M.erase(Arg);
239 return true;
240 }
241 return false;
242 }
243 };
244
245 /// The map from value to index in the vector.
246 MapT M;
247
248 /// The vector of elements in insertion order.
249 VectorT V;
250};
251
252/// A version of \c PriorityWorklist that selects small size optimized data
253/// structures for the vector and map.
254template <typename T, unsigned N>
255class SmallPriorityWorklist
256 : public PriorityWorklist<T, SmallVector<T, N>,
257 SmallDenseMap<T, ptrdiff_t>> {
258public:
259 SmallPriorityWorklist() = default;
260};
261
262} // end namespace llvm
263
264#endif // LLVM_ADT_PRIORITYWORKLIST_H
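
For reference, the re-insertion semantics documented on the PriorityWorklist class above can be exercised with a small standalone sketch (not part of the report). It assumes compilation against LLVM's ADT headers; int is used as the element type because 0 serves as the required null value that must never be inserted.

#include "llvm/ADT/PriorityWorklist.h"
#include <cstdio>

int main() {
  llvm::PriorityWorklist<int> WL;
  WL.insert(1);
  WL.insert(2);
  WL.insert(3);
  // Re-inserting 1 does not create a duplicate; its old slot is nulled out
  // and 1 moves to the back, so it is visited first in the FILO order.
  WL.insert(1);
  while (!WL.empty())
    std::printf("%d\n", WL.pop_back_val()); // expected order: 1, 3, 2
  return 0;
}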