Line data Source code
1 : //===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : /// \file
10 : ///
11 : /// Interfaces for registering analysis passes, producing common pass manager
12 : /// configurations, and parsing of pass pipelines.
13 : ///
14 : //===----------------------------------------------------------------------===//
15 :
16 : #ifndef LLVM_PASSES_PASSBUILDER_H
17 : #define LLVM_PASSES_PASSBUILDER_H
18 :
19 : #include "llvm/ADT/Optional.h"
20 : #include "llvm/Analysis/CGSCCPassManager.h"
21 : #include "llvm/IR/PassManager.h"
22 : #include "llvm/Support/Error.h"
23 : #include "llvm/Transforms/Instrumentation.h"
24 : #include "llvm/Transforms/Scalar/LoopPassManager.h"
25 : #include <vector>
26 :
27 : namespace llvm {
28 : class StringRef;
29 : class AAManager;
30 : class TargetMachine;
31 : class ModuleSummaryIndex;
32 :
33 : /// A struct capturing PGO tunables.
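/// For illustration, a sketch of constructing these options for
/// instrumentation-based profile generation and for profile use (the file
/// names are placeholders, not defaults used anywhere in LLVM):
///
///   PGOOptions GenOpts("code.profraw", "", "", "", /*RunProfileGen*/ true);
///   PGOOptions UseOpts("", "code.profdata");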
34 : struct PGOOptions {
35 16 : PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
36 : std::string SampleProfileFile = "",
37 : std::string ProfileRemappingFile = "",
38 : bool RunProfileGen = false, bool SamplePGOSupport = false)
39 16 : : ProfileGenFile(ProfileGenFile), ProfileUseFile(ProfileUseFile),
40 : SampleProfileFile(SampleProfileFile),
41 : ProfileRemappingFile(ProfileRemappingFile),
42 : RunProfileGen(RunProfileGen),
43 16 : SamplePGOSupport(SamplePGOSupport || !SampleProfileFile.empty()) {
44 : assert((RunProfileGen ||
45 : !SampleProfileFile.empty() ||
46 : !ProfileUseFile.empty() ||
47 : SamplePGOSupport) && "Illegal PGOOptions.");
48 16 : }
49 : std::string ProfileGenFile;
50 : std::string ProfileUseFile;
51 : std::string SampleProfileFile;
52 : std::string ProfileRemappingFile;
53 : bool RunProfileGen;
54 : bool SamplePGOSupport;
55 : };
56 :
57 : /// This class provides access to building LLVM's passes.
58 : ///
59 : /// Its members provide the baseline state available to passes during their
60 : /// construction. The \c PassRegistry.def file specifies how to construct all
61 : /// of the built-in passes, and those may reference these members during
62 : /// construction.
63 : class PassBuilder {
64 : TargetMachine *TM;
65 : Optional<PGOOptions> PGOOpt;
66 : PassInstrumentationCallbacks *PIC;
67 :
68 : public:
69 : /// A struct to capture parsed pass pipeline names.
70 : ///
71 : /// A pipeline is defined as a series of names, each of which may in itself
72 : /// recursively contain a nested pipeline. A name is either the name of a pass
73 : /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
74 : /// name is the name of a pass, the InnerPipeline is empty, since passes
75 : /// cannot contain inner pipelines. See parsePassPipeline() for a more
76 : /// detailed description of the textual pipeline format.
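/// As an illustration (a hand-written sketch of the parsed shape, not the
/// output of any dumping API), the pipeline text "cgscc(inline,function(sroa))"
/// parses into elements nested roughly like:
///
///   {Name: "cgscc", InnerPipeline: [
///     {Name: "inline",   InnerPipeline: []},
///     {Name: "function", InnerPipeline: [{Name: "sroa", InnerPipeline: []}]}]}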
77 6221 : struct PipelineElement {
78 : StringRef Name;
79 : std::vector<PipelineElement> InnerPipeline;
80 : };
81 :
82 : /// ThinLTO phase.
83 : ///
84 : /// This enumerates the LLVM ThinLTO optimization phases.
85 : enum class ThinLTOPhase {
86 : /// No ThinLTO behavior needed.
87 : None,
88 : /// ThinLTO prelink (summary) phase.
89 : PreLink,
90 : /// ThinLTO postlink (backend compile) phase.
91 : PostLink
92 : };
93 :
94 : /// LLVM-provided high-level optimization levels.
95 : ///
96 : /// This enumerates the LLVM-provided high-level optimization levels. Each
97 : /// level has a specific goal and rationale.
98 : enum OptimizationLevel {
99 : /// Disable as many optimizations as possible. This doesn't completely
100 : /// disable the optimizer in all cases; for example, always_inline functions
101 : /// can be required to be inlined for correctness.
102 : O0,
103 :
104 : /// Optimize quickly without destroying debuggability.
105 : ///
106 : /// FIXME: The current and historical behavior of this level does *not*
107 : /// agree with this goal, but we would like to move toward this goal in the
108 : /// future.
109 : ///
110 : /// This level is tuned to produce a result from the optimizer as quickly
111 : /// as possible and to avoid destroying debuggability. This tends to result
112 : /// in a very good development mode where the compiled code will be
113 : /// immediately executed as part of testing. As a consequence, where
114 : /// possible, we would like to produce efficient-to-execute code, but not
115 : /// if it significantly slows down compilation or would prevent even basic
116 : /// debugging of the resulting binary.
117 : ///
118 : /// As an example, complex loop transformations such as versioning,
119 : /// vectorization, or fusion might not make sense here due to the degree to
120 : /// which the executed code would differ from the source code, and the
121 : /// potential compile time cost.
122 : O1,
123 :
124 : /// Optimize for fast execution as much as possible without triggering
125 : /// significant incremental compile time or code size growth.
126 : ///
127 : /// The key idea is that optimizations at this level should "pay for
128 : /// themselves". So if an optimization increases compile time by 5% or
129 : /// increases code size by 5% for a particular benchmark, that benchmark
130 : /// should also be one which sees a 5% runtime improvement. If the compile
131 : /// time or code size penalties happen on average across a diverse range of
132 : /// LLVM users' benchmarks, then the improvements should as well.
133 : ///
134 : /// And no matter what, the compile time needs to not grow superlinearly
135 : /// with the size of input to LLVM so that users can control the runtime of
136 : /// the optimizer in this mode.
137 : ///
138 : /// This is expected to be a good default optimization level for the vast
139 : /// majority of users.
140 : O2,
141 :
142 : /// Optimize for fast execution as much as possible.
143 : ///
144 : /// This mode is significantly more aggressive in trading off compile time
145 : /// and code size to get execution time improvements. The core idea is that
146 : /// this mode should include any optimization that helps execution time on
147 : /// balance across a diverse collection of benchmarks, even if it increases
148 : /// code size or compile time for some benchmarks without corresponding
149 : /// improvements to execution time.
150 : ///
151 : /// Despite being willing to trade more compile time off to get improved
152 : /// execution time, this mode still tries to avoid superlinear growth in
153 : /// order to make even significantly slower compile times at least scale
154 : /// reasonably. This does not preclude very substantial constant factor
155 : /// costs though.
156 : O3,
157 :
158 : /// Similar to \c O2 but tries to optimize for small code size instead of
159 : /// fast execution, without triggering significant incremental execution
160 : /// time slowdowns.
161 : ///
162 : /// The logic here is exactly the same as \c O2, but with code size and
163 : /// execution time metrics swapped.
164 : ///
165 : /// A consequence of the different core goal is that this should in general
166 : /// produce substantially smaller executables that still run in
167 : /// a reasonable amount of time.
168 : Os,
169 :
170 : /// A very specialized mode that will optimize for code size at any and all
171 : /// costs.
172 : ///
173 : /// This is useful primarily when there are absolute size limitations and
174 : /// any effort taken to reduce the size is worth it regardless of the
175 : /// execution time impact. You should expect this level to produce rather
176 : /// slow, but very small, code.
177 : Oz
178 : };
179 :
180 1039 : explicit PassBuilder(TargetMachine *TM = nullptr,
181 : Optional<PGOOptions> PGOOpt = None,
182 : PassInstrumentationCallbacks *PIC = nullptr)
183 1039 : : TM(TM), PGOOpt(PGOOpt), PIC(PIC) {}
184 :
185 : /// Cross register the analysis managers through their proxies.
186 : ///
187 : /// This is an interface that can be used to cross register each
188 : /// AnalysisManager with all the other analysis managers.
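///
/// A minimal setup sketch (module creation and error handling omitted; all of
/// the calls used are declared below in this class):
///
///   PassBuilder PB;
///   LoopAnalysisManager LAM;
///   FunctionAnalysisManager FAM;
///   CGSCCAnalysisManager CGAM;
///   ModuleAnalysisManager MAM;
///   PB.registerModuleAnalyses(MAM);
///   PB.registerCGSCCAnalyses(CGAM);
///   PB.registerFunctionAnalyses(FAM);
///   PB.registerLoopAnalyses(LAM);
///   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);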
189 : void crossRegisterProxies(LoopAnalysisManager &LAM,
190 : FunctionAnalysisManager &FAM,
191 : CGSCCAnalysisManager &CGAM,
192 : ModuleAnalysisManager &MAM);
193 :
194 : /// Registers all available module analysis passes.
195 : ///
196 : /// This is an interface that can be used to populate a \c
197 : /// ModuleAnalysisManager with all registered module analyses. Callers can
198 : /// still manually register any additional analyses. Callers can also
199 : /// pre-register analyses and this will not override those.
200 : void registerModuleAnalyses(ModuleAnalysisManager &MAM);
201 :
202 : /// Registers all available CGSCC analysis passes.
203 : ///
204 : /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
205 : /// with all registered CGSCC analyses. Callers can still manually register any
206 : /// additional analyses. Callers can also pre-register analyses and this will
207 : /// not override those.
208 : void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
209 :
210 : /// Registers all available function analysis passes.
211 : ///
212 : /// This is an interface that can be used to populate a \c
213 : /// FunctionAnalysisManager with all registered function analyses. Callers can
214 : /// still manually register any additional analyses. Callers can also
215 : /// pre-register analyses and this will not override those.
216 : void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
217 :
218 : /// Registers all available loop analysis passes.
219 : ///
220 : /// This is an interface that can be used to populate a \c LoopAnalysisManager
221 : /// with all registered loop analyses. Callers can still manually register any
222 : /// additional analyses.
223 : void registerLoopAnalyses(LoopAnalysisManager &LAM);
224 :
225 : /// Construct the core LLVM function canonicalization and simplification
226 : /// pipeline.
227 : ///
228 : /// This is a long pipeline and uses most of the per-function optimization
229 : /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
230 : /// repeatedly over the IR and is not expected to destroy important
231 : /// information about the semantics of the IR.
232 : ///
233 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
234 : /// only intended for use when attempting to optimize code. If frontends
235 : /// require some transformations for semantic reasons, they should explicitly
236 : /// build them.
237 : ///
238 : /// \p Phase indicates the current ThinLTO phase.
239 : FunctionPassManager
240 : buildFunctionSimplificationPipeline(OptimizationLevel Level,
241 : ThinLTOPhase Phase,
242 : bool DebugLogging = false);
243 :
244 : /// Construct the core LLVM module canonicalization and simplification
245 : /// pipeline.
246 : ///
247 : /// This pipeline focuses on canonicalizing and simplifying the entire module
248 : /// of IR. Much like the function simplification pipeline above, it is
249 : /// suitable to run repeatedly over the IR and is not expected to destroy
250 : /// important information. It does, however, perform inlining and other
251 : /// heuristic based simplifications that are not strictly reversible.
252 : ///
253 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
254 : /// only intended for use when attempting to optimize code. If frontends
255 : /// require some transformations for semantic reasons, they should explicitly
256 : /// build them.
257 : ///
258 : /// \p Phase indicates the current ThinLTO phase.
259 : ModulePassManager
260 : buildModuleSimplificationPipeline(OptimizationLevel Level,
261 : ThinLTOPhase Phase,
262 : bool DebugLogging = false);
263 :
264 : /// Construct the core LLVM module optimization pipeline.
265 : ///
266 : /// This pipeline focuses on optimizing the execution speed of the IR. It
267 : /// uses cost modeling and thresholds to balance code growth against runtime
268 : /// improvements. It includes vectorization and other information destroying
269 : /// transformations. It also cannot generally be run repeatedly on a module
270 : /// without the risk of seriously regressing the runtime performance of the
271 : /// code or causing serious code size growth.
272 : ///
273 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
274 : /// only intended for use when attempting to optimize code. If frontends
275 : /// require some transformations for semantic reasons, they should explicitly
276 : /// build them.
277 : ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
278 : bool DebugLogging = false);
279 :
280 : /// Build a per-module default optimization pipeline.
281 : ///
282 : /// This provides a good default optimization pipeline for per-module
283 : /// optimization and code generation without any link-time optimization. It
284 : /// typically corresponds to frontend "-O[123]" options for optimization
285 : /// levels \c O1, \c O2, and \c O3 respectively.
286 : ///
287 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
288 : /// only intended for use when attempting to optimize code. If frontends
289 : /// require some transformations for semantic reasons, they should explicitly
290 : /// build them.
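///
/// For example, a sketch of building and running this pipeline at \c O2,
/// assuming the analysis managers have been registered and cross-registered as
/// shown for \c crossRegisterProxies above, and that a \c Module \c M is
/// available:
///
///   ModulePassManager MPM = PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
///   MPM.run(M, MAM);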
291 : ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
292 : bool DebugLogging = false);
293 :
294 : /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
295 : /// a pass manager.
296 : ///
297 : /// This adds the pre-link optimizations tuned to prepare a module for
298 : /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
299 : /// without making irreversible decisions which could be made better during
300 : /// the LTO run.
301 : ///
302 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
303 : /// only intended for use when attempting to optimize code. If frontends
304 : /// require some transformations for semantic reasons, they should explicitly
305 : /// build them.
306 : ModulePassManager
307 : buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
308 : bool DebugLogging = false);
309 :
310 : /// Build a ThinLTO default optimization pipeline to a pass manager.
311 : ///
312 : /// This provides a good default optimization pipeline for link-time
313 : /// optimization and code generation. It is particularly tuned to fit well
314 : /// when IR coming into the LTO phase was first run through \c
315 : /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
316 : ///
317 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
318 : /// only intended for use when attempting to optimize code. If frontends
319 : /// require some transformations for semantic reasons, they should explicitly
320 : /// build them.
321 : ModulePassManager
322 : buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
323 : const ModuleSummaryIndex *ImportSummary);
324 :
325 : /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
326 : /// manager.
327 : ///
328 : /// This adds the pre-link optimizations tuned to work well with a later LTO
329 : /// run. It works to minimize the IR which needs to be analyzed without
330 : /// making irreversible decisions which could be made better during the LTO
331 : /// run.
332 : ///
333 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
334 : /// only intended for use when attempting to optimize code. If frontends
335 : /// require some transformations for semantic reasons, they should explicitly
336 : /// build them.
337 : ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
338 : bool DebugLogging = false);
339 :
340 : /// Build an LTO default optimization pipeline to a pass manager.
341 : ///
342 : /// This provides a good default optimization pipeline for link-time
343 : /// optimization and code generation. It is particularly tuned to fit well
344 : /// when IR coming into the LTO phase was first run through \c
345 : /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
346 : ///
347 : /// Note that \p Level cannot be `O0` here. The pipelines produced are
348 : /// only intended for use when attempting to optimize code. If frontends
349 : /// require some transformations for semantic reasons, they should explicitly
350 : /// build them.
351 : ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
352 : bool DebugLogging,
353 : ModuleSummaryIndex *ExportSummary);
354 :
355 : /// Build the default `AAManager` with the default alias analysis pipeline
356 : /// registered.
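///
/// A typical use is to register the result with a FunctionAnalysisManager
/// before calling \c registerFunctionAnalyses, so that this AA pipeline takes
/// precedence over the default one (a sketch):
///
///   FunctionAnalysisManager FAM;
///   FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
///   PB.registerFunctionAnalyses(FAM);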
357 : AAManager buildDefaultAAPipeline();
358 :
359 : /// Parse a textual pass pipeline description into a \c
360 : /// ModulePassManager.
361 : ///
362 : /// The format of the textual pass pipeline description looks something like:
363 : ///
364 : /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
365 : ///
366 : /// Pass managers have ()s describing the nesting structure of passes. All passes
367 : /// are comma separated. As a special shortcut, if the very first pass is not
368 : /// a module pass (as a module pass manager is), this will automatically form
369 : /// the shortest stack of pass managers that allow inserting that first pass.
370 : /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
371 : /// passes 'lpassN', all of these are valid:
372 : ///
373 : /// fpass1,fpass2,fpass3
374 : /// cgpass1,cgpass2,cgpass3
375 : /// lpass1,lpass2,lpass3
376 : ///
377 : /// And they are equivalent to the following (resp.):
378 : ///
379 : /// module(function(fpass1,fpass2,fpass3))
380 : /// module(cgscc(cgpass1,cgpass2,cgpass3))
381 : /// module(function(loop(lpass1,lpass2,lpass3)))
382 : ///
383 : /// This shortcut is especially useful for debugging and testing small pass
384 : /// combinations. Note that these shortcuts don't introduce any other magic.
385 : /// If the passes in the sequence aren't all the exact same kind of pass, it
386 : /// will be an error. You cannot mix different levels implicitly; you must
387 : /// explicitly form a pass manager in which to nest passes.
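///
/// For example, a sketch of parsing a small pipeline into a ModulePassManager
/// and handling the returned \c Error (assuming a configured \c PassBuilder
/// \c PB):
///
///   ModulePassManager MPM;
///   if (auto Err =
///           PB.parsePassPipeline(MPM, "function(instcombine,sroa),globaldce"))
///     errs() << toString(std::move(Err)) << "\n";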
388 : Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
389 : bool VerifyEachPass = true,
390 : bool DebugLogging = false);
391 :
392 : /// @{ Parse a textual pass pipeline description into a specific PassManager
393 : ///
394 : /// Automatic deduction of an appropriate pass manager stack is not supported.
395 : /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
396 : /// this is the valid pipeline text:
397 : ///
398 : /// function(lpass)
399 : Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
400 : bool VerifyEachPass = true,
401 : bool DebugLogging = false);
402 : Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
403 : bool VerifyEachPass = true,
404 : bool DebugLogging = false);
405 : Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
406 : bool VerifyEachPass = true,
407 : bool DebugLogging = false);
408 : /// @}
409 :
410 : /// Parse a textual alias analysis pipeline into the provided AA manager.
411 : ///
412 : /// The format of the textual AA pipeline is a comma separated list of AA
413 : /// pass names:
414 : ///
415 : /// basic-aa,globals-aa,...
416 : ///
417 : /// The AA manager is set up such that the provided alias analyses are tried
418 : /// in the order specified. See the \c AAManager documentation for details
419 : /// about the logic used. This routine just provides the textual mapping
420 : /// between AA names and the analyses to register with the manager.
421 : ///
422 : /// Returns an error if the text cannot be parsed cleanly. The specific state
423 : /// of the \p AA manager is unspecified if such an error is encountered.
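///
/// For example (a sketch):
///
///   AAManager AA;
///   if (auto Err = PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
///     errs() << toString(std::move(Err)) << "\n";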
425 : Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
426 :
427 : /// Register a callback for a default optimizer pipeline extension
428 : /// point
429 : ///
430 : /// This extension point allows adding passes that perform peephole
431 : /// optimizations similar to the instruction combiner. These passes will be
432 : /// inserted after each instance of the instruction combiner pass.
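///
/// For example, a frontend could append a hypothetical \c MyPeepholePass (not
/// a real LLVM pass) after each instruction combiner run, skipping \c O0
/// (a sketch):
///
///   PB.registerPeepholeEPCallback(
///       [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
///         if (Level != PassBuilder::O0)
///           FPM.addPass(MyPeepholePass());
///       });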
433 : void registerPeepholeEPCallback(
434 : const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
435 2 : PeepholeEPCallbacks.push_back(C);
436 : }
437 :
438 : /// Register a callback for a default optimizer pipeline extension
439 : /// point
440 : ///
441 : /// This extension point allows adding late loop canonicalization and
442 : /// simplification passes. This is the last point in the loop optimization
443 : /// pipeline before loop deletion. Each pass added
444 : /// here must be an instance of LoopPass.
445 : /// This is the place to add passes that can remove loops, such as target-
446 : /// specific loop idiom recognition.
447 : void registerLateLoopOptimizationsEPCallback(
448 : const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
449 1 : LateLoopOptimizationsEPCallbacks.push_back(C);
450 : }
451 :
452 : /// Register a callback for a default optimizer pipeline extension
453 : /// point
454 : ///
455 : /// This extension point allows adding loop passes to the end of the loop
456 : /// optimizer.
457 : void registerLoopOptimizerEndEPCallback(
458 : const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
459 1 : LoopOptimizerEndEPCallbacks.push_back(C);
460 : }
461 :
462 : /// Register a callback for a default optimizer pipeline extension
463 : /// point
464 : ///
465 : /// This extension point allows adding optimization passes after most of the
466 : /// main optimizations, but before the last cleanup-ish optimizations.
467 : void registerScalarOptimizerLateEPCallback(
468 : const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
469 1 : ScalarOptimizerLateEPCallbacks.push_back(C);
470 : }
471 :
472 : /// Register a callback for a default optimizer pipeline extension
473 : /// point
474 : ///
475 : /// This extension point allows adding CallGraphSCC passes at the end of the
476 : /// main CallGraphSCC passes and before any function simplification passes run
477 : /// by CGPassManager.
478 : void registerCGSCCOptimizerLateEPCallback(
479 : const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
480 1 : CGSCCOptimizerLateEPCallbacks.push_back(C);
481 : }
482 :
483 : /// Register a callback for a default optimizer pipeline extension
484 : /// point
485 : ///
486 : /// This extension point allows adding optimization passes before the
487 : /// vectorizer and other highly target specific optimization passes are
488 : /// executed.
489 : void registerVectorizerStartEPCallback(
490 : const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
491 937 : VectorizerStartEPCallbacks.push_back(C);
492 : }
493 :
494 : /// Register a callback for a default optimizer pipeline extension point.
495 : ///
496 : /// This extension point allows adding optimization passes once at the start of the
497 : /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
498 : /// link-time pipelines).
499 : void registerPipelineStartEPCallback(
500 : const std::function<void(ModulePassManager &)> &C) {
501 5 : PipelineStartEPCallbacks.push_back(C);
502 : }
503 :
504 : /// Register a callback for parsing an AliasAnalysis Name to populate
505 : /// the given AAManager \p AA
506 : void registerParseAACallback(
507 : const std::function<bool(StringRef Name, AAManager &AA)> &C) {
508 : AAParsingCallbacks.push_back(C);
509 : }
510 :
511 : /// @{ Register callbacks for analysis registration with this PassBuilder
512 : /// instance.
513 : /// Callees register their analyses with the given AnalysisManager objects.
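///
/// For example, reusing the hypothetical \c FancyAnalysis from the
/// \c parseAnalysisUtilityPasses example at the end of this file (a sketch):
///
///   PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
///     FAM.registerPass([] { return FancyAnalysis(); });
///   });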
514 : void registerAnalysisRegistrationCallback(
515 : const std::function<void(CGSCCAnalysisManager &)> &C) {
516 4 : CGSCCAnalysisRegistrationCallbacks.push_back(C);
517 : }
518 : void registerAnalysisRegistrationCallback(
519 : const std::function<void(FunctionAnalysisManager &)> &C) {
520 940 : FunctionAnalysisRegistrationCallbacks.push_back(C);
521 : }
522 : void registerAnalysisRegistrationCallback(
523 : const std::function<void(LoopAnalysisManager &)> &C) {
524 4 : LoopAnalysisRegistrationCallbacks.push_back(C);
525 : }
526 : void registerAnalysisRegistrationCallback(
527 : const std::function<void(ModuleAnalysisManager &)> &C) {
528 5 : ModuleAnalysisRegistrationCallbacks.push_back(C);
529 : }
530 : /// @}
531 :
532 : /// @{ Register pipeline parsing callbacks with this pass builder instance.
533 : /// Using these callbacks, callers can parse both a single pass name, as well
534 : /// as entire sub-pipelines, and populate the PassManager instance
535 : /// accordingly.
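///
/// For example, a plugin could make a hypothetical \c MyCustomPass (not part
/// of LLVM) available under the pipeline name "my-custom-pass" (a sketch):
///
///   PB.registerPipelineParsingCallback(
///       [](StringRef Name, FunctionPassManager &FPM,
///          ArrayRef<PassBuilder::PipelineElement>) {
///         if (Name == "my-custom-pass") {
///           FPM.addPass(MyCustomPass());
///           return true;
///         }
///         return false;
///       });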
536 : void registerPipelineParsingCallback(
537 : const std::function<bool(StringRef Name, CGSCCPassManager &,
538 : ArrayRef<PipelineElement>)> &C) {
539 4 : CGSCCPipelineParsingCallbacks.push_back(C);
540 : }
541 : void registerPipelineParsingCallback(
542 : const std::function<bool(StringRef Name, FunctionPassManager &,
543 : ArrayRef<PipelineElement>)> &C) {
544 1876 : FunctionPipelineParsingCallbacks.push_back(C);
545 : }
546 : void registerPipelineParsingCallback(
547 : const std::function<bool(StringRef Name, LoopPassManager &,
548 : ArrayRef<PipelineElement>)> &C) {
549 4 : LoopPipelineParsingCallbacks.push_back(C);
550 : }
551 : void registerPipelineParsingCallback(
552 : const std::function<bool(StringRef Name, ModulePassManager &,
553 : ArrayRef<PipelineElement>)> &C) {
554 942 : ModulePipelineParsingCallbacks.push_back(C);
555 : }
556 : /// @}
557 :
558 : /// Register a callback for a top-level pipeline entry.
559 : ///
560 : /// If the PassManager type is not given at the top level of the pipeline
561 : /// text, this Callback should be used to determine the appropriate stack of
562 : /// PassManagers and populate the passed ModulePassManager.
563 : void registerParseTopLevelPipelineCallback(
564 : const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
565 : bool VerifyEachPass, bool DebugLogging)> &C) {
566 937 : TopLevelPipelineParsingCallbacks.push_back(C);
567 : }
568 :
569 : private:
570 : static Optional<std::vector<PipelineElement>>
571 : parsePipelineText(StringRef Text);
572 :
573 : Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
574 : bool VerifyEachPass, bool DebugLogging);
575 : Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
576 : bool VerifyEachPass, bool DebugLogging);
577 : Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
578 : bool VerifyEachPass, bool DebugLogging);
579 : Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
580 : bool VerifyEachPass, bool DebugLogging);
581 : bool parseAAPassName(AAManager &AA, StringRef Name);
582 :
583 : Error parseLoopPassPipeline(LoopPassManager &LPM,
584 : ArrayRef<PipelineElement> Pipeline,
585 : bool VerifyEachPass, bool DebugLogging);
586 : Error parseFunctionPassPipeline(FunctionPassManager &FPM,
587 : ArrayRef<PipelineElement> Pipeline,
588 : bool VerifyEachPass, bool DebugLogging);
589 : Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
590 : ArrayRef<PipelineElement> Pipeline,
591 : bool VerifyEachPass, bool DebugLogging);
592 : Error parseModulePassPipeline(ModulePassManager &MPM,
593 : ArrayRef<PipelineElement> Pipeline,
594 : bool VerifyEachPass, bool DebugLogging);
595 :
596 : void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
597 : OptimizationLevel Level, bool RunProfileGen,
598 : std::string ProfileGenFile,
599 : std::string ProfileUseFile,
600 : std::string ProfileRemappingFile);
601 :
602 : void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
603 :
604 : // Extension Point callbacks
605 : SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
606 : PeepholeEPCallbacks;
607 : SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
608 : LateLoopOptimizationsEPCallbacks;
609 : SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
610 : LoopOptimizerEndEPCallbacks;
611 : SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
612 : ScalarOptimizerLateEPCallbacks;
613 : SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
614 : CGSCCOptimizerLateEPCallbacks;
615 : SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
616 : VectorizerStartEPCallbacks;
617 : // Module callbacks
618 : SmallVector<std::function<void(ModulePassManager &)>, 2>
619 : PipelineStartEPCallbacks;
620 : SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
621 : ModuleAnalysisRegistrationCallbacks;
622 : SmallVector<std::function<bool(StringRef, ModulePassManager &,
623 : ArrayRef<PipelineElement>)>,
624 : 2>
625 : ModulePipelineParsingCallbacks;
626 : SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
627 : bool VerifyEachPass, bool DebugLogging)>,
628 : 2>
629 : TopLevelPipelineParsingCallbacks;
630 : // CGSCC callbacks
631 : SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
632 : CGSCCAnalysisRegistrationCallbacks;
633 : SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
634 : ArrayRef<PipelineElement>)>,
635 : 2>
636 : CGSCCPipelineParsingCallbacks;
637 : // Function callbacks
638 : SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
639 : FunctionAnalysisRegistrationCallbacks;
640 : SmallVector<std::function<bool(StringRef, FunctionPassManager &,
641 : ArrayRef<PipelineElement>)>,
642 : 2>
643 : FunctionPipelineParsingCallbacks;
644 : // Loop callbacks
645 : SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
646 : LoopAnalysisRegistrationCallbacks;
647 : SmallVector<std::function<bool(StringRef, LoopPassManager &,
648 : ArrayRef<PipelineElement>)>,
649 : 2>
650 : LoopPipelineParsingCallbacks;
651 : // AA callbacks
652 : SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
653 : AAParsingCallbacks;
654 : };
655 :
656 : /// This utility template takes care of adding require<> and invalidate<>
657 : /// passes for an analysis to a given \c PassManager. It is intended to be used
658 : /// during parsing of a pass pipeline when parsing a single PipelineName.
659 : /// When registering a new function analysis FancyAnalysis with the pass
660 : /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
661 : /// like this:
662 : ///
663 : /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
664 : /// ArrayRef<PipelineElement> P) {
665 : /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
666 : /// FPM))
667 : /// return true;
668 : /// return false;
669 : /// }
670 : template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
671 : typename... ExtraArgTs>
672 135 : bool parseAnalysisUtilityPasses(
673 : StringRef AnalysisName, StringRef PipelineName,
674 : PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
675 : if (!PipelineName.endswith(">"))
676 : return false;
677 : // See if this is an invalidate<> pass name
678 : if (PipelineName.startswith("invalidate<")) {
679 5 : PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
680 : if (PipelineName != AnalysisName)
681 : return false;
682 5 : PM.addPass(InvalidateAnalysisPass<AnalysisT>());
683 5 : return true;
684 : }
685 :
686 : // See if this is a require<> pass name
687 : if (PipelineName.startswith("require<")) {
688 20 : PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
689 : if (PipelineName != AnalysisName)
690 : return false;
691 8 : PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
692 : ExtraArgTs...>());
693 8 : return true;
694 : }
695 :
696 : return false;
697 : }
698 : } // namespace llvm
699 :
700 : #endif