LLVM  15.0.0git
MLInlineAdvisor.cpp
Go to the documentation of this file.
1 //===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interface between the inliner and a learned model.
10 // It delegates model evaluation to either the AOT compiled model (the
11 // 'release' mode) or a runtime-loaded model (the 'development' case).
12 //
13 //===----------------------------------------------------------------------===//
15 #include "llvm/ADT/SCCIterator.h"
25 #include "llvm/IR/InstIterator.h"
26 #include "llvm/IR/PassManager.h"
28 
29 using namespace llvm;
30 
#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)

// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT

/// Factory for the 'release' mode advisor: the model is AOT-compiled into the
/// binary as llvm::InlinerSizeModel, so no runtime model loading is needed.
/// The returned advisor evaluates that model through a
/// ReleaseModeModelRunner wired to the shared FeatureMap / DecisionName.
std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
  auto AOTRunner =
      std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
          M.getContext(), FeatureMap, DecisionName);
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
}
#endif
44 
45 #define DEBUG_TYPE "inline-ml"
46 
48  "ml-advisor-size-increase-threshold", cl::Hidden,
49  cl::desc("Maximum factor by which expected native size may increase before "
50  "blocking any further inlining."),
51  cl::init(2.0));
52 
53 // clang-format off
54 const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
55 #define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
56 // InlineCost features - these must come first
58 #undef POPULATE_NAMES
59 
60 // Non-cost features
61 #define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
63 #undef POPULATE_NAMES
64 };
65 // clang-format on
66 
67 const char *const llvm::DecisionName = "inlining_decision";
68 const char *const llvm::DefaultDecisionName = "inlining_default";
69 const char *const llvm::RewardName = "delta_size";
70 
72  if (auto *CS = dyn_cast<CallBase>(&I))
73  if (Function *Callee = CS->getCalledFunction()) {
74  if (!Callee->isDeclaration()) {
75  return CS;
76  }
77  }
78  return nullptr;
79 }
80 
82  std::unique_ptr<MLModelRunner> Runner)
83  : InlineAdvisor(
84  M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
85  ModelRunner(std::move(Runner)),
86  CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
87  InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
89 
90  // Extract the 'call site height' feature - the position of a call site
91  // relative to the farthest statically reachable SCC node. We don't mutate
92  // this value while inlining happens. Empirically, this feature proved
93  // critical in behavioral cloning - i.e. training a model to mimic the manual
94  // heuristic's decisions - and, thus, equally important for training for
95  // improvement.
96  CallGraph CGraph(M);
97  for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
98  const std::vector<CallGraphNode *> &CGNodes = *I;
99  unsigned Level = 0;
100  for (auto *CGNode : CGNodes) {
101  Function *F = CGNode->getFunction();
102  if (!F || F->isDeclaration())
103  continue;
104  for (auto &I : instructions(F)) {
105  if (auto *CS = getInlinableCS(I)) {
106  auto *Called = CS->getCalledFunction();
107  auto Pos = FunctionLevels.find(&CG.get(*Called));
108  // In bottom up traversal, an inlinable callee is either in the
109  // same SCC, or to a function in a visited SCC. So not finding its
110  // level means we haven't visited it yet, meaning it's in this SCC.
111  if (Pos == FunctionLevels.end())
112  continue;
113  Level = std::max(Level, Pos->second + 1);
114  }
115  }
116  }
117  for (auto *CGNode : CGNodes) {
118  Function *F = CGNode->getFunction();
119  if (F && !F->isDeclaration())
120  FunctionLevels[&CG.get(*F)] = Level;
121  }
122  }
123  for (auto KVP : FunctionLevels) {
124  AllNodes.insert(KVP.first);
125  EdgeCount += getLocalCalls(KVP.first->getFunction());
126  }
127  NodeCount = AllNodes.size();
128 }
129 
131  return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;
132 }
133 
135  // Function passes executed between InlinerPass runs may have changed the
136  // module-wide features.
137  // The cgscc pass manager rules are such that:
138  // - if a pass leads to merging SCCs, then the pipeline is restarted on the
139  // merged SCC
140  // - if a pass leads to splitting the SCC, then we continue with one of the
141  // splits
142  // This means that the NodesInLastSCC is a superset (not strict) of the nodes
143  // that subsequent passes would have processed
144  // - in addition, if new Nodes were created by a pass (e.g. CoroSplit),
145  // they'd be adjacent to Nodes in the last SCC. So we just need to check the
146  // boundary of Nodes in NodesInLastSCC for Nodes we haven't seen. We don't
147  // care about the nature of the Edge (call or ref).
148  NodeCount -= static_cast<int64_t>(NodesInLastSCC.size());
149  while (!NodesInLastSCC.empty()) {
150  const auto *N = NodesInLastSCC.front();
151  NodesInLastSCC.pop_front();
152  // The Function wrapped by N could have been deleted since we last saw it.
153  if (N->isDead()) {
154  assert(!N->getFunction().isDeclaration());
155  continue;
156  }
157  ++NodeCount;
158  EdgeCount += getLocalCalls(N->getFunction());
159  for (const auto &E : *(*N)) {
160  const auto *AdjNode = &E.getNode();
161  assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
162  auto I = AllNodes.insert(AdjNode);
163  if (I.second)
164  NodesInLastSCC.push_back(AdjNode);
165  }
166  }
167 
168  EdgeCount -= EdgesOfLastSeenNodes;
169  EdgesOfLastSeenNodes = 0;
170 }
171 
173  if (!LastSCC)
174  return;
175  // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
176  // we update the node count and edge count from the subset of these nodes that
177  // survived.
178  assert(NodesInLastSCC.empty());
179  assert(NodeCount >= LastSCC->size());
180  EdgesOfLastSeenNodes = 0;
181  for (const auto &N : *LastSCC) {
182  assert(!N.isDead());
183  EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
184  NodesInLastSCC.push_back(&N);
185  }
186  assert(EdgeCount >= EdgesOfLastSeenNodes);
187 }
188 
191  .DirectCallsToDefinedFunctions;
192 }
193 
194 // Update the internal state of the advisor, and force invalidate feature
195 // analysis. Currently, we maintain minimal (and very simple) global state - the
196 // number of functions and the number of static calls. We also keep track of the
197 // total IR size in this module, to stop misbehaving policies at a certain bloat
198 // factor (SizeIncreaseThreshold)
200  bool CalleeWasDeleted) {
201  assert(!ForceStop);
202  Function *Caller = Advice.getCaller();
203  Function *Callee = Advice.getCallee();
204 
205  // The caller features aren't valid anymore.
206  {
209  FAM.invalidate(*Caller, PA);
210  }
211  int64_t IRSizeAfter =
212  getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
213  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
214  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
215  ForceStop = true;
216 
217  // We can delta-update module-wide features. We know the inlining only changed
218  // the caller, and maybe the callee (by deleting the latter).
219  // Nodes are simple to update.
220  // For edges, we 'forget' the edges that the caller and callee used to have
221  // before inlining, and add back what they currently have together.
222  int64_t NewCallerAndCalleeEdges =
224  .DirectCallsToDefinedFunctions;
225 
226  if (CalleeWasDeleted)
227  --NodeCount;
228  else
229  NewCallerAndCalleeEdges +=
231  .DirectCallsToDefinedFunctions;
232  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
233  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
234 }
235 
236 int64_t MLInlineAdvisor::getModuleIRSize() const {
237  int64_t Ret = 0;
238  for (auto &F : M)
239  if (!F.isDeclaration())
240  Ret += getIRSize(F);
241  return Ret;
242 }
243 
244 std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
245  auto &Caller = *CB.getCaller();
246  auto &Callee = *CB.getCalledFunction();
247 
248  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
250  };
251  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
252  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);
253 
254  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
255  // If this is a "never inline" case, there won't be any changes to internal
256  // state we need to track, so we can just return the base InlineAdvice, which
257  // will do nothing interesting.
258  // Same thing if this is a recursive case.
259  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
260  &Caller == &Callee)
261  return getMandatoryAdvice(CB, false);
262 
263  bool Mandatory =
265 
266  // If we need to stop, we won't want to track anymore any state changes, so
267  // we just return the base InlineAdvice, which acts as a noop.
268  if (ForceStop) {
269  ORE.emit([&] {
270  return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
271  << "Won't attempt inlining because module size grew too much.";
272  });
273  return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
274  }
275 
276  int CostEstimate = 0;
277  if (!Mandatory) {
278  auto IsCallSiteInlinable =
279  llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
280  if (!IsCallSiteInlinable) {
281  // We can't inline this for correctness reasons, so return the base
282  // InlineAdvice, as we don't care about tracking any state changes (which
283  // won't happen).
284  return std::make_unique<InlineAdvice>(this, CB, ORE, false);
285  }
286  CostEstimate = *IsCallSiteInlinable;
287  }
288 
289  const auto CostFeatures =
290  llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
291  if (!CostFeatures) {
292  return std::make_unique<InlineAdvice>(this, CB, ORE, false);
293  }
294 
295  if (Mandatory)
296  return getMandatoryAdvice(CB, true);
297 
298  auto NrCtantParams = 0;
299  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
300  NrCtantParams += (isa<Constant>(*I));
301  }
302 
303  auto &CallerBefore = FAM.getResult<FunctionPropertiesAnalysis>(Caller);
304  auto &CalleeBefore = FAM.getResult<FunctionPropertiesAnalysis>(Callee);
305 
306  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
307  CalleeBefore.BasicBlockCount;
308  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
309  getInitialFunctionLevel(Caller);
310  *ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
311  *ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
312  *ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
313  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerUsers) =
314  CallerBefore.Uses;
315  *ModelRunner->getTensor<int64_t>(
316  FeatureIndex::CallerConditionallyExecutedBlocks) =
317  CallerBefore.BlocksReachedFromConditionalInstruction;
318  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerBasicBlockCount) =
319  CallerBefore.BasicBlockCount;
320  *ModelRunner->getTensor<int64_t>(
321  FeatureIndex::CalleeConditionallyExecutedBlocks) =
322  CalleeBefore.BlocksReachedFromConditionalInstruction;
323  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeUsers) =
324  CalleeBefore.Uses;
325  *ModelRunner->getTensor<int64_t>(FeatureIndex::CostEstimate) = CostEstimate;
326 
327  // Add the cost features
328  for (size_t I = 0;
329  I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures); ++I) {
330  *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
331  static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
332  }
333 
334  return getAdviceFromModel(CB, ORE);
335 }
336 
337 std::unique_ptr<MLInlineAdvice>
340  return std::make_unique<MLInlineAdvice>(
341  this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));
342 }
343 
344 std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
345  bool Advice) {
346  // Make sure we track inlinings in all cases - mandatory or not.
347  if (Advice && !ForceStop)
348  return getMandatoryAdviceImpl(CB);
349 
350  // If this is a "never inline" case, there won't be any changes to internal
351  // state we need to track, so we can just return the base InlineAdvice, which
352  // will do nothing interesting.
353  // Same if we are forced to stop - we don't track anymore.
354  return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
355 }
356 
357 std::unique_ptr<MLInlineAdvice>
359  return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
360 }
361 
362 void MLInlineAdvice::reportContextForRemark(
364  using namespace ore;
365  OR << NV("Callee", Callee->getName());
366  for (size_t I = 0; I < NumberOfFeatures; ++I)
367  OR << NV(FeatureMap[I].name(),
368  *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
369  OR << NV("ShouldInline", isInliningRecommended());
370 }
371 
373  ORE.emit([&]() {
374  OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
375  reportContextForRemark(R);
376  return R;
377  });
378  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);
379 }
380 
382  ORE.emit([&]() {
383  OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
384  Block);
385  reportContextForRemark(R);
386  return R;
387  });
388  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);
389 }
390 
392  const InlineResult &Result) {
393  ORE.emit([&]() {
394  OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",
395  DLoc, Block);
396  reportContextForRemark(R);
397  return R;
398  });
399 }
401  ORE.emit([&]() {
402  OptimizationRemarkMissed R(DEBUG_TYPE, "IniningNotAttempted", DLoc, Block);
403  reportContextForRemark(R);
404  return R;
405  });
406 }
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
SizeIncreaseThreshold
static cl::opt< float > SizeIncreaseThreshold("ml-advisor-size-increase-threshold", cl::Hidden, cl::desc("Maximum factor by which expected native size may increase before " "blocking any further inlining."), cl::init(2.0))
AssumptionCache.h
llvm::TargetIRAnalysis
Analysis pass providing the TargetTransformInfo.
Definition: TargetTransformInfo.h:2458
INLINE_COST_FEATURE_ITERATOR
#define INLINE_COST_FEATURE_ITERATOR(M)
Definition: InlineModelFeatureMaps.h:24
llvm::OptimizationRemarkMissed
Diagnostic information for missed-optimization remarks.
Definition: DiagnosticInfo.h:735
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::getReleaseModeAdvisor
std::unique_ptr< InlineAdvisor > getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM)
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::ISD::OR
@ OR
Definition: ISDOpcodes.h:667
llvm::inlineCostFeatureToMlFeature
constexpr FeatureIndex inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature)
Definition: InlineModelFeatureMaps.h:125
llvm::MLInlineAdvisor::getLocalCalls
int64_t getLocalCalls(Function &F)
Definition: MLInlineAdvisor.cpp:189
llvm::InlineAdvisor::M
Module & M
Definition: InlineAdvisor.h:179
llvm::FeatureMap
const std::array< TensorSpec, NumberOfFeatures > FeatureMap
Definition: MLInlineAdvisor.cpp:54
SCCIterator.h
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:780
InstIterator.h
llvm::Function
Definition: Function.h:60
llvm::AnalysisManager::invalidate
void invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Invalidate cached analyses for an IR unit.
Definition: PassManagerImpl.h:89
MLModelRunner.h
getInlinableCS
CallBase * getInlinableCS(Instruction &I)
Definition: MLInlineAdvisor.cpp:71
OptimizationRemarkEmitter.h
llvm::CallGraph
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:72
llvm::PreservedAnalyses::abandon
void abandon()
Mark an analysis as abandoned.
Definition: PassManager.h:206
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::LazyCallGraph::lookup
Node * lookup(const Function &F) const
Lookup a function in the graph which has already been scanned and added.
Definition: LazyCallGraph.h:965
llvm::InlineAdvisor::MandatoryInliningKind::Always
@ Always
POPULATE_NAMES
#define POPULATE_NAMES(_, NAME)
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::InlineAdvisor::MandatoryInliningKind::Never
@ Never
llvm::MLInlineAdvisor::ModelRunner
std::unique_ptr< MLModelRunner > ModelRunner
Definition: MLInlineAdvisor.h:60
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1316
llvm::MLInlineAdvice::CalleeIRSize
const int64_t CalleeIRSize
Definition: MLInlineAdvisor.h:110
llvm::DiagnosticInfoOptimizationBase
Common features for diagnostics dealing with optimization remarks that are used by both IR and MIR pa...
Definition: DiagnosticInfo.h:414
llvm::MLInlineAdvice::recordInliningImpl
void recordInliningImpl() override
Definition: MLInlineAdvisor.cpp:372
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MLInlineAdvice::getCallee
Function * getCallee() const
Definition: MLInlineAdvisor.h:107
llvm::InlineAdvisor::getCallerORE
OptimizationRemarkEmitter & getCallerORE(CallBase &CB)
Definition: InlineAdvisor.cpp:559
CommandLine.h
llvm::LazyCallGraph::SCC
An SCC of the call graph.
Definition: LazyCallGraph.h:419
llvm::DecisionName
const char *const DecisionName
Definition: MLInlineAdvisor.cpp:67
llvm::MLInlineAdvice::recordInliningWithCalleeDeletedImpl
void recordInliningWithCalleeDeletedImpl() override
Definition: MLInlineAdvisor.cpp:381
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::InlineAdvice::ORE
OptimizationRemarkEmitter & ORE
Definition: InlineAdvisor.h:107
llvm::CallBase::getCalledFunction
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1396
MAM
ModuleAnalysisManager MAM
Definition: PassBuilderBindings.cpp:61
llvm::MLInlineAdvice::recordUnsuccessfulInliningImpl
void recordUnsuccessfulInliningImpl(const InlineResult &Result) override
Definition: MLInlineAdvisor.cpp:391
llvm::InlineCostFeatureIndex
InlineCostFeatureIndex
Definition: InlineModelFeatureMaps.h:51
llvm::DefaultDecisionName
const char *const DefaultDecisionName
Definition: MLInlineAdvisor.cpp:68
llvm::Instruction
Definition: Instruction.h:42
llvm::MLInlineAdvisor::getIRSize
int64_t getIRSize(const Function &F) const
Definition: MLInlineAdvisor.h:36
llvm::MLInlineAdvice::getCaller
Function * getCaller() const
Definition: MLInlineAdvisor.h:106
LazyCallGraph.h
llvm::CallBase::getCaller
Function * getCaller()
Helper to get the caller (the parent function).
Definition: Instructions.cpp:282
llvm::MLInlineAdvisor::getMandatoryAdviceImpl
virtual std::unique_ptr< MLInlineAdvice > getMandatoryAdviceImpl(CallBase &CB)
Definition: MLInlineAdvisor.cpp:358
llvm::LazyCallGraph::get
Node & get(Function &F)
Get a graph node for a given function, scanning it to populate the graph data as necessary.
Definition: LazyCallGraph.h:986
llvm::scc_begin
scc_iterator< T > scc_begin(const T &G)
Construct the begin iterator for a deduced graph type T.
Definition: SCCIterator.h:232
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::instructions
inst_range instructions(Function *F)
Definition: InstIterator.h:133
llvm::MLInlineAdvisor::MLInlineAdvisor
MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM, std::unique_ptr< MLModelRunner > ModelRunner)
Definition: MLInlineAdvisor.cpp:81
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:173
move
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
Definition: README.txt:546
ReleaseModeModelRunner.h
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::InlineAdvice::Block
const BasicBlock *const Block
Definition: InlineAdvisor.h:106
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::MLInlineAdvisor::getMandatoryAdvice
std::unique_ptr< InlineAdvice > getMandatoryAdvice(CallBase &CB, bool Advice) override
Definition: MLInlineAdvisor.cpp:344
llvm::MLInlineAdvice::CallerAndCalleeEdges
const int64_t CallerAndCalleeEdges
Definition: MLInlineAdvisor.h:111
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::move
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1665
llvm::OptimizationRemarkEmitter::emit
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Definition: OptimizationRemarkEmitter.cpp:77
llvm::InlineAdvice::DLoc
const DebugLoc DLoc
Definition: InlineAdvisor.h:105
InlineCost.h
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
llvm::CallBase::arg_end
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1322
llvm::InlineAdvice::isInliningRecommended
bool isInliningRecommended() const
Get the inlining recommendation.
Definition: InlineAdvisor.h:87
llvm::FunctionPropertiesAnalysis
Definition: FunctionPropertiesAnalysis.h:63
llvm::OptimizationRemarkEmitter
The optimization diagnostic interface.
Definition: OptimizationRemarkEmitter.h:33
llvm::MLInlineAdvisor::getInitialFunctionLevel
unsigned getInitialFunctionLevel(const Function &F) const
Definition: MLInlineAdvisor.cpp:130
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:42
llvm::InlineAdvisor
Interface for deciding whether to inline a call site or not.
Definition: InlineAdvisor.h:140
llvm::LazyCallGraph::SCC::size
int size() const
Definition: LazyCallGraph.h:480
llvm::PICLevel::Level
Level
Definition: CodeGen.h:33
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:305
Callee
amdgpu Simplify well known AMD library false FunctionCallee Callee
Definition: AMDGPULibCalls.cpp:186
FunctionPropertiesAnalysis.h
name
static const char * name
Definition: SVEIntrinsicOpts.cpp:74
llvm::MLInlineAdvisor::onPassEntry
void onPassEntry() override
This must be called when the Inliner pass is entered, to allow the InlineAdvisor update internal stat...
Definition: MLInlineAdvisor.cpp:134
llvm::InlineAdvisor::FAM
FunctionAnalysisManager & FAM
Definition: InlineAdvisor.h:180
std
Definition: BitVector.h:851
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
llvm::getInliningCostFeatures
Optional< InlineCostFeatures > getInliningCostFeatures(CallBase &Call, TargetTransformInfo &CalleeTTI, function_ref< AssumptionCache &(Function &)> GetAssumptionCache, function_ref< BlockFrequencyInfo &(Function &)> GetBFI=nullptr, ProfileSummaryInfo *PSI=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
Get the expanded cost features.
Definition: InlineCost.cpp:2827
llvm::MLInlineAdvisor::onPassExit
void onPassExit(LazyCallGraph::SCC *SCC) override
This must be called when the Inliner pass is exited, as function passes may be run subsequently.
Definition: MLInlineAdvisor.cpp:172
llvm::LazyCallGraphAnalysis
An analysis pass which computes the call graph for a module.
Definition: LazyCallGraph.h:1249
PassManager.h
llvm::MLInlineAdvice::CallerIRSize
const int64_t CallerIRSize
Definition: MLInlineAdvisor.h:109
DEBUG_TYPE
#define DEBUG_TYPE
Definition: MLInlineAdvisor.cpp:45
llvm::InlineCostFeatureIndex::NumberOfFeatures
@ NumberOfFeatures
MLInlineAdvisor.h
llvm::MLInlineAdvisor::onSuccessfulInlining
void onSuccessfulInlining(const MLInlineAdvice &Advice, bool CalleeWasDeleted)
Definition: MLInlineAdvisor.cpp:199
CallGraph.h
llvm::InlineAdvisor::getMandatoryKind
static MandatoryInliningKind getMandatoryKind(CallBase &CB, FunctionAnalysisManager &FAM, OptimizationRemarkEmitter &ORE)
Definition: InlineAdvisor.cpp:527
llvm::getInliningCostEstimate
Optional< int > getInliningCostEstimate(CallBase &Call, TargetTransformInfo &CalleeTTI, function_ref< AssumptionCache &(Function &)> GetAssumptionCache, function_ref< BlockFrequencyInfo &(Function &)> GetBFI=nullptr, ProfileSummaryInfo *PSI=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
Get the cost estimate ignoring thresholds.
Definition: InlineCost.cpp:2802
llvm::OptimizationRemark
Diagnostic information for applied optimization remarks.
Definition: DiagnosticInfo.h:690
llvm::MLInlineAdvisor::getAdviceImpl
std::unique_ptr< InlineAdvice > getAdviceImpl(CallBase &CB) override
Definition: MLInlineAdvisor.cpp:244
N
#define N
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
TargetTransformInfo.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1174
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:42
llvm::InnerAnalysisManagerProxy
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:937
InlineModelFeatureMaps.h
llvm::MLInlineAdvisor::getAdviceFromModel
virtual std::unique_ptr< MLInlineAdvice > getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE)
Definition: MLInlineAdvisor.cpp:338
llvm::cl::desc
Definition: CommandLine.h:405
llvm::NumberOfFeatures
constexpr size_t NumberOfFeatures
Definition: InlineModelFeatureMaps.h:129
INLINE_FEATURE_ITERATOR
#define INLINE_FEATURE_ITERATOR(M)
Definition: InlineModelFeatureMaps.h:83
llvm::MLInlineAdvice
InlineAdvice that tracks changes post inlining.
Definition: MLInlineAdvisor.h:86
llvm::OptimizationRemarkEmitterAnalysis
Definition: OptimizationRemarkEmitter.h:164
llvm::InlineResult
InlineResult is basically true or false.
Definition: InlineCost.h:164
llvm::RewardName
const char *const RewardName
Definition: MLInlineAdvisor.cpp:69
llvm::InlineAdvice::Callee
Function *const Callee
Definition: InlineAdvisor.h:100
llvm::MLInlineAdvice::recordUnattemptedInliningImpl
void recordUnattemptedInliningImpl() override
Definition: MLInlineAdvisor.cpp:400