//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325),
    cl::desc("Threshold for inlining functions with inline hint"));

// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(225),
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites "));

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  std::function<AssumptionCache &(Function &)> &GetAssumptionCache;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallSite CandidateCS;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;
  bool HasFrameEscape;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
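  ///
  /// For illustration (a hypothetical callsite): if the caller passes a
  /// constant, e.g. `call i32 @f(i32 42)`, mapping f's formal argument to
  /// `i32 42` lets a callee branch on `icmp sgt i32 %arg, 0` fold to `true`,
  /// proving one successor dead and dropping its instructions from the cost.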
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallSite CS, Function &Callee);

  /// Return true if size growth is allowed when inlining the callee at CS.
  bool allowSizeGrowth(CallSite CS);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(const TargetTransformInfo &TTI,
               std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
               ProfileSummaryInfo *PSI, Function &Callee, CallSite CSArg,
               const InlineParams &Params)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), PSI(PSI), F(Callee),
        CandidateCS(CSArg), Params(Params), Threshold(Params.DefaultThreshold),
        Cost(0), IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
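
// For example (illustrative IR, 64-bit pointers): for
//   %f = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i32 0, i32 1
// the struct index contributes the field offset of element 1 (8 bytes under
// common natural alignment), so Offset accumulates 8. An array index instead
// contributes index * DL.getTypeAllocSize(element type).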

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      const DataLayout &DL = F.getParent()->getDataLayout();
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty), AllocatedSize);
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    Type *Ty = I.getAllocatedType();
    AllocatedSize = SaturatingAdd(DL.getTypeAllocSize(Ty), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
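
// For example, `%buf = alloca [16 x i8]` in the entry block is a static
// alloca: it adds 16 bytes to AllocatedSize and stays cheap to inline. By
// contrast, `%buf = alloca i8, i32 %n` with a non-constant %n sets
// HasDynamicAlloca and blocks inlining entirely (see the FIXME above).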

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate =
      lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize >= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
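
// For example, with 64-bit pointers, `%i = ptrtoint i32* %p to i64` preserves
// a known (base, offset) pair for %p because the i64 can represent the full
// pointer value, so later integer arithmetic on %i may still fold.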

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize <= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (Constant *C = ConstantFoldInstOperands(&I, COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }
  }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  unsigned ArgNo = A->getArgNo();
  return CandidateCS.paramHasAttr(ArgNo + 1, Attr);
}

bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}

bool CallAnalyzer::allowSizeGrowth(CallSite CS) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
  // main() {
  //   hot_call_1();
  //   ...
  //   hot_call_N()
  //   exit(0);
  // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In future, we should elaborate this based on BPI and BFI in more
  // general threshold adjusting heuristics in updateThreshold().
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Instr->getParent()->getTerminator()))
    return false;

  return true;
}

void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(CS)) {
    Threshold = 0;
    return;
  }

  Function *Caller = CS.getCaller();

  // return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };

  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->optForMinSize())
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
  else if (Caller->optForSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on inlinehint attribute and profile based
  // hotness information if the caller does not have MinSize attribute.
  if (!Caller->optForMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);
    if (PSI) {
      uint64_t TotalWeight;
      if (CS.getInstruction()->extractProfTotalWeight(TotalWeight) &&
          PSI->isHotCount(TotalWeight)) {
        Threshold = MaxIfValid(Threshold, Params.HotCallSiteThreshold);
      } else if (PSI->isFunctionEntryHot(&Callee)) {
        // If callsite hotness can not be determined, we may still know
        // that the callee is hot and treat it as a weaker hint for threshold
        // increase.
        Threshold = MaxIfValid(Threshold, Params.HintThreshold);
      } else if (PSI->isFunctionEntryCold(&Callee)) {
        Threshold = MinIfValid(Threshold, Params.ColdThreshold);
      }
    }
  }

  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();
}
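
// A worked example of the logic above, using the default knobs: a callee
// marked inlinehint raises Threshold to max(225, 325) = 325. If PGO data then
// shows the call site itself is hot, the threshold instead rises to
// max(Threshold, 3000) via Params.HotCallSiteThreshold, while a cold callee
// with a configured ColdThreshold is clamped downward through MinIfValid.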

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C =
              ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
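
// For example (illustrative IR): if %a and %b are both inbounds GEPs off the
// same caller pointer with constant offsets 8 and 16, then
//   %c = icmp ult i32* %a, %b
// folds to the comparison of the two offset constants (8 < 16, i.e. true) and
// is modeled as free after inlining.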

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
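
// For example, `%d = sub i64 %pi, %qi`, where %pi and %qi track the same base
// pointer with constant offsets 24 and 8, folds to the constant 16;
// NumConstantPtrDiffs records how often this fires.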

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV =
        SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
  else
    SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] =
        ConstantExpr::getInsertValue(AggC, InsertedC, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
       ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}
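
// For example, a call such as `%r = call float @llvm.fabs.f32(float -2.0)` --
// or one whose argument becomes constant via SimplifiedValues -- constant
// folds here, so the call is modeled as free.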

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::load_relative:
        // This is normally lowered to 4 LLVM instructions.
        Cost += 3 * InlineConstants::InstrCost;
        return false;

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't free.
        return false;
      case Intrinsic::localescape:
        HasFrameEscape = true;
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  auto IndirectCallParams = Params;
  IndirectCallParams.DefaultThreshold = InlineConstants::IndirectCallThreshold;
  CallAnalyzer CA(TTI, GetAssumptionCache, PSI, *F, CS, IndirectCallParams);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // threshold to get the bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, CA.getThreshold() - CA.getCost());
  }

  return Base::visitCallSite(CS);
}
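
// For example, if a function pointer argument is bound to a known target at
// this call site (say, after a vtable-style load simplifies to a constant
// function), the nested CallAnalyzer "pretends" to inline that target under
// InlineConstants::IndirectCallThreshold; a cheap target yields a cost bonus
// that encourages inlining the current callee and enables devirtualization.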

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Otherwise, we need to accumulate a cost proportional to the number of
  // distinct successor blocks. This fan-out in the CFG cannot be represented
  // for free even if we can represent the core switch as a jumptable that
  // takes a single instruction.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
  SuccessorBlocks.insert(SI.getDefaultDest());
  for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add cost corresponding to the number of distinct destinations. The first
  // we model as free because of fallthrough.
  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
  return false;
}
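
// For example, a switch whose default and four cases target five distinct
// blocks adds (5 - 1) * InlineConstants::InstrCost; if two of the cases share
// a block, the set collapses to four destinations and the cost to
// 3 * InstrCost.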

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}

bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable, as they have the lowest possible impact on both runtime
  // and code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, are actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&*I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction is floating point, and the target says this operation
    // is expensive or the function has the "use-soft-float" attribute, this may
    // eventually become a library call. Treat the cost as such.
    if (I->getType()->isFloatingPointTy()) {
      bool hasSoftFloatAttr = false;

      // If the function has the "use-soft-float" attribute, mark it as
      // expensive.
      if (F.hasFnAttribute("use-soft-float")) {
        Attribute Attr = F.getFnAttribute("use-soft-float");
        StringRef Val = Attr.getValueAsString();
        if (Val == "true")
          hasSoftFloatAttr = true;
      }

      if (TTI.getFPOpCost(I->getType()) == TargetTransformInfo::TCC_Expensive ||
          hasSoftFloatAttr)
        Cost += InlineConstants::CallPenalty;
    }

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(&*I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr || HasFrameEscape)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    // Check if we've passed the maximum possible threshold so we don't spin in
    // huge basic blocks that will never inline.
    if (Cost > Threshold)
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IntPtrTy = DL.getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
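
// For example (illustrative IR): given
//   %p = getelementptr inbounds [8 x i32], [8 x i32]* %arr, i64 0, i64 3
// passed as a call argument, V is rewritten to %arr and the function returns
// the constant 12 (3 * 4 bytes), seeding ConstantOffsetPtrs for the callee's
// formal argument.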

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low. Note that these bonuses are somewhat arbitrary and evolved over time
  // by accident as much as because they are principled bonuses.
  //
  // FIXME: It would be nice to remove all such bonuses. At least it would be
  // nice to base the bonus values on something more scientific.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);

  // Update the threshold based on callsite properties.
  updateThreshold(CS, F);

  FiftyPercentVectorBonus = 3 * Threshold / 2;
  TenPercentVectorBonus = 3 * Threshold / 4;
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;

  // Speculatively apply all possible bonuses to Threshold. If cost exceeds
  // this Threshold at any time, and cost cannot decrease, we can stop
  // processing the rest of the function body.
  Threshold += (SingleBBBonus + FiftyPercentVectorBonus);

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL.getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }
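
  // As a worked example of the byval math above: a byval struct of 104 bytes
  // on a target with 64-bit pointers needs ceil(832 / 64) = 13 word copies,
  // which is capped at 8, so the subtracted bonus is
  // 2 * 8 * InlineConstants::InstrCost.
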
  // The call instruction also disappears after inlining.
  Cost -= InlineConstants::InstrCost;

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost -= InlineConstants::LastCallToStaticBonus;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&*FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[&*FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > Threshold)
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress. A blockaddress only has defined
    // behavior for an indirect branch in the same function, and we do not
    // currently support inlining indirect branches. But, the inliner may not
    // see an indirect branch that ends up being dead code at a particular call
    // site. If the blockaddress escapes the function, e.g., via a global
    // variable, inlining may lead to an invalid cross-function reference.
    if (BB->hasAddressTaken())
      return false;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB, EphValues))
      return false;

    TerminatorInst *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  // We applied the maximum possible vector bonus at the beginning. Now,
  // subtract the excess bonus, if any, from the Threshold before
  // comparing against Cost.
  if (NumVectorInstructions <= NumInstructions / 10)
    Threshold -= FiftyPercentVectorBonus;
  else if (NumVectorInstructions <= NumInstructions / 2)
    Threshold -= (FiftyPercentVectorBonus - TenPercentVectorBonus);

  return Cost < std::max(1, Threshold);
}
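
// To make the final accounting above concrete, assume a default Threshold of
// 225: the speculative bonuses are SingleBBBonus = 112 and
// FiftyPercentVectorBonus = 337. A callee with no vector instructions loses
// the full 337 again at the end, so only genuinely vector-dense bodies keep
// the enlarged threshold.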
1412 
1413 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1414 /// \brief Dump stats about this call's analysis.
1416 #define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
1417  DEBUG_PRINT_STAT(NumConstantArgs);
1418  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
1419  DEBUG_PRINT_STAT(NumAllocaArgs);
1420  DEBUG_PRINT_STAT(NumConstantPtrCmps);
1421  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
1422  DEBUG_PRINT_STAT(NumInstructionsSimplified);
1423  DEBUG_PRINT_STAT(NumInstructions);
1424  DEBUG_PRINT_STAT(SROACostSavings);
1425  DEBUG_PRINT_STAT(SROACostSavingsLost);
1426  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
1427  DEBUG_PRINT_STAT(Cost);
1429 #undef DEBUG_PRINT_STAT
1430 }
1431 #endif
1432 
1433 /// \brief Test that two functions either have or have not the given attribute
1434 /// at the same time.
1435 template <typename AttrKind>
1436 static bool attributeMatches(Function *F1, Function *F2, AttrKind Attr) {
1437  return F1->getFnAttribute(Attr) == F2->getFnAttribute(Attr);
1438 }
1439 
1440 /// \brief Test that there are no attribute conflicts between Caller and Callee
1441 /// that prevent inlining.
1443  Function *Callee,
1444  TargetTransformInfo &TTI) {
1445  return TTI.areInlineCompatible(Caller, Callee) &&
1446  AttributeFuncs::areInlineCompatible(*Caller, *Callee);
1447 }
1448 
1450  CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
1451  std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
1452  ProfileSummaryInfo *PSI) {
1453  return getInlineCost(CS, CS.getCalledFunction(), Params, CalleeTTI,
1454  GetAssumptionCache, PSI);
1455 }
1456 
1458  CallSite CS, Function *Callee, const InlineParams &Params,
1459  TargetTransformInfo &CalleeTTI,
1460  std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
1461  ProfileSummaryInfo *PSI) {
1462 
1463  // Cannot inline indirect calls.
1464  if (!Callee)
1465  return llvm::InlineCost::getNever();
1466 
1467  // Calls to functions with always-inline attributes should be inlined
1468  // whenever possible.
1469  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
1470  if (isInlineViable(*Callee))
1471  return llvm::InlineCost::getAlways();
1472  return llvm::InlineCost::getNever();
1473  }
1474 
1475  // Never inline functions with conflicting attributes (unless callee has
1476  // always-inline attribute).
1477  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee, CalleeTTI))
1478  return llvm::InlineCost::getNever();
1479 
1480  // Don't inline this call if the caller has the optnone attribute.
1481  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
1482  return llvm::InlineCost::getNever();
1483 
1484  // Don't inline functions which can be interposed at link-time. Don't inline
1485  // functions marked noinline or call sites marked noinline.
1486  // Note: inlining non-exact non-interposable functions is fine, since we know
1487  // we have *a* correct implementation of the source level function.
1488  if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
1489  CS.isNoInline())
1490  return llvm::InlineCost::getNever();
1491 
1492  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
1493  << "...\n");
1494 
1495  CallAnalyzer CA(CalleeTTI, GetAssumptionCache, PSI, *Callee, CS, Params);
1496  bool ShouldInline = CA.analyzeCall(CS);
1497 
1498  DEBUG(CA.dump());
1499 
1500  // Check if there was a reason to force inlining or no inlining.
1501  if (!ShouldInline && CA.getCost() < CA.getThreshold())
1502  return InlineCost::getNever();
1503  if (ShouldInline && CA.getCost() >= CA.getThreshold())
1504  return InlineCost::getAlways();
1505 
1506  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
1507 }
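
For orientation, a sketch of how a client might act on the returned InlineCost; shouldInline is a hypothetical wrapper, and real inliner drivers layer additional policy on top:

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include <functional>

using namespace llvm;

// Hypothetical driver-side check. InlineCost's boolean conversion means
// "cost < threshold"; getAlways()/getNever() are the forced verdicts.
static bool shouldInline(CallSite CS, const InlineParams &Params,
                         TargetTransformInfo &CalleeTTI,
                         std::function<AssumptionCache &(Function &)> &GetAC,
                         ProfileSummaryInfo *PSI) {
  InlineCost IC = getInlineCost(CS, Params, CalleeTTI, GetAC, PSI);
  if (IC.isAlways())
    return true;   // forced: always-inline attribute and a viable body
  if (IC.isNever())
    return false;  // forced: indirect call, attribute conflict, noinline, ...
  return bool(IC); // variable: inline iff cost < threshold
}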
1508 
1509 bool llvm::isInlineViable(Function &F) {
1510  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
1511  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
1512  // Disallow inlining of functions which contain indirect branches or
1513  // blockaddresses.
1514  if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
1515  return false;
1516 
1517  for (auto &II : *BI) {
1518  CallSite CS(&II);
1519  if (!CS)
1520  continue;
1521 
1522  // Disallow recursive calls.
1523  if (&F == CS.getCalledFunction())
1524  return false;
1525 
1526  // Disallow calls which expose returns-twice to a function not previously
1527  // attributed as such.
1528  if (!ReturnsTwice && CS.isCall() &&
1529  cast<CallInst>(CS.getInstruction())->canReturnTwice())
1530  return false;
1531 
1532  // Disallow inlining functions that call @llvm.localescape. Doing this
1533  // correctly would require major changes to the inliner.
1534  if (CS.getCalledFunction() &&
1535  CS.getCalledFunction()->getIntrinsicID() ==
1536  llvm::Intrinsic::localescape)
1537  return false;
1538  }
1539  }
1540 
1541  return true;
1542 }
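
Viability is a prerequisite rather than a recommendation; a sketch of a standalone gate mirroring the always-inline path in getInlineCost above (canForceInline is a hypothetical name):

// The callee null-check matters because indirect call sites have no
// function body to test for viability.
static bool canForceInline(CallSite CS) {
  Function *Callee = CS.getCalledFunction();
  return Callee && CS.hasFnAttr(Attribute::AlwaysInline) &&
         isInlineViable(*Callee);
}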
1543 
1544 // APIs to create InlineParams based on command line flags and/or other
1545 // parameters.
1546 
1547 InlineParams llvm::getInlineParams(int Threshold) {
1548  InlineParams Params;
1549 
1550  // This field is the threshold to use for a callee by default. This is
1551  // derived from one or more of:
1552  // * optimization or size-optimization levels,
1553  // * a value passed to createFunctionInliningPass function, or
1554  // * the -inline-threshold flag.
1555  // If the -inline-threshold flag is explicitly specified, that is used
1556  // irrespective of anything else.
1557  if (InlineThreshold.getNumOccurrences() > 0)
1558  Params.DefaultThreshold = InlineThreshold;
1559  else
1560  Params.DefaultThreshold = Threshold;
1561 
1562  // Set the HintThreshold knob from the -inlinehint-threshold.
1563  Params.HintThreshold = HintThreshold;
1564 
1565  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
1566  Params.HotCallSiteThreshold = HotCallSiteThreshold;
1567 
1568  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
1570  // -inline-threshold commandline option is not explicitly given. If that
1571  // option is present, then its value applies even for callees with size and
1572  // minsize attributes.
1573  // If the -inline-threshold is not specified, set the ColdThreshold from the
1574  // -inlinecold-threshold even if it is not explicitly passed. If
1575  // -inline-threshold is specified, then -inlinecold-threshold needs to be
1576  // explicitly specified to set the ColdThreshold knob
1577  if (InlineThreshold.getNumOccurrences() == 0) {
1578  Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
1579  Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
1580  Params.ColdThreshold = ColdThreshold;
1581  } else if (ColdThreshold.getNumOccurrences() > 0) {
1582  Params.ColdThreshold = ColdThreshold;
1583  }
1584  return Params;
1585 }
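
A worked illustration of the precedence implemented above, assuming no inliner flags were passed on the command line so every cl::opt sits at its documented default; the variable name P is illustrative:

InlineParams P = getInlineParams(/*Threshold=*/300);
// P.DefaultThreshold == 300 (an explicit -inline-threshold would win instead)
// P.HintThreshold == 325 and P.HotCallSiteThreshold == 3000 (flag defaults)
// P.OptSizeThreshold, P.OptMinSizeThreshold, and P.ColdThreshold are filled
// from InlineConstants and -inlinecold-threshold's default, because
// -inline-threshold was not explicitly specified.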
1586 
1587 InlineParams llvm::getInlineParams() {
1588  return getInlineParams(InlineThreshold);
1589 }
1590 
1591 // Compute the default threshold for inlining based on the opt level and the
1592 // size opt level.
1593 static int computeThresholdFromOptLevels(unsigned OptLevel,
1594  unsigned SizeOptLevel) {
1595  if (OptLevel > 2)
1596  return InlineConstants::OptAggressiveThreshold;
1597  if (SizeOptLevel == 1) // -Os
1598  return InlineConstants::OptSizeThreshold;
1599  if (SizeOptLevel == 2) // -Oz
1600  return InlineConstants::OptMinSizeThreshold;
1601  return InlineThreshold;
1602 }
1603 
1604 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
1605  return getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
1606 }
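
How the opt-level overload resolves, following computeThresholdFromOptLevels above; the variable names are illustrative:

InlineParams AtO3 = getInlineParams(3, 0); // InlineConstants::OptAggressiveThreshold
InlineParams AtOs = getInlineParams(2, 1); // InlineConstants::OptSizeThreshold
InlineParams AtOz = getInlineParams(2, 2); // InlineConstants::OptMinSizeThreshold
InlineParams AtO2 = getInlineParams(2, 0); // falls through to -inline-threshold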