//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}
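
// Illustrative usage (a sketch, not part of the original file): a transform
// that wants one level of inlining can drive these entry points roughly as
// follows; GetAC is an assumed std::function<AssumptionCache &(Function &)>
// supplied by the surrounding pass.
//
//   InlineFunctionInfo IFI(/*cg=*/nullptr, &GetAC);
//   if (auto *CI = dyn_cast<CallInst>(&I))
//     if (InlineFunction(CI, IFI, /*CalleeAAR=*/nullptr,
//                        /*InsertLifetime=*/true)) {
//       // CI's callee body now replaces the call; IFI.InlinedCalls lists
//       // the call sites cloned into the caller.
//     }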

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}
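
// For illustration (a sketch, not from the original source), the rewrite done
// by changeToInvokeAndSplitBasicBlock turns roughly this:
//
//   %r = call i32 @callee(i32 %x)        ; may throw
//   ...rest of block...
//
// into an invoke whose unwind edge is UnwindEdge:
//
//   %r = invoke i32 @callee(i32 %x)
//           to label %split.cont unwind label %unwind.edge
//   split.cont:                          ; rest of the original block
//
// Block and value names here are hypothetical.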

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M = CS.getInstruction()->getMetadata(
      LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM =
            NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}
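
// A sketch of the effect (illustrative IR, not from the original source): if
// the call site carries !llvm.mem.parallel_loop_access, each cloned
// memory-accessing instruction receives (or concatenates) the same metadata:
//
//   %v = load i32, i32* %p, !llvm.mem.parallel_loop_access !0
//
// where !0 is the loop's metadata node from the original call site.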

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}
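
// Illustrative example (metadata names hypothetical): if @callee is inlined
// twice into the same caller, a scope list !1 = !{!2} from the callee is
// cloned to distinct nodes for each call site, e.g. !11 = !{!12} and
// !21 = !{!22}, so alias queries never conflate accesses from the two
// inlined bodies, whose non-aliasing guarantees may be control-dependent on
// their respective call sites.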

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
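
// Illustrative result (hypothetical IR): after inlining a callee such as
//
//   define void @f(i8* noalias %a, i8* %b) { ... }
//
// a store derived from %a and a load not based on %a might be tagged as:
//
//   store i8 0, i8* %a.i, !alias.scope !3   ; belongs to %a's new scope
//   %v = load i8, i8* %b.i, !noalias !3     ; known not to alias that scope
//
// where !3 lists the anonymous scope created for %a above.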

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CS.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CS.getArgument(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
                              .CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(NewAsmp);
    }
  }
}
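
// The emitted pattern looks roughly like this (illustrative IR; the exact
// sequence is produced by IRBuilder::CreateAlignmentAssumption), here for a
// hypothetical 16-byte alignment:
//
//   %ptrint = ptrtoint i32* %arg to i64
//   %maskedptr = and i64 %ptrint, 15
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)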

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getCaller();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}
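
// For a byval aggregate of, say, 16 stored bytes, this emits roughly the
// following (illustrative IR for this era's memcpy intrinsic; names
// hypothetical):
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16,
//                                        i32 1, i1 false)
//
// with alignment 1 as noted above; later passes may infer a better alignment.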

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = DL.getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
                                    nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (DebugLoc DL = BI->getDebugLoc()) {
        auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
                                            IANodes);
        auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
        BI->setDebugLoc(IDL);
        continue;
      }

      if (CalleeHasDebugInfo)
        continue;

      // If the inlined instruction has no line number, make it look as if it
      // originates from the call location. This is important for
      // ((__always_inline__, __nodebug__)) functions which must use caller
      // location for all instructions in their function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }
  }
}
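
// For example (hypothetical debug metadata): a callee instruction carrying
// !dbg for line 7 of callee.c, inlined at a call carrying !dbg for line 42
// of caller.c, ends up with a location whose inlinedAt chain is:
//
//   !30 = !DILocation(line: 7, scope: !..., inlinedAt: !31)
//   !31 = distinct !DILocation(line: 42, scope: !...)
//
// The distinct node keeps this call site separate from other calls made from
// the same source location.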
/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto const &Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that
      // happens, we want to use the maximum among the original blocks'
      // frequencies.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}
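
// A small worked example (hypothetical numbers): if the callee's entry block
// has frequency 100 and one of its inner blocks has frequency 50, and the
// call site block has frequency 8, the cloned copies get frequencies 8 and 4
// respectively, i.e. each is scaled by CallSiteFreq/CalleeEntryFreq = 8/100.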
1395 
1396 /// Update the branch metadata for cloned call instructions.
1398  const Optional<uint64_t> &CalleeEntryCount,
1399  const Instruction *TheCall,
1400  ProfileSummaryInfo *PSI,
1401  BlockFrequencyInfo *CallerBFI) {
1402  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.getValue() < 1)
1403  return;
1404  Optional<uint64_t> CallSiteCount =
1405  PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1406  uint64_t CallCount =
1407  std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1408  CalleeEntryCount.getValue());
1409 
1410  for (auto const &Entry : VMap)
1411  if (isa<CallInst>(Entry.first))
1412  if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1413  CI->updateProfWeight(CallCount, CalleeEntryCount.getValue());
1414  for (BasicBlock &BB : *Callee)
1415  // No need to update the callsite if it is pruned during inlining.
1416  if (VMap.count(&BB))
1417  for (Instruction &I : BB)
1418  if (CallInst *CI = dyn_cast<CallInst>(&I))
1419  CI->updateProfWeight(CalleeEntryCount.getValue() - CallCount,
1420  CalleeEntryCount.getValue());
1421 }
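// A worked example of the redistribution above, with hypothetical counts:
// if CalleeEntryCount = 100 and the estimated call site count is 60, then
// CallCount = min(60, 100) = 60; calls cloned into the caller are scaled by
// 60/100 of their old weight, and the calls remaining in the out-of-line
// callee keep the other 40/100.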
1422 
1423 /// Update the entry count of callee after inlining.
1424 ///
1425 /// The callsite's block count is subtracted from the callee's function entry
1426 /// count.
1427 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1428  Instruction *CallInst, Function *Callee,
1429  ProfileSummaryInfo *PSI) {
1430  // If the callee has an original count of N, and the estimated count of
1431  // callsite is M, the new callee count is set to N - M. M is estimated from
1432  // the caller's entry count, its entry block frequency and the block frequency
1433  // of the callsite.
1434  Optional<uint64_t> CalleeCount = Callee->getEntryCount();
1435  if (!CalleeCount.hasValue() || !PSI)
1436  return;
1437  Optional<uint64_t> CallCount = PSI->getProfileCount(CallInst, CallerBFI);
1438  if (!CallCount.hasValue())
1439  return;
1440  // Since the call site count is an estimate, it could exceed the original
1441  // callee count; in that case the new callee count is clamped to 0.
1442  if (CallCount.getValue() > CalleeCount.getValue())
1443  Callee->setEntryCount(0);
1444  else
1445  Callee->setEntryCount(CalleeCount.getValue() - CallCount.getValue());
1446 }
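// The update above in arithmetic form, as a hypothetical helper written for
// exposition (not LLVM API): with an original callee entry count N and an
// estimated call site count M, the new entry count is max(N - M, 0).
static uint64_t newCalleeEntryCount(uint64_t N, uint64_t M) {
  return M > N ? 0 : N - M; // clamp at zero since M is only an estimate
}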
1447 
1448 /// This function inlines the called function into the basic block of the
1449 /// caller. This returns false if it is not possible to inline this call.
1450 /// The program is still in a well defined state if this occurs though.
1451 ///
1452 /// Note that this only does one level of inlining. For example, if the
1453 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1454 /// exists in the instruction stream. Similarly this will inline a recursive
1455 /// function by one level.
1456 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1457  AAResults *CalleeAAR, bool InsertLifetime) {
1458  Instruction *TheCall = CS.getInstruction();
1459  assert(TheCall->getParent() && TheCall->getFunction()
1460  && "Instruction not in function!");
1461 
1462  // If IFI has any state in it, zap it before we fill it in.
1463  IFI.reset();
1464 
1465  Function *CalledFunc = CS.getCalledFunction();
1466  if (!CalledFunc || // Can't inline external function or indirect
1467  CalledFunc->isDeclaration() || // call, or call to a vararg function!
1468  CalledFunc->getFunctionType()->isVarArg()) return false;
1469 
1470  // The inliner does not know how to inline through calls with operand bundles
1471  // in general ...
1472  if (CS.hasOperandBundles()) {
1473  for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1474  uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1475  // ... but it knows how to inline through "deopt" operand bundles ...
1476  if (Tag == LLVMContext::OB_deopt)
1477  continue;
1478  // ... and "funclet" operand bundles.
1479  if (Tag == LLVMContext::OB_funclet)
1480  continue;
1481 
1482  return false;
1483  }
1484  }
1485 
1486  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1487  // calls that we inline.
1488  bool MarkNoUnwind = CS.doesNotThrow();
1489 
1490  BasicBlock *OrigBB = TheCall->getParent();
1491  Function *Caller = OrigBB->getParent();
1492 
1493  // GC poses two hazards to inlining, which only occur when the callee has GC:
1494  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1495  // caller.
1496  // 2. If the caller has a differing GC, it is invalid to inline.
1497  if (CalledFunc->hasGC()) {
1498  if (!Caller->hasGC())
1499  Caller->setGC(CalledFunc->getGC());
1500  else if (CalledFunc->getGC() != Caller->getGC())
1501  return false;
1502  }
1503 
1504  // Get the personality function from the callee if it contains a landing pad.
1505  Constant *CalledPersonality =
1506  CalledFunc->hasPersonalityFn()
1507  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1508  : nullptr;
1509 
1510  // Find the personality function used by the landing pads of the caller. If it
1511  // exists, then check to see that it matches the personality function used in
1512  // the callee.
1513  Constant *CallerPersonality =
1514  Caller->hasPersonalityFn()
1515  ? Caller->getPersonalityFn()->stripPointerCasts()
1516  : nullptr;
1517  if (CalledPersonality) {
1518  if (!CallerPersonality)
1519  Caller->setPersonalityFn(CalledPersonality);
1520  // If the personality functions match, then we can perform the
1521  // inlining. Otherwise, we can't inline.
1522  // TODO: This isn't 100% true. Some personality functions are proper
1523  // supersets of others and can be used in place of the other.
1524  else if (CalledPersonality != CallerPersonality)
1525  return false;
1526  }
1527 
1528  // We need to figure out which funclet the callsite was in so that we may
1529  // properly nest the callee.
1530  Instruction *CallSiteEHPad = nullptr;
1531  if (CallerPersonality) {
1532  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1533  if (isFuncletEHPersonality(Personality)) {
1534  Optional<OperandBundleUse> ParentFunclet =
1535  CS.getOperandBundle(LLVMContext::OB_funclet);
1536  if (ParentFunclet)
1537  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1538 
1539  // OK, the inlining site is legal. What about the target function?
1540 
1541  if (CallSiteEHPad) {
1542  if (Personality == EHPersonality::MSVC_CXX) {
1543  // The MSVC personality cannot tolerate catches getting inlined into
1544  // cleanup funclets.
1545  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1546  // Ok, the call site is within a cleanuppad. Let's check the callee
1547  // for catchpads.
1548  for (const BasicBlock &CalledBB : *CalledFunc) {
1549  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1550  return false;
1551  }
1552  }
1553  } else if (isAsynchronousEHPersonality(Personality)) {
1554  // SEH is even less tolerant, there may not be any sort of exceptional
1555  // funclet in the callee.
1556  for (const BasicBlock &CalledBB : *CalledFunc) {
1557  if (CalledBB.isEHPad())
1558  return false;
1559  }
1560  }
1561  }
1562  }
1563  }
1564 
1565  // Determine if we are dealing with a call in an EHPad which does not unwind
1566  // to caller.
1567  bool EHPadForCallUnwindsLocally = false;
1568  if (CallSiteEHPad && CS.isCall()) {
1569  UnwindDestMemoTy FuncletUnwindMap;
1570  Value *CallSiteUnwindDestToken =
1571  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1572 
1573  EHPadForCallUnwindsLocally =
1574  CallSiteUnwindDestToken &&
1575  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1576  }
1577 
1578  // Get an iterator to the last basic block in the function, which will have
1579  // the new function inlined after it.
1580  Function::iterator LastBlock = --Caller->end();
1581 
1582  // Make sure to capture all of the return instructions from the cloned
1583  // function.
1584  SmallVector<ReturnInst*, 8> Returns;
1585  ClonedCodeInfo InlinedFunctionInfo;
1586  Function::iterator FirstNewBlock;
1587 
1588  { // Scope to destroy VMap after cloning.
1589  ValueToValueMapTy VMap;
1590  // Keep a list of pairs (dst, src) to emit byval initializations.
1591  SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1592 
1593  auto &DL = Caller->getParent()->getDataLayout();
1594 
1595  assert(CalledFunc->arg_size() == CS.arg_size() &&
1596  "No varargs calls can be inlined!");
1597 
1598  // Calculate the vector of arguments to pass into the function cloner, which
1599  // matches up the formal to the actual argument values.
1600  CallSite::arg_iterator AI = CS.arg_begin();
1601  unsigned ArgNo = 0;
1602  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1603  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1604  Value *ActualArg = *AI;
1605 
1606  // When byval arguments are actually inlined, we need to make the copy implied
1607  // by them explicit. However, we don't do this if the callee is readonly
1608  // or readnone, because the copy would be unneeded: the callee doesn't
1609  // modify the struct.
1610  if (CS.isByValArgument(ArgNo)) {
1611  ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1612  CalledFunc->getParamAlignment(ArgNo));
1613  if (ActualArg != *AI)
1614  ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1615  }
1616 
1617  VMap[&*I] = ActualArg;
1618  }
1619 
1620  // Add alignment assumptions if necessary. We do this before the inlined
1621  // instructions are actually cloned into the caller so that we can easily
1622  // check what will be known at the start of the inlined code.
1623  AddAlignmentAssumptions(CS, IFI);
1624 
1625  // We want the inliner to prune the code as it copies. We would LOVE to
1626  // have no dead or constant instructions leftover after inlining occurs
1627  // (which can happen, e.g., because an argument was constant), but we'll be
1628  // happy with whatever the cloner can do.
1629  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1630  /*ModuleLevelChanges=*/false, Returns, ".i",
1631  &InlinedFunctionInfo, TheCall);
1632  // Remember the first block that is newly cloned over.
1633  FirstNewBlock = LastBlock; ++FirstNewBlock;
1634 
1635  if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1636  // Update the BFI of blocks cloned into the caller.
1637  updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1638  CalledFunc->front());
1639 
1640  updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1641  IFI.PSI, IFI.CallerBFI);
1642  // Update the profile count of callee.
1643  updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc, IFI.PSI);
1644 
1645  // Inject byval arguments initialization.
1646  for (std::pair<Value*, Value*> &Init : ByValInit)
1647  HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1648  &*FirstNewBlock, IFI);
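 // Illustratively, with hypothetical IR (not produced verbatim by this
 // code): for "call void @f(%struct.S* byval %p)" where @f may write to its
 // parameter, HandleByValArgument materializes a fresh alloca in the caller
 // and HandleByValArgumentInit emits a copy from %p into it, so the cloned
 // body mutates the copy; a readonly/readnone callee keeps using %p
 // directly.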
1649 
1650  Optional<OperandBundleUse> ParentDeopt =
1651  CS.getOperandBundle(LLVMContext::OB_deopt);
1652  if (ParentDeopt) {
1653  SmallVector<OperandBundleDef, 2> OpDefs;
1654 
1655  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1656  Instruction *I = dyn_cast_or_null<Instruction>(VH);
1657  if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1658 
1659  OpDefs.clear();
1660 
1661  CallSite ICS(I);
1662  OpDefs.reserve(ICS.getNumOperandBundles());
1663 
1664  for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1665  auto ChildOB = ICS.getOperandBundleAt(i);
1666  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1667  // If the inlined call has other operand bundles, let them be
1668  OpDefs.emplace_back(ChildOB);
1669  continue;
1670  }
1671 
1672  // It may be useful to separate this logic (of handling operand
1673  // bundles) out to a separate "policy" component if this gets crowded.
1674  // Prepend the parent's deoptimization continuation to the newly
1675  // inlined call's deoptimization continuation.
1676  std::vector<Value *> MergedDeoptArgs;
1677  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1678  ChildOB.Inputs.size());
1679 
1680  MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1681  ParentDeopt->Inputs.begin(),
1682  ParentDeopt->Inputs.end());
1683  MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1684  ChildOB.Inputs.end());
1685 
1686  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1687  }
1688 
1689  Instruction *NewI = nullptr;
1690  if (isa<CallInst>(I))
1691  NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1692  else
1693  NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1694 
1695  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1696  // this even if the call returns void.
1697  I->replaceAllUsesWith(NewI);
1698 
1699  VH = nullptr;
1700  I->eraseFromParent();
1701  }
1702  }
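 // The merge above in miniature: the parent's deoptimization state becomes
 // a prefix of every inlined call's state. Sketched over plain vectors
 // (hypothetical names, not LLVM's OperandBundleDef):
 //
 //   std::vector<Value *> Merged;
 //   Merged.reserve(Parent.size() + Child.size());
 //   Merged.insert(Merged.end(), Parent.begin(), Parent.end());
 //   Merged.insert(Merged.end(), Child.begin(), Child.end());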
1703 
1704  // Update the callgraph if requested.
1705  if (IFI.CG)
1706  UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1707 
1708  // For 'nodebug' functions, the associated DISubprogram is always null.
1709  // Conservatively avoid propagating the callsite debug location to
1710  // instructions inlined from a function whose DISubprogram is not null.
1711  fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1712  CalledFunc->getSubprogram() != nullptr);
1713 
1714  // Clone existing noalias metadata if necessary.
1715  CloneAliasScopeMetadata(CS, VMap);
1716 
1717  // Add noalias metadata if necessary.
1718  AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1719 
1720  // Propagate llvm.mem.parallel_loop_access if necessary.
1721  PropagateParallelLoopAccessMetadata(CS, VMap);
1722 
1723  // Register any cloned assumptions.
1724  if (IFI.GetAssumptionCache)
1725  for (BasicBlock &NewBlock :
1726  make_range(FirstNewBlock->getIterator(), Caller->end()))
1727  for (Instruction &I : NewBlock) {
1728  if (auto *II = dyn_cast<IntrinsicInst>(&I))
1729  if (II->getIntrinsicID() == Intrinsic::assume)
1730  (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1731  }
1732  }
1733 
1734  // If there are any alloca instructions in the block that used to be the entry
1735  // block for the callee, move them to the entry block of the caller. First
1736  // calculate which instruction they should be inserted before. We insert the
1737  // instructions at the end of the current alloca list.
1738  {
1739  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1740  for (BasicBlock::iterator I = FirstNewBlock->begin(),
1741  E = FirstNewBlock->end(); I != E; ) {
1742  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1743  if (!AI) continue;
1744 
1745  // If the alloca is now dead, remove it. This often occurs due to code
1746  // specialization.
1747  if (AI->use_empty()) {
1748  AI->eraseFromParent();
1749  continue;
1750  }
1751 
1752  if (!allocaWouldBeStaticInEntry(AI))
1753  continue;
1754 
1755  // Keep track of the static allocas that we inline into the caller.
1756  IFI.StaticAllocas.push_back(AI);
1757 
1758  // Scan for the block of allocas that we can move over, and move them
1759  // all at once.
1760  while (isa<AllocaInst>(I) &&
1761  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1762  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1763  ++I;
1764  }
1765 
1766  // Transfer all of the allocas over in a block. Using splice means
1767  // that the instructions aren't removed from the symbol table, then
1768  // reinserted.
1769  Caller->getEntryBlock().getInstList().splice(
1770  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1771  }
1772  // Move any dbg.declares describing the allocas into the entry basic block.
1773  DIBuilder DIB(*Caller->getParent());
1774  for (auto &AI : IFI.StaticAllocas)
1775  replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
1776  }
1777 
1778  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1779  if (InlinedFunctionInfo.ContainsCalls) {
1780  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1781  if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1782  CallSiteTailKind = CI->getTailCallKind();
1783 
1784  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1785  ++BB) {
1786  for (Instruction &I : *BB) {
1787  CallInst *CI = dyn_cast<CallInst>(&I);
1788  if (!CI)
1789  continue;
1790 
1791  if (Function *F = CI->getCalledFunction())
1792  InlinedDeoptimizeCalls |=
1793  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1794 
1795  // We need to reduce the strength of any inlined tail calls. For
1796  // musttail, we have to avoid introducing potential unbounded stack
1797  // growth. For example, if functions 'f' and 'g' are mutually recursive
1798  // with musttail, we can inline 'g' into 'f' so long as we preserve
1799  // musttail on the cloned call to 'f'. If either the inlined call site
1800  // or the cloned call site is *not* musttail, the program already has
1801  // one frame of stack growth, so it's safe to remove musttail. Here is
1802  // a table of example transformations:
1803  //
1804  // f -> musttail g -> musttail f ==> f -> musttail f
1805  // f -> musttail g -> tail f ==> f -> tail f
1806  // f -> g -> musttail f ==> f -> f
1807  // f -> g -> tail f ==> f -> f
1808  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1809  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1810  CI->setTailCallKind(ChildTCK);
1811  InlinedMustTailCalls |= CI->isMustTailCall();
1812 
1813  // Calls inlined through a 'nounwind' call site should be marked
1814  // 'nounwind'.
1815  if (MarkNoUnwind)
1816  CI->setDoesNotThrow();
1817  }
1818  }
1819  }
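 // The std::min above relies on the numeric ordering of TailCallKind
 // (TCK_None < TCK_Tail < TCK_MustTail): combining the call site's kind
 // with the cloned call's kind keeps the weaker of the two, e.g.
 // min(musttail, tail) == tail and min(none, musttail) == none, matching
 // the transformation table in the comment above.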
1820 
1821  // Leave lifetime markers for the static allocas, scoping them to the
1822  // function we just inlined.
1823  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1824  IRBuilder<> builder(&FirstNewBlock->front());
1825  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1826  AllocaInst *AI = IFI.StaticAllocas[ai];
1827  // Don't mark swifterror allocas. They can't have bitcast uses.
1828  if (AI->isSwiftError())
1829  continue;
1830 
1831  // If the alloca is already scoped to something smaller than the whole
1832  // function then there's no need to add redundant, less accurate markers.
1833  if (hasLifetimeMarkers(AI))
1834  continue;
1835 
1836  // Try to determine the size of the allocation.
1837  ConstantInt *AllocaSize = nullptr;
1838  if (ConstantInt *AIArraySize =
1839  dyn_cast<ConstantInt>(AI->getArraySize())) {
1840  auto &DL = Caller->getParent()->getDataLayout();
1841  Type *AllocaType = AI->getAllocatedType();
1842  uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1843  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1844 
1845  // Don't add markers for zero-sized allocas.
1846  if (AllocaArraySize == 0)
1847  continue;
1848 
1849  // Check that array size doesn't saturate uint64_t and doesn't
1850  // overflow when it's multiplied by type size.
1851  if (AllocaArraySize != ~0ULL &&
1852  UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1853  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1854  AllocaArraySize * AllocaTypeSize);
1855  }
1856  }
1857 
1858  builder.CreateLifetimeStart(AI, AllocaSize);
1859  for (ReturnInst *RI : Returns) {
1860  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1861  // call and a return. The return kills all local allocas.
1862  if (InlinedMustTailCalls &&
1863  RI->getParent()->getTerminatingMustTailCall())
1864  continue;
1865  if (InlinedDeoptimizeCalls &&
1866  RI->getParent()->getTerminatingDeoptimizeCall())
1867  continue;
1868  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1869  }
1870  }
1871  }
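 // The size computation above, isolated: a byte size is attached only when
 // ArraySize * TypeSize provably fits in a uint64_t and the alloca is not
 // zero-sized. Sketched with hypothetical names:
 //
 //   if (ArraySize != 0 && ArraySize != ~0ULL &&
 //       UINT64_MAX / ArraySize >= TypeSize)
 //     SizeInBytes = ArraySize * TypeSize;   // provably no overflow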
1872 
1873  // If the inlined code contained dynamic alloca instructions, wrap the inlined
1874  // code with llvm.stacksave/llvm.stackrestore intrinsics.
1875  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1876  Module *M = Caller->getParent();
1877  // Get the two intrinsics we care about.
1878  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1879  Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
1880 
1881  // Insert the llvm.stacksave.
1882  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1883  .CreateCall(StackSave, {}, "savedstack");
1884 
1885  // Insert a call to llvm.stackrestore before any return instructions in the
1886  // inlined function.
1887  for (ReturnInst *RI : Returns) {
1888  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1889  // call and a return. The return will restore the stack pointer.
1890  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1891  continue;
1892  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1893  continue;
1894  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1895  }
1896  }
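 // In effect, the inlined body is bracketed like this (illustrative IR
 // shape only, not emitted verbatim):
 //
 //   %savedstack = call i8* @llvm.stacksave()
 //   ...inlined code containing dynamic allocas...
 //   call void @llvm.stackrestore(i8* %savedstack)  ; before each return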
1897 
1898  // If we are inlining for an invoke instruction, we must make sure to rewrite
1899  // any call instructions into invoke instructions. This is sensitive to which
1900  // funclet pads were top-level in the inlinee, so must be done before
1901  // rewriting the "parent pad" links.
1902  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1903  BasicBlock *UnwindDest = II->getUnwindDest();
1904  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
1905  if (isa<LandingPadInst>(FirstNonPHI)) {
1906  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1907  } else {
1908  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1909  }
1910  }
1911 
1912  // Update the lexical scopes of the new funclets and callsites.
1913  // Anything that had 'none' as its parent is now nested inside the callsite's
1914  // EHPad.
1915 
1916  if (CallSiteEHPad) {
1917  for (Function::iterator BB = FirstNewBlock->getIterator(),
1918  E = Caller->end();
1919  BB != E; ++BB) {
1920  // Add bundle operands to any top-level call sites.
1921  SmallVector<OperandBundleDef, 1> OpBundles;
1922  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
1923  Instruction *I = &*BBI++;
1924  CallSite CS(I);
1925  if (!CS)
1926  continue;
1927 
1928  // Skip call sites which are nounwind intrinsics.
1929  auto *CalledFn =
1930  dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1931  if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
1932  continue;
1933 
1934  // Skip call sites which already have a "funclet" bundle.
1935  if (CS.getOperandBundle(LLVMContext::OB_funclet))
1936  continue;
1937 
1938  CS.getOperandBundlesAsDefs(OpBundles);
1939  OpBundles.emplace_back("funclet", CallSiteEHPad);
1940 
1941  Instruction *NewInst;
1942  if (CS.isCall())
1943  NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1944  else
1945  NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1946  NewInst->takeName(I);
1947  I->replaceAllUsesWith(NewInst);
1948  I->eraseFromParent();
1949 
1950  OpBundles.clear();
1951  }
1952 
1953  // It is problematic if the inlinee has a cleanupret which unwinds to the
1954  // caller and we inline it into a call site which does not itself unwind
1955  // but sits inside an EH pad that unwinds locally. Such an edge must be
1956  // dynamically unreachable, so we replace the cleanupret with unreachable.
1957  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1958  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1959  changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1960 
1961  Instruction *I = BB->getFirstNonPHI();
1962  if (!I->isEHPad())
1963  continue;
1964 
1965  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
1966  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
1967  CatchSwitch->setParentPad(CallSiteEHPad);
1968  } else {
1969  auto *FPI = cast<FuncletPadInst>(I);
1970  if (isa<ConstantTokenNone>(FPI->getParentPad()))
1971  FPI->setParentPad(CallSiteEHPad);
1972  }
1973  }
1974  }
1975 
1976  if (InlinedDeoptimizeCalls) {
1977  // We need to at least remove the deoptimizing returns from the Returns set,
1978  // so that the control flow from those returns does not get merged into the
1979  // caller (but terminate it instead). If the caller's return type does not
1980  // match the callee's return type, we also need to change the return type of
1981  // the intrinsic.
1982  if (Caller->getReturnType() == TheCall->getType()) {
1983  auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
1984  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
1985  });
1986  Returns.erase(NewEnd, Returns.end());
1987  } else {
1988  SmallVector<ReturnInst *, 8> NormalReturns;
1989  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
1990  Caller->getParent(), Intrinsic::experimental_deoptimize,
1991  {Caller->getReturnType()});
1992 
1993  for (ReturnInst *RI : Returns) {
1994  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
1995  if (!DeoptCall) {
1996  NormalReturns.push_back(RI);
1997  continue;
1998  }
1999 
2000  // The calling convention on the deoptimize call itself may be bogus,
2001  // since the code we're inlining may have undefined behavior (and may
2002  // never actually execute at runtime); but all
2003  // @llvm.experimental.deoptimize declarations have to have the same
2004  // calling convention in a well-formed module.
2005  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2006  NewDeoptIntrinsic->setCallingConv(CallingConv);
2007  auto *CurBB = RI->getParent();
2008  RI->eraseFromParent();
2009 
2010  SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2011  DeoptCall->arg_end());
2012 
2013  SmallVector<OperandBundleDef, 1> OpBundles;
2014  DeoptCall->getOperandBundlesAsDefs(OpBundles);
2015  DeoptCall->eraseFromParent();
2016  assert(!OpBundles.empty() &&
2017  "Expected at least the deopt operand bundle");
2018 
2019  IRBuilder<> Builder(CurBB);
2020  CallInst *NewDeoptCall =
2021  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2022  NewDeoptCall->setCallingConv(CallingConv);
2023  if (NewDeoptCall->getType()->isVoidTy())
2024  Builder.CreateRetVoid();
2025  else
2026  Builder.CreateRet(NewDeoptCall);
2027  }
2028 
2029  // Leave behind the normal returns so we can merge control flow.
2030  std::swap(Returns, NormalReturns);
2031  }
2032  }
2033 
2034  // Handle any inlined musttail call sites. In order for a new call site to be
2035  // musttail, the source of the clone and the inlined call site must have been
2036  // musttail. Therefore it's safe to return without merging control into the
2037  // phi below.
2038  if (InlinedMustTailCalls) {
2039  // Check if we need to bitcast the result of any musttail calls.
2040  Type *NewRetTy = Caller->getReturnType();
2041  bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2042 
2043  // Handle the returns preceded by musttail calls separately.
2044  SmallVector<ReturnInst *, 8> NormalReturns;
2045  for (ReturnInst *RI : Returns) {
2046  CallInst *ReturnedMustTail =
2047  RI->getParent()->getTerminatingMustTailCall();
2048  if (!ReturnedMustTail) {
2049  NormalReturns.push_back(RI);
2050  continue;
2051  }
2052  if (!NeedBitCast)
2053  continue;
2054 
2055  // Delete the old return and any preceding bitcast.
2056  BasicBlock *CurBB = RI->getParent();
2057  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2058  RI->eraseFromParent();
2059  if (OldCast)
2060  OldCast->eraseFromParent();
2061 
2062  // Insert a new bitcast and return with the right type.
2063  IRBuilder<> Builder(CurBB);
2064  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2065  }
2066 
2067  // Leave behind the normal returns so we can merge control flow.
2068  std::swap(Returns, NormalReturns);
2069  }
2070 
2071  // Now that all of the transforms on the inlined code have taken place but
2072  // before we splice the inlined code into the CFG and lose track of which
2073  // blocks were actually inlined, collect the call sites. We only do this if
2074  // call graph updates weren't requested, as those provide value handle based
2075  // tracking of inlined call sites instead.
2076  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2077  // Otherwise just collect the raw call sites that were inlined.
2078  for (BasicBlock &NewBB :
2079  make_range(FirstNewBlock->getIterator(), Caller->end()))
2080  for (Instruction &I : NewBB)
2081  if (auto CS = CallSite(&I))
2082  IFI.InlinedCallSites.push_back(CS);
2083  }
2084 
2085  // If we cloned in _exactly one_ basic block, and if that block ends in a
2086  // return instruction, we splice the body of the inlined callee directly into
2087  // the calling basic block.
2088  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2089  // Move all of the instructions right before the call.
2090  OrigBB->getInstList().splice(TheCall->getIterator(),
2091  FirstNewBlock->getInstList(),
2092  FirstNewBlock->begin(), FirstNewBlock->end());
2093  // Remove the cloned basic block.
2094  Caller->getBasicBlockList().pop_back();
2095 
2096  // If the call site was an invoke instruction, add a branch to the normal
2097  // destination.
2098  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2099  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2100  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2101  }
2102 
2103  // If the return instruction returned a value, replace uses of the call with
2104  // uses of the returned value.
2105  if (!TheCall->use_empty()) {
2106  ReturnInst *R = Returns[0];
2107  if (TheCall == R->getReturnValue())
2108  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2109  else
2110  TheCall->replaceAllUsesWith(R->getReturnValue());
2111  }
2112  // Since we are now done with the Call/Invoke, we can delete it.
2113  TheCall->eraseFromParent();
2114 
2115  // Since we are now done with the return instruction, delete it also.
2116  Returns[0]->eraseFromParent();
2117 
2118  // We are now done with the inlining.
2119  return true;
2120  }
2121 
2122  // Otherwise, we have the normal case of more than one block to inline or
2123  // multiple return sites.
2124 
2125  // We want to clone the entire callee function into the hole between the
2126  // "starter" and "ender" blocks. How we accomplish this depends on whether
2127  // this is an invoke instruction or a call instruction.
2128  BasicBlock *AfterCallBB;
2129  BranchInst *CreatedBranchToNormalDest = nullptr;
2130  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2131 
2132  // Add an unconditional branch to make this look like the CallInst case...
2133  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2134 
2135  // Split the basic block. This guarantees that no PHI nodes will have to be
2136  // updated due to new incoming edges, and makes the invoke case more
2137  // symmetric to the call case.
2138  AfterCallBB =
2139  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2140  CalledFunc->getName() + ".exit");
2141 
2142  } else { // It's a call
2143  // If this is a call instruction, we need to split the basic block that
2144  // the call lives in.
2145  //
2146  AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2147  CalledFunc->getName() + ".exit");
2148  }
2149 
2150  if (IFI.CallerBFI) {
2151  // Copy original BB's block frequency to AfterCallBB
2152  IFI.CallerBFI->setBlockFreq(
2153  AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2154  }
2155 
2156  // Change the branch that used to go to AfterCallBB to branch to the first
2157  // basic block of the inlined function.
2158  //
2159  TerminatorInst *Br = OrigBB->getTerminator();
2160  assert(Br && Br->getOpcode() == Instruction::Br &&
2161  "splitBasicBlock broken!");
2162  Br->setOperand(0, &*FirstNewBlock);
2163 
2164  // Now that the function is correct, make it a little bit nicer. In
2165  // particular, move the basic blocks inserted from the end of the function
2166  // into the space made by splitting the source basic block.
2167  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2168  Caller->getBasicBlockList(), FirstNewBlock,
2169  Caller->end());
2170 
2171  // Handle all of the return instructions that we just cloned in, and eliminate
2172  // any users of the original call/invoke instruction.
2173  Type *RTy = CalledFunc->getReturnType();
2174 
2175  PHINode *PHI = nullptr;
2176  if (Returns.size() > 1) {
2177  // The PHI node should go at the front of the new basic block to merge all
2178  // possible incoming values.
2179  if (!TheCall->use_empty()) {
2180  PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2181  &AfterCallBB->front());
2182  // Anything that used the result of the function call should now use the
2183  // PHI node as their operand.
2184  TheCall->replaceAllUsesWith(PHI);
2185  }
2186 
2187  // Loop over all of the return instructions adding entries to the PHI node
2188  // as appropriate.
2189  if (PHI) {
2190  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2191  ReturnInst *RI = Returns[i];
2192  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2193  "Ret value not consistent in function!");
2194  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2195  }
2196  }
2197 
2198  // Add a branch to the merge points and remove return instructions.
2199  DebugLoc Loc;
2200  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2201  ReturnInst *RI = Returns[i];
2202  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2203  Loc = RI->getDebugLoc();
2204  BI->setDebugLoc(Loc);
2205  RI->eraseFromParent();
2206  }
2207  // We need to set the debug location to *somewhere* inside the
2208  // inlined function. The line number may be nonsensical, but the
2209  // instruction will at least be associated with the right
2210  // function.
2211  if (CreatedBranchToNormalDest)
2212  CreatedBranchToNormalDest->setDebugLoc(Loc);
2213  } else if (!Returns.empty()) {
2214  // Otherwise, if there is exactly one return value, just replace anything
2215  // using the return value of the call with the computed value.
2216  if (!TheCall->use_empty()) {
2217  if (TheCall == Returns[0]->getReturnValue())
2218  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2219  else
2220  TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2221  }
2222 
2223  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2224  BasicBlock *ReturnBB = Returns[0]->getParent();
2225  ReturnBB->replaceAllUsesWith(AfterCallBB);
2226 
2227  // Splice the code from the return block into the block that it will return
2228  // to, which contains the code that was after the call.
2229  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2230  ReturnBB->getInstList());
2231 
2232  if (CreatedBranchToNormalDest)
2233  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2234 
2235  // Delete the return instruction and the now-empty ReturnBB.
2236  Returns[0]->eraseFromParent();
2237  ReturnBB->eraseFromParent();
2238  } else if (!TheCall->use_empty()) {
2239  // No returns, but something is using the return value of the call. Just
2240  // nuke the result.
2241  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2242  }
2243 
2244  // Since we are now done with the Call/Invoke, we can delete it.
2245  TheCall->eraseFromParent();
2246 
2247  // If we inlined any musttail calls and the original return is now
2248  // unreachable, delete it. It can only contain a bitcast and ret.
2249  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2250  AfterCallBB->eraseFromParent();
2251 
2252  // We should always be able to fold the entry block of the function into the
2253  // single predecessor of the block...
2254  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2255  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2256 
2257  // Splice the code entry block into calling block, right before the
2258  // unconditional branch.
2259  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2260  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2261 
2262  // Remove the unconditional branch.
2263  OrigBB->getInstList().erase(Br);
2264 
2265  // Now we can remove the CalleeEntry block, which is now empty.
2266  Caller->getBasicBlockList().erase(CalleeEntry);
2267 
2268  // If we inserted a phi node, check to see if it has a single value (e.g. all
2269  // the entries are the same or undef). If so, remove the PHI so it doesn't
2270  // block other optimizations.
2271  if (PHI) {
2272  AssumptionCache *AC =
2273  IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2274  auto &DL = Caller->getParent()->getDataLayout();
2275  if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2276  PHI->replaceAllUsesWith(V);
2277  PHI->eraseFromParent();
2278  }
2279  }
2280 
2281  return true;
2282 }
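// A hedged usage sketch from a caller's perspective; the pass boilerplate
// is omitted and the InlineFunctionInfo construction shown is an assumption
// based on Cloning.h rather than something this file defines:
//
//   InlineFunctionInfo IFI(/*cg=*/nullptr, &GetAssumptionCache, PSI,
//                          &CallerBFI, &CalleeBFI);
//   if (InlineFunction(CS, IFI, CalleeAAR, /*InsertLifetime=*/true))
//     ++NumInlined; // the call site has been replaced by the callee's body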
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:403
Return a value (possibly void), from a function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function&#39;s name starts with "llvm.".
Definition: Function.h:180
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:69
void push_back(const T &Elt)
Definition: SmallVector.h:212
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, bool Deref, int Offset=0)
Replaces llvm.dbg.declare instruction when the alloca it describes is replaced with a new value...
Definition: Local.cpp:1284
void setDoesNotThrow()
void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs=false)
Notify the BasicBlock that the predecessor Pred is no longer able to reach it.
Definition: BasicBlock.cpp:276
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:22
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1034
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
unsigned arg_size() const
Definition: CallSite.h:219
iterator erase(iterator where)
Definition: ilist.h:280
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
CallGraph * CG
CG - If non-null, InlineFunction will update the callgraph to reflect the changes it makes...
Definition: Cloning.h:189
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:63
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:856
iterator end()
Definition: Function.h:590
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:514
static DebugLoc appendInlinedAt(DebugLoc DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode *> &Cache, bool ReplaceLast=false)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition: DebugLoc.cpp:70
std::function< AssumptionCache &(Function &)> * GetAssumptionCache
Definition: Cloning.h:190
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
unsigned getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:363
Analysis providing profile information.
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked &#39;musttail&#39; prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:125
This class represents a function call, abstracting a target machine&#39;s calling convention.
void setGC(std::string Str)
Definition: Function.cpp:449
static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB, Instruction *CallInst, Function *Callee, ProfileSummaryInfo *PSI)
Update the entry count of callee after inlining.
A cache of .assume calls within a function.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:132
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:697
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add .assume-based alignment assumptions t...
Optional< uint64_t > getProfileCount(const Instruction *CallInst, BlockFrequencyInfo *BFI)
Returns the profile count for CallInst.
arg_iterator arg_end()
Definition: Function.h:612
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
F(f)
static CallInst * Create(Value *Func, ArrayRef< Value *> Args, ArrayRef< OperandBundleDef > Bundles=None, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1067
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:503
An instruction for reading from memory.
Definition: Instructions.h:164
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument...
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:177
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:677
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void reserve(size_type N)
Definition: SmallVector.h:380
bool isMustTailCall() const
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap, bool PreserveLCSSA=false)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:1401
InlineFunctionInfo - This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.
Definition: Cloning.h:176
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:93
A node in the call graph for a module.
Definition: CallGraph.h:165
Tuple of metadata.
Definition: Metadata.h:1104
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions&#39; line numbers to to encode location where these instructions are inlined...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:336
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
void setCallingConv(CallingConv::ID CC)
bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true)
InlineFunction - This function inlines the called function into the basic block of the caller...
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const Optional< uint64_t > &CalleeEntryCount, const Instruction *TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
void addCalledFunction(CallSite CS, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:233
unsigned getAllocaAddrSpace() const
Definition: DataLayout.h:253
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling .experimental.deoptimize prior to the terminating return instruc...
Definition: BasicBlock.cpp:156
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
iterator end()
Definition: CallGraph.h:191
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
TailCallKind getTailCallKind() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1197
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
ReturnInst * CreateRet(Value *V)
Create a &#39;ret <val>&#39; instruction.
Definition: IRBuilder.h:750
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:664
This file contains the simple types necessary to represent the attributes associated with functions a...
InstrTy * getInstruction() const
Definition: CallSite.h:92
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a function that contains noalias scope metadata, this metadata needs to be cloned so th...
The only memory references in this function (if it has any) are non-volatile loads from objects point...
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq, SmallPtrSetImpl< BasicBlock *> &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:172
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction...
ValTy * getCalledValue() const
Return the pointer to function that is being called.
Definition: CallSite.h:100
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
static void UpdateCallGraphAfterInlining(CallSite CS, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst *> &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, Instruction *TheCall=nullptr)
CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, except that it does some simpl...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1444
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock *> Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:79
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
const T & getValue() const LLVM_LVALUE_FUNCTION
Definition: Optional.h:129
LLVMContext & getContext() const
Definition: Metadata.h:922
FunctionModRefBehavior
Summary of how a function affects memory in the program.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:121
bool isVarArg() const
Definition: DerivedTypes.h:123
iterator find(const KeyT &Val)
Definition: ValueMap.h:158
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:83
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:192
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
static std::string utostr(uint64_t X, bool isNeg=false)
Definition: StringExtras.h:160
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
const std::string & getGC() const
Definition: Function.cpp:444
An instruction for storing to memory.
Definition: Instructions.h:306
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:634
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:428
Debug location.
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:290
iterator begin()
Definition: Function.h:588
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:140
amdgpu Simplify well known AMD library false Value * Callee
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:975
static unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:190
Value * getOperand(unsigned i) const
Definition: User.h:154
Class to represent pointers.
Definition: DerivedTypes.h:467
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
bool isCall() const
Return true if a CallInst is enclosed.
Definition: CallSite.h:87
static bool isUsedByLifetimeMarker(Value *V)
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(true), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Definition: CallSite.h:537
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
const BasicBlock & getEntryBlock() const
Definition: Function.h:572
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Return a temporary node.
Definition: Metadata.h:1151
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Definition: Metadata.h:1164
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:198
SmallVector< CallSite, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:207
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:150
The landingpad instruction holds all of the information necessary to generate correct exception handl...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:171
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
bool hasNUses(unsigned N) const
Return true if this Value has exactly N users.
Definition: Value.cpp:126
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:281
bool hasName() const
Definition: Value.h:251
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1498
Conditional or Unconditional Branch instruction.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1170
Resume the propagation of an exception.
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Definition: Metadata.h:1172
Value * getIncomingValueForBlock(const BasicBlock *BB) const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:221
const Instruction & front() const
Definition: BasicBlock.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:363
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:692
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS)
Return the behavior of the given call site.
bool hasOperandBundles() const
Definition: CallSite.h:517
void splice(iterator where, iplist_impl &L2)
Definition: ilist.h:342
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
ProfileSummaryInfo * PSI
Definition: Cloning.h:191
static bool hasLifetimeMarkers(AllocaInst *AI)
The only memory references in this function (if it has any) are non-volatile loads and stores from ob...
size_t arg_size() const
Definition: Function.h:630
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
arg_iterator arg_begin()
Definition: Function.h:603
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:374
self_iterator getIterator()
Definition: ilist_node.h:82
op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
void setTailCallKind(TailCallKind TCK)
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:61
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
Optional< uint64_t > getEntryCount() const
Get the entry count for this function.
Definition: Function.cpp:1325
static UndefValue * get(Type *T)
Static factory methods - Return an &#39;undef&#39; object of the specified type.
Definition: Constants.cpp:1320
const Constant * stripPointerCasts() const
Definition: Constant.h:153
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:527
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
iterator erase(const_iterator CI)
Definition: SmallVector.h:449
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:220
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1214
static InvokeInst * Create(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value *> Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:102
bool doesNotThrow() const
Determine if the call cannot unwind.
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is umabigously identified at the function-level.
iterator end()
Definition: ValueMap.h:138
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
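As an example, a sub-range of a function's arguments can be walked with a range-based for (countArgsFrom is a hypothetical helper; StartIdx is assumed to be at most arg_size()):

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Function.h"
#include <iterator>
using namespace llvm;

// Count the arguments of F starting from index StartIdx.
static unsigned countArgsFrom(Function &F, unsigned StartIdx) {
  unsigned N = 0;
  for (Argument &A : make_range(std::next(F.arg_begin(), StartIdx), F.arg_end())) {
    (void)A;
    ++N;
  }
  return N;
}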
void setEntryCount(uint64_t Count, const DenseSet< GlobalValue::GUID > *Imports=nullptr)
Set the entry count for this function.
Definition: Function.cpp:1319
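A simplified sketch of the profile bookkeeping inlining performs with these accessors (not this file's exact logic; CallSiteCount is an assumed input):

#include "llvm/IR/Function.h"
using namespace llvm;

// After inlining a call site executed CallSiteCount times, the callee's
// expected entry count drops by that amount (never below zero).
static void adjustCalleeEntryCount(Function &Callee, uint64_t CallSiteCount) {
  Optional<uint64_t> Count = Callee.getEntryCount();
  if (!Count.hasValue())
    return;
  uint64_t Old = Count.getValue();
  Callee.setEntryCount(Old > CallSiteCount ? Old - CallSiteCount : 0);
}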
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:317
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
Iterator for intrusive lists based on ilist_node.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
ValTy * getArgument(unsigned ArgNo) const
Definition: CallSite.h:186
OperandBundleUse getOperandBundleAt(unsigned Index) const
Definition: CallSite.h:533
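Together with getNumOperandBundles(), this supports a simple scan of a call site's bundles (a sketch; visitBundles is an assumed name):

#include "llvm/IR/CallSite.h"
using namespace llvm;

// Walk every operand bundle attached to a call site.
static void visitBundles(CallSite CS) {
  for (unsigned I = 0, E = CS.getNumOperandBundles(); I != E; ++I) {
    OperandBundleUse U = CS.getOperandBundleAt(I);
    (void)U.getTagName(); // the bundle tag, e.g. "deopt" or "funclet"
  }
}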
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
iterator end()
Definition: BasicBlock.h:254
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:194
IterTy arg_begin() const
Definition: CallSite.h:553
Module.h This file contains the declarations for the Module class.
void setBlockFreq(const BasicBlock *BB, uint64_t Freq)
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:385
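This is the backbone of the standard worklist loop (a minimal sketch; drainWorklist is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// pop_back_val() removes and returns the last element in one step.
static void drainWorklist(SmallVectorImpl<Instruction *> &Worklist) {
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    (void)I; // ... process I, possibly pushing new work ...
  }
}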
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
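For the scalar case, a brief sketch (makeI64 is an assumed helper; passing a vector type instead would return a splat of V in every lane, per the description above):

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Build an i64 constant with the given value.
static Constant *makeI64(LLVMContext &Ctx, uint64_t V) {
  return ConstantInt::get(Type::getInt64Ty(Ctx), V);
}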
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
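A sketch of the usual construction pattern (mergeValues is a hypothetical helper; Incoming is assumed non-empty and to cover every predecessor edge of BB):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Build a PHI at the top of BB with one incoming value per predecessor edge.
static PHINode *mergeValues(BasicBlock *BB,
                            ArrayRef<std::pair<Value *, BasicBlock *>> Incoming) {
  PHINode *PN = PHINode::Create(Incoming.front().first->getType(),
                                Incoming.size(), "merge", &BB->front());
  for (const auto &P : Incoming)
    PN->addIncoming(P.first, P.second);
  return PN;
}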
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
DenseMap< Instruction *, Value * > UnwindDestMemoTy
void setOperand(unsigned i, Value *Val)
Definition: User.h:159
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:145
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
iterator_range< user_iterator > users()
Definition: Value.h:395
bool ContainsCalls
ContainsCalls - This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:68
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
SmallVector< AllocaInst *, 4 > StaticAllocas
StaticAllocas - InlineFunction fills this in with all static allocas that get copied into the caller...
Definition: Cloning.h:196
bool hasValue() const
Definition: Optional.h:133
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:74
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
FunTy * getCaller() const
Return the caller function for this call site.
Definition: CallSite.h:267
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:286
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:284
unsigned getNumOperandBundles() const
Definition: CallSite.h:513
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
void emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:656
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
SmallVector< WeakTrackingVH, 8 > InlinedCalls
InlinedCalls - InlineFunction fills this in with callsites that were inlined from the callee...
Definition: Cloning.h:200
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Definition: CallSite.h:564
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1370
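For example, a call can be queried for its "deopt" state with the by-name lookup (hasDeoptState is an assumed helper):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// The result is an empty Optional if no bundle with that tag is attached.
static bool hasDeoptState(const CallInst *CI) {
  return CI->getOperandBundle("deopt").hasValue();
}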
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:218
Establish a view to a call site for examination.
Definition: CallSite.h:695
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:888
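A sketch of the merging idiom this supports, e.g. when attaching additional !alias.scope metadata during inlining (appendAliasScope is an assumed name):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// Union NewScopes with whatever !alias.scope metadata I already carries;
// concatenate() accepts null operands, so a missing node is handled for free.
static void appendAliasScope(Instruction *I, MDNode *NewScopes) {
  I->setMetadata(LLVMContext::MD_alias_scope,
                 MDNode::concatenate(
                     I->getMetadata(LLVMContext::MD_alias_scope), NewScopes));
}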
static void PropagateParallelLoopAccessMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a call site that has !llvm.mem.parallel_loop_access metadata, that metadata should be p...
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:97
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:73
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: CallSite.h:487
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
iterator end()
Definition: DenseMap.h:79
ClonedCodeInfo - This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:65
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
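A minimal sketch of the checked-cast idiom (isStaticStackSlot is a hypothetical helper):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// dyn_cast returns null on a type mismatch, so it doubles as a type test.
static bool isStaticStackSlot(Value *V) {
  if (auto *AI = dyn_cast<AllocaInst>(V))
    return AI->isStaticAlloca();
  return false;
}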
const BasicBlockListType & getBasicBlockList() const
Get the underlying elements of the Function...
Definition: Function.h:565
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1035
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:382
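For instance, splitting just after a given instruction (splitAfter is an assumed helper; I must not be the block's terminator):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include <iterator>
using namespace llvm;

// The tail of I's block moves into a new block, and the original block is
// terminated with an unconditional branch to it.
static BasicBlock *splitAfter(Instruction *I) {
  return I->getParent()->splitBasicBlock(std::next(I->getIterator()), "tail");
}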
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:184
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:141
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: InstrTypes.h:1405
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: CallSite.h:580
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:200
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, DominatorTree *DT, bool IncludeI=false, OrderedBasicBlock *OBB=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
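A hedged sketch of a typical query (mayEscapeBefore is an assumed wrapper name):

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

// Conservatively ask whether V may already have escaped when I executes.
static bool mayEscapeBefore(const Value *V, const Instruction *I,
                            DominatorTree *DT) {
  return PointerMayBeCapturedBefore(V, /*ReturnCaptures=*/false,
                                    /*StoreCaptures=*/true, I, DT);
}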
FunTy * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
Definition: CallSite.h:107
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
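For context, reasoning like the noalias-metadata conversion needs to know whether a pointer is based only on function-local objects; a minimal sketch combining this with isIdentifiedFunctionLocal (the helper name is assumed):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// True only if every object Ptr may be based on is provably local to the
// enclosing function (alloca, noalias/byval argument, etc.).
static bool onlyPointsToFunctionLocals(Value *Ptr, const DataLayout &DL) {
  SmallVector<Value *, 4> Objects;
  GetUnderlyingObjects(Ptr, Objects, DL);
  for (const Value *O : Objects)
    if (!isIdentifiedFunctionLocal(O))
      return false;
  return !Objects.empty();
}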
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:192
bool ContainsDynamicAllocas
ContainsDynamicAllocas - This is set to true if the cloned code contains a 'dynamic' alloca...
Definition: Cloning.h:74
const BasicBlock & front() const
Definition: Function.h:595
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
BasicBlock * getUnwindDest() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:545
LLVM Value Representation.
Definition: Value.h:73
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1255
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:388
A vector that has set insertion semantics.
Definition: SetVector.h:41
void removeCallEdgeFor(CallSite CS)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:188
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge)
Convert the CallInst to an InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:1452
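A thin wrapper sketch showing the calling convention (addUnwindEdge is an assumed name; note the caveat in the comment):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Rewrite CI as an invoke that unwinds to UnwindBB. CI itself is destroyed in
// the process; the returned block becomes the invoke's normal destination.
static BasicBlock *addUnwindEdge(CallInst *CI, BasicBlock *UnwindBB) {
  return changeToInvokeAndSplitBasicBlock(CI, UnwindBB);
}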
Invoke instruction.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:507
iterator begin()
Definition: CallGraph.h:190
void setPersonalityFn(Constant *Fn)
Definition: Function.cpp:1260
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
void pop_back()
Definition: ilist.h:331
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1073
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
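A common pattern built on this, e.g. for PHIs left over after inlining (trySimplify is an assumed helper):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// If I folds to an existing value, replace its uses with that value and
// delete it; returns true if a simplification happened.
static bool trySimplify(Instruction *I, const SimplifyQuery &Q) {
  if (Value *V = SimplifyInstruction(I, Q)) {
    I->replaceAllUsesWith(V);
    I->eraseFromParent();
    return true;
  }
  return false;
}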
Root of the metadata hierarchy.
Definition: Metadata.h:58
bool use_empty() const
Definition: Value.h:322
iterator begin()
Definition: ValueMap.h:137
Type * getElementType() const
Definition: DerivedTypes.h:486
iterator_range< arg_iterator > args()
Definition: Function.h:621
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:60
bool mayReadOrWriteMemory() const
Return true if this instruction may read or write memory.
Definition: Instruction.h:464