LLVM 4.0.0
InlineFunction.cpp
1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/Cloning.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CallGraph.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/EHPersonalities.h"
26 #include "llvm/Analysis/InstructionSimplify.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/CallSite.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DebugInfo.h"
34 #include "llvm/IR/DerivedTypes.h"
35 #include "llvm/IR/DIBuilder.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/IRBuilder.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/Module.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <algorithm>
46 
47 using namespace llvm;
48 
49 static cl::opt<bool>
50 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
51  cl::Hidden,
52  cl::desc("Convert noalias attributes to metadata during inlining."));
53 
54 static cl::opt<bool>
55 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
56  cl::init(true), cl::Hidden,
57  cl::desc("Convert align attributes to assumptions during inlining."));
58 
59 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
60  AAResults *CalleeAAR, bool InsertLifetime) {
61  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
62 }
63 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
64  AAResults *CalleeAAR, bool InsertLifetime) {
65  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
66 }
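// Illustrative sketch only (not part of this file): a client might drive these
// entry points roughly as follows. The exact InlineFunctionInfo setup shown
// here is an assumption for illustration, not taken from this source.
//
//   InlineFunctionInfo IFI(/*CG=*/nullptr, &GetAssumptionCache);
//   if (InlineFunction(CS, IFI, /*CalleeAAR=*/nullptr, /*InsertLifetime=*/true))
//     ; // CS has been replaced by the inlined body of the callee.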
67 
68 namespace {
69  /// A class for recording information about inlining a landing pad.
70  class LandingPadInliningInfo {
71  BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
72  BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
73  LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke.
74  PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts.
75  SmallVector<Value*, 8> UnwindDestPHIValues;
76 
77  public:
78  LandingPadInliningInfo(InvokeInst *II)
79  : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
80  CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
81  // If there are PHI nodes in the unwind destination block, we need to keep
82  // track of which values came into them from the invoke before removing
83  // the edge from this block.
84  llvm::BasicBlock *InvokeBB = II->getParent();
85  BasicBlock::iterator I = OuterResumeDest->begin();
86  for (; isa<PHINode>(I); ++I) {
87  // Save the value to use for this edge.
88  PHINode *PHI = cast<PHINode>(I);
89  UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
90  }
91 
92  CallerLPad = cast<LandingPadInst>(I);
93  }
94 
95  /// The outer unwind destination is the target of
96  /// unwind edges introduced for calls within the inlined function.
97  BasicBlock *getOuterResumeDest() const {
98  return OuterResumeDest;
99  }
100 
101  BasicBlock *getInnerResumeDest();
102 
103  LandingPadInst *getLandingPadInst() const { return CallerLPad; }
104 
105  /// Forward the 'resume' instruction to the caller's landing pad block.
106  /// When the landing pad block has only one predecessor, this is
107  /// a simple branch. When there is more than one predecessor, we need to
108  /// split the landing pad block after the landingpad instruction and jump
109  /// to there.
110  void forwardResume(ResumeInst *RI,
111  SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
112 
113  /// Add incoming-PHI values to the unwind destination block for the given
114  /// basic block, using the values for the original invoke's source block.
115  void addIncomingPHIValuesFor(BasicBlock *BB) const {
116  addIncomingPHIValuesForInto(BB, OuterResumeDest);
117  }
118 
119  void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
120  BasicBlock::iterator I = dest->begin();
121  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
122  PHINode *phi = cast<PHINode>(I);
123  phi->addIncoming(UnwindDestPHIValues[i], src);
124  }
125  }
126  };
127 } // anonymous namespace
128 
129 /// Get or create a target for the branch from ResumeInsts.
130 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
131  if (InnerResumeDest) return InnerResumeDest;
132 
133  // Split the landing pad.
134  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
135  InnerResumeDest =
136  OuterResumeDest->splitBasicBlock(SplitPoint,
137  OuterResumeDest->getName() + ".body");
138 
139  // The number of incoming edges we expect to the inner landing pad.
140  const unsigned PHICapacity = 2;
141 
142  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
143  Instruction *InsertPoint = &InnerResumeDest->front();
144  BasicBlock::iterator I = OuterResumeDest->begin();
145  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
146  PHINode *OuterPHI = cast<PHINode>(I);
147  PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
148  OuterPHI->getName() + ".lpad-body",
149  InsertPoint);
150  OuterPHI->replaceAllUsesWith(InnerPHI);
151  InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
152  }
153 
154  // Create a PHI for the exception values.
155  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
156  "eh.lpad-body", InsertPoint);
157  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
158  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
159 
160  // All done.
161  return InnerResumeDest;
162 }
163 
164 /// Forward the 'resume' instruction to the caller's landing pad block.
165 /// When the landing pad block has only one predecessor, this is a simple
166 /// branch. When there is more than one predecessor, we need to split the
167 /// landing pad block after the landingpad instruction and jump to there.
168 void LandingPadInliningInfo::forwardResume(
169  ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
170  BasicBlock *Dest = getInnerResumeDest();
171  BasicBlock *Src = RI->getParent();
172 
173  BranchInst::Create(Dest, Src);
174 
175  // Update the PHIs in the destination. They were inserted in an order which
176  // makes this work.
177  addIncomingPHIValuesForInto(Src, Dest);
178 
179  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
180  RI->eraseFromParent();
181 }
182 
183 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
184 static Value *getParentPad(Value *EHPad) {
185  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
186  return FPI->getParentPad();
187  return cast<CatchSwitchInst>(EHPad)->getParentPad();
188 }
189 
190 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
191 
192 /// Helper for getUnwindDestToken that does the descendant-ward part of
193 /// the search.
194 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
195  UnwindDestMemoTy &MemoMap) {
196  SmallVector<Instruction *, 8> Worklist(1, EHPad);
197 
198  while (!Worklist.empty()) {
199  Instruction *CurrentPad = Worklist.pop_back_val();
200  // We only put pads on the worklist that aren't in the MemoMap. When
201  // we find an unwind dest for a pad we may update its ancestors, but
202  // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
203  // so they should never get updated while queued on the worklist.
204  assert(!MemoMap.count(CurrentPad));
205  Value *UnwindDestToken = nullptr;
206  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
207  if (CatchSwitch->hasUnwindDest()) {
208  UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
209  } else {
210  // Catchswitch doesn't have a 'nounwind' variant, and one might be
211  // annotated as "unwinds to caller" when really it's nounwind (see
212  // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
213  // parent's unwind dest from this. We can check its catchpads'
214  // descendants, since they might include a cleanuppad with an
215  // "unwinds to caller" cleanupret, which can be trusted.
216  for (auto HI = CatchSwitch->handler_begin(),
217  HE = CatchSwitch->handler_end();
218  HI != HE && !UnwindDestToken; ++HI) {
219  BasicBlock *HandlerBlock = *HI;
220  auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
221  for (User *Child : CatchPad->users()) {
222  // Intentionally ignore invokes here -- since the catchswitch is
223  // marked "unwind to caller", it would be a verifier error if it
224  // contained an invoke which unwinds out of it, so any invoke we'd
225  // encounter must unwind to some child of the catch.
226  if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
227  continue;
228 
229  Instruction *ChildPad = cast<Instruction>(Child);
230  auto Memo = MemoMap.find(ChildPad);
231  if (Memo == MemoMap.end()) {
232  // Haven't figured out this child pad yet; queue it.
233  Worklist.push_back(ChildPad);
234  continue;
235  }
236  // We've already checked this child, but might have found that
237  // it offers no proof either way.
238  Value *ChildUnwindDestToken = Memo->second;
239  if (!ChildUnwindDestToken)
240  continue;
241  // We already know the child's unwind dest, which can either
242  // be ConstantTokenNone to indicate unwind to caller, or can
243  // be another child of the catchpad. Only the former indicates
244  // the unwind dest of the catchswitch.
245  if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
246  UnwindDestToken = ChildUnwindDestToken;
247  break;
248  }
249  assert(getParentPad(ChildUnwindDestToken) == CatchPad);
250  }
251  }
252  }
253  } else {
254  auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
255  for (User *U : CleanupPad->users()) {
256  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
257  if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
258  UnwindDestToken = RetUnwindDest->getFirstNonPHI();
259  else
260  UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
261  break;
262  }
263  Value *ChildUnwindDestToken;
264  if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
265  ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
266  } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
267  Instruction *ChildPad = cast<Instruction>(U);
268  auto Memo = MemoMap.find(ChildPad);
269  if (Memo == MemoMap.end()) {
270  // Haven't resolved this child yet; queue it and keep searching.
271  Worklist.push_back(ChildPad);
272  continue;
273  }
274  // We've checked this child, but still need to ignore it if it
275  // had no proof either way.
276  ChildUnwindDestToken = Memo->second;
277  if (!ChildUnwindDestToken)
278  continue;
279  } else {
280  // Not a relevant user of the cleanuppad
281  continue;
282  }
283  // In a well-formed program, the child/invoke must either unwind to
284  // an(other) child of the cleanup, or exit the cleanup. In the
285  // first case, continue searching.
286  if (isa<Instruction>(ChildUnwindDestToken) &&
287  getParentPad(ChildUnwindDestToken) == CleanupPad)
288  continue;
289  UnwindDestToken = ChildUnwindDestToken;
290  break;
291  }
292  }
293  // If we haven't found an unwind dest for CurrentPad, we may have queued its
294  // children, so move on to the next in the worklist.
295  if (!UnwindDestToken)
296  continue;
297 
298  // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
299  // any ancestors of CurrentPad up to but not including UnwindDestToken's
300  // parent pad. Record this in the memo map, and check to see if the
301  // original EHPad being queried is one of the ones exited.
302  Value *UnwindParent;
303  if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
304  UnwindParent = getParentPad(UnwindPad);
305  else
306  UnwindParent = nullptr;
307  bool ExitedOriginalPad = false;
308  for (Instruction *ExitedPad = CurrentPad;
309  ExitedPad && ExitedPad != UnwindParent;
310  ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
311  // Skip over catchpads since they just follow their catchswitches.
312  if (isa<CatchPadInst>(ExitedPad))
313  continue;
314  MemoMap[ExitedPad] = UnwindDestToken;
315  ExitedOriginalPad |= (ExitedPad == EHPad);
316  }
317 
318  if (ExitedOriginalPad)
319  return UnwindDestToken;
320 
321  // Continue the search.
322  }
323 
324  // No definitive information is contained within this funclet.
325  return nullptr;
326 }
327 
328 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
329 /// return that pad instruction. If it unwinds to caller, return
330 /// ConstantTokenNone. If it does not have a definitive unwind destination,
331 /// return nullptr.
332 ///
333 /// This routine gets invoked for calls in funclets in inlinees when inlining
334 /// an invoke. Since many funclets don't have calls inside them, it's queried
335 /// on-demand rather than building a map of pads to unwind dests up front.
336 /// Determining a funclet's unwind dest may require recursively searching its
337 /// descendants, and also ancestors and cousins if the descendants don't provide
338 /// an answer. Since most funclets will have their unwind dest immediately
339 /// available as the unwind dest of a catchswitch or cleanupret, this routine
340 /// searches top-down from the given pad and then up. To avoid worst-case
341 /// quadratic run-time given that approach, it uses a memo map to avoid
342 /// re-processing funclet trees. The callers that rewrite the IR as they go
343 /// take advantage of this, for correctness, by checking/forcing rewritten
344 /// pads' entries to match the original callee view.
345 static Value *getUnwindDestToken(Instruction *EHPad,
346  UnwindDestMemoTy &MemoMap) {
347  // Catchpads unwind to the same place as their catchswitch;
348  // redirect any queries on catchpads so the code below can
349  // deal with just catchswitches and cleanuppads.
350  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
351  EHPad = CPI->getCatchSwitch();
352 
353  // Check if we've already determined the unwind dest for this pad.
354  auto Memo = MemoMap.find(EHPad);
355  if (Memo != MemoMap.end())
356  return Memo->second;
357 
358  // Search EHPad and, if necessary, its descendants.
359  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
360  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
361  if (UnwindDestToken)
362  return UnwindDestToken;
363 
364  // No information is available for this EHPad from itself or any of its
365  // descendants. An unwind all the way out to a pad in the caller would
366  // need also to agree with the unwind dest of the parent funclet, so
367  // search up the chain to try to find a funclet with information. Put
368  // null entries in the memo map to avoid re-processing as we go up.
369  MemoMap[EHPad] = nullptr;
370 #ifndef NDEBUG
371  SmallPtrSet<Instruction *, 4> TempMemos;
372  TempMemos.insert(EHPad);
373 #endif
374  Instruction *LastUselessPad = EHPad;
375  Value *AncestorToken;
376  for (AncestorToken = getParentPad(EHPad);
377  auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
378  AncestorToken = getParentPad(AncestorToken)) {
379  // Skip over catchpads since they just follow their catchswitches.
380  if (isa<CatchPadInst>(AncestorPad))
381  continue;
382  // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
383  // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
384  // call to getUnwindDestToken, that would mean that AncestorPad had no
385  // information in itself, its descendants, or its ancestors. If that
386  // were the case, then we should also have recorded the lack of information
387  // for the descendant that we're coming from. So assert that we don't
388  // find a null entry in the MemoMap for AncestorPad.
389  assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
390  auto AncestorMemo = MemoMap.find(AncestorPad);
391  if (AncestorMemo == MemoMap.end()) {
392  UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
393  } else {
394  UnwindDestToken = AncestorMemo->second;
395  }
396  if (UnwindDestToken)
397  break;
398  LastUselessPad = AncestorPad;
399  MemoMap[LastUselessPad] = nullptr;
400 #ifndef NDEBUG
401  TempMemos.insert(LastUselessPad);
402 #endif
403  }
404 
405  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
406  // returned nullptr (and likewise for EHPad and any of its ancestors up to
407  // LastUselessPad), so LastUselessPad has no information from below. Since
408  // getUnwindDestTokenHelper must investigate all downward paths through
409  // no-information nodes to prove that a node has no information like this,
410  // and since any time it finds information it records it in the MemoMap for
411  // not just the immediately-containing funclet but also any ancestors also
412  // exited, it must be the case that, walking downward from LastUselessPad,
413  // visiting just those nodes which have not been mapped to an unwind dest
414  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
415  // they are just used to keep getUnwindDestTokenHelper from repeating work),
416  // any node visited must have been exhaustively searched with no information
417  // for it found.
418  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
419  while (!Worklist.empty()) {
420  Instruction *UselessPad = Worklist.pop_back_val();
421  auto Memo = MemoMap.find(UselessPad);
422  if (Memo != MemoMap.end() && Memo->second) {
423  // Here the name 'UselessPad' is a bit of a misnomer, because we've found
424  // that it is a funclet that does have information about unwinding to
425  // a particular destination; its parent was a useless pad.
426  // Since its parent has no information, the unwind edge must not escape
427  // the parent, and must target a sibling of this pad. This local unwind
428  // gives us no information about EHPad. Leave it and the subtree rooted
429  // at it alone.
430  assert(getParentPad(Memo->second) == getParentPad(UselessPad));
431  continue;
432  }
433  // We know we don't have information for UselessPad. If it has an entry in
434  // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
435  // added on this invocation of getUnwindDestToken; if a previous invocation
436  // recorded nullptr, it would have had to prove that the ancestors of
437  // UselessPad, which include LastUselessPad, had no information, and that
438  // in turn would have required proving that the descendants of
439  // LastUselessPad, which include EHPad, have no information about
440  // LastUselessPad, which would imply that EHPad was mapped to nullptr in
441  // the MemoMap on that invocation, which isn't the case if we got here.
442  assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
443  // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
444  // information that we'd be contradicting by making a map entry for it
445  // (which is something that getUnwindDestTokenHelper must have proved for
446  // us to get here). Just assert on its direct users here; the checks in
447  // this downward walk at its descendants will verify that they don't have
448  // any unwind edges that exit 'UselessPad' either (i.e. they either have no
449  // unwind edges or unwind to a sibling).
450  MemoMap[UselessPad] = UnwindDestToken;
451  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
452  assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
453  for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
454  auto *CatchPad = HandlerBlock->getFirstNonPHI();
455  for (User *U : CatchPad->users()) {
456  assert(
457  (!isa<InvokeInst>(U) ||
458  (getParentPad(
459  cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
460  CatchPad)) &&
461  "Expected useless pad");
462  if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
463  Worklist.push_back(cast<Instruction>(U));
464  }
465  }
466  } else {
467  assert(isa<CleanupPadInst>(UselessPad));
468  for (User *U : UselessPad->users()) {
469  assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
470  assert((!isa<InvokeInst>(U) ||
471  (getParentPad(
472  cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
473  UselessPad)) &&
474  "Expected useless pad");
475  if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
476  Worklist.push_back(cast<Instruction>(U));
477  }
478  }
479  }
480 
481  return UnwindDestToken;
482 }
483 
484 /// When we inline a basic block into an invoke,
485 /// we have to turn all of the calls that can throw into invokes.
486 /// This function analyzes BB to see if there are any calls, and if so,
487 /// it rewrites them to be invokes that jump to UnwindEdge. The callers then
488 /// fill in the PHI nodes of the unwind destination with the appropriate values.
489 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
490  BasicBlock *BB, BasicBlock *UnwindEdge,
491  UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
492  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
493  Instruction *I = &*BBI++;
494 
495  // We only need to check for function calls: inlined invoke
496  // instructions require no special handling.
497  CallInst *CI = dyn_cast<CallInst>(I);
498 
499  if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
500  continue;
501 
502  // We do not need to (and in fact, cannot) convert possibly throwing calls
503  // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
504  // invokes. The caller's "segment" of the deoptimization continuation
505  // attached to the newly inlined @llvm.experimental_deoptimize
506  // (resp. @llvm.experimental.guard) call should contain the exception
507  // handling logic, if any.
508  if (auto *F = CI->getCalledFunction())
509  if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
510  F->getIntrinsicID() == Intrinsic::experimental_guard)
511  continue;
512 
513  if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
514  // This call is nested inside a funclet. If that funclet has an unwind
515  // destination within the inlinee, then unwinding out of this call would
516  // be UB. Rewriting this call to an invoke which targets the inlined
517  // invoke's unwind dest would give the call's parent funclet multiple
518  // unwind destinations, which is something that subsequent EH table
519  // generation can't handle and that the verifier rejects. So when we
520  // see such a call, leave it as a call.
521  auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
522  Value *UnwindDestToken =
523  getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
524  if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
525  continue;
526 #ifndef NDEBUG
527  Instruction *MemoKey;
528  if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
529  MemoKey = CatchPad->getCatchSwitch();
530  else
531  MemoKey = FuncletPad;
532  assert(FuncletUnwindMap->count(MemoKey) &&
533  (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
534  "must get memoized to avoid confusing later searches");
535 #endif // NDEBUG
536  }
537 
538  changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
539  return BB;
540  }
541  return nullptr;
542 }
543 
544 /// If we inlined an invoke site, we need to convert calls
545 /// in the body of the inlined function into invokes.
546 ///
547 /// II is the invoke instruction being inlined. FirstNewBlock is the first
548 /// block of the inlined code (the last block is the end of the function),
549 /// and InlineCodeInfo is information about the code that got inlined.
550 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
551  ClonedCodeInfo &InlinedCodeInfo) {
552  BasicBlock *InvokeDest = II->getUnwindDest();
553 
554  Function *Caller = FirstNewBlock->getParent();
555 
556  // The inlined code is currently at the end of the function, scan from the
557  // start of the inlined code to its end, checking for stuff we need to
558  // rewrite.
559  LandingPadInliningInfo Invoke(II);
560 
561  // Get all of the inlined landing pad instructions.
562  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
563  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
564  I != E; ++I)
565  if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
566  InlinedLPads.insert(II->getLandingPadInst());
567 
568  // Append the clauses from the outer landing pad instruction into the inlined
569  // landing pad instructions.
570  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
571  for (LandingPadInst *InlinedLPad : InlinedLPads) {
572  unsigned OuterNum = OuterLPad->getNumClauses();
573  InlinedLPad->reserveClauses(OuterNum);
574  for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
575  InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
576  if (OuterLPad->isCleanup())
577  InlinedLPad->setCleanup(true);
578  }
579 
580  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
581  BB != E; ++BB) {
582  if (InlinedCodeInfo.ContainsCalls)
583  if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
584  &*BB, Invoke.getOuterResumeDest()))
585  // Update any PHI nodes in the exceptional block to indicate that there
586  // is now a new entry in them.
587  Invoke.addIncomingPHIValuesFor(NewBB);
588 
589  // Forward any resumes that are remaining here.
590  if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
591  Invoke.forwardResume(RI, InlinedLPads);
592  }
593 
594  // Now that everything is happy, we have one final detail. The PHI nodes in
595  // the exception destination block still have entries due to the original
596  // invoke instruction. Eliminate these entries (which might even delete the
597  // PHI node) now.
598  InvokeDest->removePredecessor(II->getParent());
599 }
600 
601 /// If we inlined an invoke site, we need to convert calls
602 /// in the body of the inlined function into invokes.
603 ///
604 /// II is the invoke instruction being inlined. FirstNewBlock is the first
605 /// block of the inlined code (the last block is the end of the function),
606 /// and InlineCodeInfo is information about the code that got inlined.
607 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
608  ClonedCodeInfo &InlinedCodeInfo) {
609  BasicBlock *UnwindDest = II->getUnwindDest();
610  Function *Caller = FirstNewBlock->getParent();
611 
612  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
613 
614  // If there are PHI nodes in the unwind destination block, we need to keep
615  // track of which values came into them from the invoke before removing the
616  // edge from this block.
617  SmallVector<Value *, 8> UnwindDestPHIValues;
618  llvm::BasicBlock *InvokeBB = II->getParent();
619  for (Instruction &I : *UnwindDest) {
620  // Save the value to use for this edge.
621  PHINode *PHI = dyn_cast<PHINode>(&I);
622  if (!PHI)
623  break;
624  UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
625  }
626 
627  // Add incoming-PHI values to the unwind destination block for the given basic
628  // block, using the values for the original invoke's source block.
629  auto UpdatePHINodes = [&](BasicBlock *Src) {
630  BasicBlock::iterator I = UnwindDest->begin();
631  for (Value *V : UnwindDestPHIValues) {
632  PHINode *PHI = cast<PHINode>(I);
633  PHI->addIncoming(V, Src);
634  ++I;
635  }
636  };
637 
638  // This connects all the instructions which 'unwind to caller' to the invoke
639  // destination.
640  UnwindDestMemoTy FuncletUnwindMap;
641  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
642  BB != E; ++BB) {
643  if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
644  if (CRI->unwindsToCaller()) {
645  auto *CleanupPad = CRI->getCleanupPad();
646  CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
647  CRI->eraseFromParent();
648  UpdatePHINodes(&*BB);
649  // Finding a cleanupret with an unwind destination would confuse
650  // subsequent calls to getUnwindDestToken, so map the cleanuppad
651  // to short-circuit any such calls and recognize this as an "unwind
652  // to caller" cleanup.
653  assert(!FuncletUnwindMap.count(CleanupPad) ||
654  isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
655  FuncletUnwindMap[CleanupPad] =
656  ConstantTokenNone::get(Caller->getContext());
657  }
658  }
659 
660  Instruction *I = BB->getFirstNonPHI();
661  if (!I->isEHPad())
662  continue;
663 
664  Instruction *Replacement = nullptr;
665  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
666  if (CatchSwitch->unwindsToCaller()) {
667  Value *UnwindDestToken;
668  if (auto *ParentPad =
669  dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
670  // This catchswitch is nested inside another funclet. If that
671  // funclet has an unwind destination within the inlinee, then
672  // unwinding out of this catchswitch would be UB. Rewriting this
673  // catchswitch to unwind to the inlined invoke's unwind dest would
674  // give the parent funclet multiple unwind destinations, which is
675  // something that subsequent EH table generation can't handle and
676  // that the verifier rejects. So when we see such a call, leave it
677  // as "unwind to caller".
678  UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
679  if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
680  continue;
681  } else {
682  // This catchswitch has no parent to inherit constraints from, and
683  // none of its descendants can have an unwind edge that exits it and
684  // targets another funclet in the inlinee. It may or may not have a
685  // descendant that definitively has an unwind to caller. In either
686  // case, we'll have to assume that any unwinds out of it may need to
687  // be routed to the caller, so treat it as though it has a definitive
688  // unwind to caller.
689  UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
690  }
691  auto *NewCatchSwitch = CatchSwitchInst::Create(
692  CatchSwitch->getParentPad(), UnwindDest,
693  CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
694  CatchSwitch);
695  for (BasicBlock *PadBB : CatchSwitch->handlers())
696  NewCatchSwitch->addHandler(PadBB);
697  // Propagate info for the old catchswitch over to the new one in
698  // the unwind map. This also serves to short-circuit any subsequent
699  // checks for the unwind dest of this catchswitch, which would get
700  // confused if they found the outer handler in the callee.
701  FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
702  Replacement = NewCatchSwitch;
703  }
704  } else if (!isa<FuncletPadInst>(I)) {
705  llvm_unreachable("unexpected EHPad!");
706  }
707 
708  if (Replacement) {
709  Replacement->takeName(I);
710  I->replaceAllUsesWith(Replacement);
711  I->eraseFromParent();
712  UpdatePHINodes(&*BB);
713  }
714  }
715 
716  if (InlinedCodeInfo.ContainsCalls)
717  for (Function::iterator BB = FirstNewBlock->getIterator(),
718  E = Caller->end();
719  BB != E; ++BB)
720  if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
721  &*BB, UnwindDest, &FuncletUnwindMap))
722  // Update any PHI nodes in the exceptional block to indicate that there
723  // is now a new entry in them.
724  UpdatePHINodes(NewBB);
725 
726  // Now that everything is happy, we have one final detail. The PHI nodes in
727  // the exception destination block still have entries due to the original
728  // invoke instruction. Eliminate these entries (which might even delete the
729  // PHI node) now.
730  UnwindDest->removePredecessor(InvokeBB);
731 }
732 
733 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
734 /// that metadata should be propagated to all memory-accessing cloned
735 /// instructions.
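/// For illustration only (schematic, not code from this file): if the call
/// site itself carries !llvm.mem.parallel_loop_access !0, every cloned
/// instruction that may read or write memory receives that same !0 tag (or
/// has it concatenated onto an existing tag), so the parallel-loop guarantee
/// survives inlining.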
736 static void PropagateParallelLoopAccessMetadata(CallSite CS,
737  ValueToValueMapTy &VMap) {
738  MDNode *M =
739  CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
740  if (!M)
741  return;
742 
743  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
744  VMI != VMIE; ++VMI) {
745  if (!VMI->second)
746  continue;
747 
748  Instruction *NI = dyn_cast<Instruction>(VMI->second);
749  if (!NI)
750  continue;
751 
752  if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
753  M = MDNode::concatenate(PM, M);
754  NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
755  } else if (NI->mayReadOrWriteMemory()) {
756  NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
757  }
758  }
759 }
760 
761 /// When inlining a function that contains noalias scope metadata,
762 /// this metadata needs to be cloned so that the inlined blocks
763 /// have different "unique scopes" at every call site. Were this not done, then
764 /// aliasing scopes from a function inlined into a caller multiple times could
765 /// not be differentiated (and this would lead to miscompiles because the
766 /// non-aliasing property communicated by the metadata could have
767 /// call-site-specific control dependencies).
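/// For illustration only (schematic IR, not taken from this file): a callee
/// access tagged
///   %v = load i32, i32* %p, !alias.scope !1, !noalias !2
/// receives freshly cloned scope lists at each inlining, e.g. !11/!12 at one
/// call site and !21/!22 at another, so the two inlined copies remain
/// distinguishable to alias analysis.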
768 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
769  const Function *CalledFunc = CS.getCalledFunction();
770  SetVector<const MDNode *> MD;
771 
772  // Note: We could only clone the metadata if it is already used in the
773  // caller. I'm omitting that check here because it might confuse
774  // inter-procedural alias analysis passes. We can revisit this if it becomes
775  // an efficiency or overhead problem.
776 
777  for (const BasicBlock &I : *CalledFunc)
778  for (const Instruction &J : I) {
779  if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
780  MD.insert(M);
781  if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
782  MD.insert(M);
783  }
784 
785  if (MD.empty())
786  return;
787 
788  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
789  // the set.
790  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
791  while (!Queue.empty()) {
792  const MDNode *M = cast<MDNode>(Queue.pop_back_val());
793  for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
794  if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
795  if (MD.insert(M1))
796  Queue.push_back(M1);
797  }
798 
799  // Now we have a complete set of all metadata in the chains used to specify
800  // the noalias scopes and the lists of those scopes.
801  SmallVector<TempMDTuple, 16> DummyNodes;
802  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
803  for (const MDNode *I : MD) {
804  DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
805  MDMap[I].reset(DummyNodes.back().get());
806  }
807 
808  // Create new metadata nodes to replace the dummy nodes, replacing old
809  // metadata references with either a dummy node or an already-created new
810  // node.
811  for (const MDNode *I : MD) {
812  SmallVector<Metadata *, 4> NewOps;
813  for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
814  const Metadata *V = I->getOperand(i);
815  if (const MDNode *M = dyn_cast<MDNode>(V))
816  NewOps.push_back(MDMap[M]);
817  else
818  NewOps.push_back(const_cast<Metadata *>(V));
819  }
820 
821  MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
822  MDTuple *TempM = cast<MDTuple>(MDMap[I]);
823  assert(TempM->isTemporary() && "Expected temporary node");
824 
825  TempM->replaceAllUsesWith(NewM);
826  }
827 
828  // Now replace the metadata in the new inlined instructions with the
829  // replacements from the map.
830  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
831  VMI != VMIE; ++VMI) {
832  if (!VMI->second)
833  continue;
834 
835  Instruction *NI = dyn_cast<Instruction>(VMI->second);
836  if (!NI)
837  continue;
838 
839  if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
840  MDNode *NewMD = MDMap[M];
841  // If the call site also had alias scope metadata (a list of scopes to
842  // which instructions inside it might belong), propagate those scopes to
843  // the inlined instructions.
844  if (MDNode *CSM =
845  CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
846  NewMD = MDNode::concatenate(NewMD, CSM);
847  NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
848  } else if (NI->mayReadOrWriteMemory()) {
849  if (MDNode *M =
850  CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
851  NI->setMetadata(LLVMContext::MD_alias_scope, M);
852  }
853 
854  if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
855  MDNode *NewMD = MDMap[M];
856  // If the call site also had noalias metadata (a list of scopes with
857  // which instructions inside it don't alias), propagate those scopes to
858  // the inlined instructions.
859  if (MDNode *CSM =
860  CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
861  NewMD = MDNode::concatenate(NewMD, CSM);
862  NI->setMetadata(LLVMContext::MD_noalias, NewMD);
863  } else if (NI->mayReadOrWriteMemory()) {
864  if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
865  NI->setMetadata(LLVMContext::MD_noalias, M);
866  }
867  }
868 }
869 
870 /// If the inlined function has noalias arguments,
871 /// then add new alias scopes for each noalias argument, tag the mapped noalias
872 /// parameters with noalias metadata specifying the new scope, and tag all
873 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
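/// For illustration only (schematic, names hypothetical): when inlining
///   void f(int* noalias %a, int* %b)
/// a fresh scope !s is created for %a; accesses known to be based on %a are
/// tagged !alias.scope !{!s}, and accesses provably not based on %a are
/// tagged !noalias !{!s}.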
874 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
875  const DataLayout &DL, AAResults *CalleeAAR) {
876  if (!EnableNoAliasConversion)
877  return;
878 
879  const Function *CalledFunc = CS.getCalledFunction();
880  SmallVector<const Argument *, 4> NoAliasArgs;
881 
882  for (const Argument &Arg : CalledFunc->args())
883  if (Arg.hasNoAliasAttr() && !Arg.use_empty())
884  NoAliasArgs.push_back(&Arg);
885 
886  if (NoAliasArgs.empty())
887  return;
888 
889  // To do a good job, if a noalias variable is captured, we need to know if
890  // the capture point dominates the particular use we're considering.
891  DominatorTree DT;
892  DT.recalculate(const_cast<Function&>(*CalledFunc));
893 
894  // noalias indicates that pointer values based on the argument do not alias
895  // pointer values which are not based on it. So we add a new "scope" for each
896  // noalias function argument. Accesses using pointers based on that argument
897  // become part of that alias scope, accesses using pointers not based on that
898  // argument are tagged as noalias with that scope.
899 
900  DenseMap<const Argument *, MDNode *> NewScopes;
901  MDBuilder MDB(CalledFunc->getContext());
902 
903  // Create a new scope domain for this function.
904  MDNode *NewDomain =
905  MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
906  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
907  const Argument *A = NoAliasArgs[i];
908 
909  std::string Name = CalledFunc->getName();
910  if (A->hasName()) {
911  Name += ": %";
912  Name += A->getName();
913  } else {
914  Name += ": argument ";
915  Name += utostr(i);
916  }
917 
918  // Note: We always create a new anonymous root here. This is true regardless
919  // of the linkage of the callee because the aliasing "scope" is not just a
920  // property of the callee, but also all control dependencies in the caller.
921  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
922  NewScopes.insert(std::make_pair(A, NewScope));
923  }
924 
925  // Iterate over all new instructions in the map; for all memory-access
926  // instructions, add the alias scope metadata.
927  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
928  VMI != VMIE; ++VMI) {
929  if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
930  if (!VMI->second)
931  continue;
932 
933  Instruction *NI = dyn_cast<Instruction>(VMI->second);
934  if (!NI)
935  continue;
936 
937  bool IsArgMemOnlyCall = false, IsFuncCall = false;
938  SmallVector<const Value *, 2> PtrArgs;
939 
940  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
941  PtrArgs.push_back(LI->getPointerOperand());
942  else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
943  PtrArgs.push_back(SI->getPointerOperand());
944  else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
945  PtrArgs.push_back(VAAI->getPointerOperand());
946  else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
947  PtrArgs.push_back(CXI->getPointerOperand());
948  else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
949  PtrArgs.push_back(RMWI->getPointerOperand());
950  else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
951  // If we know that the call does not access memory, then we'll still
952  // know that about the inlined clone of this call site, and we don't
953  // need to add metadata.
954  if (ICS.doesNotAccessMemory())
955  continue;
956 
957  IsFuncCall = true;
958  if (CalleeAAR) {
959  FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
960  if (MRB == FMRB_OnlyAccessesArgumentPointees ||
961  MRB == FMRB_OnlyReadsArgumentPointees)
962  IsArgMemOnlyCall = true;
963  }
964 
965  for (Value *Arg : ICS.args()) {
966  // We need to check the underlying objects of all arguments, not just
967  // the pointer arguments, because we might be passing pointers as
968  // integers, etc.
969  // However, if we know that the call only accesses pointer arguments,
970  // then we only need to check the pointer arguments.
971  if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
972  continue;
973 
974  PtrArgs.push_back(Arg);
975  }
976  }
977 
978  // If we found no pointers, then this instruction is not suitable for
979  // pairing with an instruction to receive aliasing metadata.
980  // However, if this is a call, then we might just alias with none of the
981  // noalias arguments.
982  if (PtrArgs.empty() && !IsFuncCall)
983  continue;
984 
985  // It is possible that there is only one underlying object, but you
986  // need to go through several PHIs to see it, and thus could be
987  // repeated in the Objects list.
988  SmallPtrSet<const Value *, 4> ObjSet;
989  SmallVector<Metadata *, 4> Scopes, NoAliases;
990 
992  for (const Value *V : PtrArgs) {
993  SmallVector<Value *, 4> Objects;
994  GetUnderlyingObjects(const_cast<Value*>(V),
995  Objects, DL, /* LI = */ nullptr);
996 
997  for (Value *O : Objects)
998  ObjSet.insert(O);
999  }
1000 
1001  // Figure out if we're derived from anything that is not a noalias
1002  // argument.
1003  bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1004  for (const Value *V : ObjSet) {
1005  // Is this value a constant that cannot be derived from any pointer
1006  // value (we need to exclude constant expressions, for example, that
1007  // are formed from arithmetic on global symbols).
1008  bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1009  isa<ConstantPointerNull>(V) ||
1010  isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1011  if (IsNonPtrConst)
1012  continue;
1013 
1014  // If this is anything other than a noalias argument, then we cannot
1015  // completely describe the aliasing properties using alias.scope
1016  // metadata (and, thus, won't add any).
1017  if (const Argument *A = dyn_cast<Argument>(V)) {
1018  if (!A->hasNoAliasAttr())
1019  UsesAliasingPtr = true;
1020  } else {
1021  UsesAliasingPtr = true;
1022  }
1023 
1024  // If this is not some identified function-local object (which cannot
1025  // directly alias a noalias argument), or some other argument (which,
1026  // by definition, also cannot alias a noalias argument), then we could
1027  // alias a noalias argument that has been captured.
1028  if (!isa<Argument>(V) &&
1029  !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1030  CanDeriveViaCapture = true;
1031  }
1032 
1033  // A function call can always get captured noalias pointers (via other
1034  // parameters, globals, etc.).
1035  if (IsFuncCall && !IsArgMemOnlyCall)
1036  CanDeriveViaCapture = true;
1037 
1038  // First, we want to figure out all of the sets with which we definitely
1039  // don't alias. Iterate over all noalias set, and add those for which:
1040  // 1. The noalias argument is not in the set of objects from which we
1041  // definitely derive.
1042  // 2. The noalias argument has not yet been captured.
1043  // An arbitrary function that might load pointers could see captured
1044  // noalias arguments via other noalias arguments or globals, and so we
1045  // must always check for prior capture.
1046  for (const Argument *A : NoAliasArgs) {
1047  if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1048  // It might be tempting to skip the
1049  // PointerMayBeCapturedBefore check if
1050  // A->hasNoCaptureAttr() is true, but this is
1051  // incorrect because nocapture only guarantees
1052  // that no copies outlive the function, not
1053  // that the value cannot be locally captured.
1054  !PointerMayBeCapturedBefore(A,
1055  /* ReturnCaptures */ false,
1056  /* StoreCaptures */ false, I, &DT)))
1057  NoAliases.push_back(NewScopes[A]);
1058  }
1059 
1060  if (!NoAliases.empty())
1061  NI->setMetadata(LLVMContext::MD_noalias,
1062  MDNode::concatenate(
1063  NI->getMetadata(LLVMContext::MD_noalias),
1064  MDNode::get(CalledFunc->getContext(), NoAliases)));
1065 
1066  // Next, we want to figure out all of the sets to which we might belong.
1067  // We might belong to a set if the noalias argument is in the set of
1068  // underlying objects. If there is some non-noalias argument in our list
1069  // of underlying objects, then we cannot add a scope because the fact
1070  // that some access does not alias with any set of our noalias arguments
1071  // cannot itself guarantee that it does not alias with this access
1072  // (because there is some pointer of unknown origin involved and the
1073  // other access might also depend on this pointer). We also cannot add
1074  // scopes to arbitrary functions unless we know they don't access any
1075  // non-parameter pointer-values.
1076  bool CanAddScopes = !UsesAliasingPtr;
1077  if (CanAddScopes && IsFuncCall)
1078  CanAddScopes = IsArgMemOnlyCall;
1079 
1080  if (CanAddScopes)
1081  for (const Argument *A : NoAliasArgs) {
1082  if (ObjSet.count(A))
1083  Scopes.push_back(NewScopes[A]);
1084  }
1085 
1086  if (!Scopes.empty())
1087  NI->setMetadata(
1088  LLVMContext::MD_alias_scope,
1089  MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1090  MDNode::get(CalledFunc->getContext(), Scopes)));
1091  }
1092  }
1093 }
1094 
1095 /// If the inlined function has non-byval align arguments, then
1096 /// add @llvm.assume-based alignment assumptions to preserve this information.
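/// For illustration only (a sketch of the pattern CreateAlignmentAssumption
/// is expected to emit; the exact IR shape is an assumption, not taken from
/// this file):
///   %ptrint    = ptrtoint i32* %arg to i64
///   %maskedptr = and i64 %ptrint, 31          ; Align - 1
///   %maskcond  = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)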
1097 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1098  if (!PreserveAlignmentAssumptions)
1099  return;
1100 
1101  AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1102  auto &DL = CS.getCaller()->getParent()->getDataLayout();
1103 
1104  // To avoid inserting redundant assumptions, we should check for assumptions
1105  // already in the caller. To do this, we might need a DT of the caller.
1106  DominatorTree DT;
1107  bool DTCalculated = false;
1108 
1109  Function *CalledFunc = CS.getCalledFunction();
1110  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1111  E = CalledFunc->arg_end();
1112  I != E; ++I) {
1113  unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
1114  if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
1115  if (!DTCalculated) {
1116  DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
1117  ->getParent()));
1118  DTCalculated = true;
1119  }
1120 
1121  // If we can already prove the asserted alignment in the context of the
1122  // caller, then don't bother inserting the assumption.
1123  Value *Arg = CS.getArgument(I->getArgNo());
1124  if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align)
1125  continue;
1126 
1127  CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
1128  .CreateAlignmentAssumption(DL, Arg, Align);
1129  AC->registerAssumption(NewAssumption);
1130  }
1131  }
1132 }
1133 
1134 /// Once we have cloned code over from a callee into the caller,
1135 /// update the specified callgraph to reflect the changes we made.
1136 /// Note that it's possible that not all code was copied over, so only
1137 /// some edges of the callgraph may remain.
1138 static void UpdateCallGraphAfterInlining(CallSite CS,
1139  Function::iterator FirstNewBlock,
1140  ValueToValueMapTy &VMap,
1141  InlineFunctionInfo &IFI) {
1142  CallGraph &CG = *IFI.CG;
1143  const Function *Caller = CS.getInstruction()->getParent()->getParent();
1144  const Function *Callee = CS.getCalledFunction();
1145  CallGraphNode *CalleeNode = CG[Callee];
1146  CallGraphNode *CallerNode = CG[Caller];
1147 
1148  // Since we inlined some uninlined call sites in the callee into the caller,
1149  // add edges from the caller to all of the callees of the callee.
1150  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1151 
1152  // Consider the case where CalleeNode == CallerNode.
1153  CallGraphNode::CalledFunctionsVector CallCache;
1154  if (CalleeNode == CallerNode) {
1155  CallCache.assign(I, E);
1156  I = CallCache.begin();
1157  E = CallCache.end();
1158  }
1159 
1160  for (; I != E; ++I) {
1161  const Value *OrigCall = I->first;
1162 
1163  ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1164  // Only copy the edge if the call was inlined!
1165  if (VMI == VMap.end() || VMI->second == nullptr)
1166  continue;
1167 
1168  // If the call was inlined, but then constant folded, there is no edge to
1169  // add. Check for this case.
1170  Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1171  if (!NewCall)
1172  continue;
1173 
1174  // We do not treat intrinsic calls like real function calls because we
1175  // expect them to become inline code; do not add an edge for an intrinsic.
1176  CallSite CS = CallSite(NewCall);
1177  if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1178  continue;
1179 
1180  // Remember that this call site got inlined for the client of
1181  // InlineFunction.
1182  IFI.InlinedCalls.push_back(NewCall);
1183 
1184  // It's possible that inlining the callsite will cause it to go from an
1185  // indirect to a direct call by resolving a function pointer. If this
1186  // happens, set the callee of the new call site to a more precise
1187  // destination. This can also happen if the call graph node of the caller
1188  // was just unnecessarily imprecise.
1189  if (!I->second->getFunction())
1190  if (Function *F = CallSite(NewCall).getCalledFunction()) {
1191  // Indirect call site resolved to direct call.
1192  CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1193 
1194  continue;
1195  }
1196 
1197  CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1198  }
1199 
1200  // Update the call graph by deleting the edge from Callee to Caller. We must
1201  // do this after the loop above in case Caller and Callee are the same.
1202  CallerNode->removeCallEdgeFor(CS);
1203 }
1204 
1205 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1206  BasicBlock *InsertBlock,
1207  InlineFunctionInfo &IFI) {
1208  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1209  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1210 
1211  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1212 
1213  // Always generate a memcpy of alignment 1 here because we don't know
1214  // the alignment of the src pointer. Other optimizations can infer
1215  // better alignment.
1216  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
1217 }
1218 
1219 /// When inlining a call site that has a byval argument,
1220 /// we have to make the implicit memcpy explicit by adding it.
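/// For illustration only (schematic, names hypothetical): for a byval
/// argument %src whose copy cannot be elided, the inlined body ends up using
///   %tmp = alloca %T, align max(preferred alignment, byval alignment)
///   call @llvm.memcpy(%tmp, %src, sizeof(%T))   ; emitted with align 1
/// and all uses of the argument inside the callee are rewritten to %tmp.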
1221 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1222  const Function *CalledFunc,
1223  InlineFunctionInfo &IFI,
1224  unsigned ByValAlignment) {
1225  PointerType *ArgTy = cast<PointerType>(Arg->getType());
1226  Type *AggTy = ArgTy->getElementType();
1227 
1228  Function *Caller = TheCall->getParent()->getParent();
1229 
1230  // If the called function is readonly, then it could not mutate the caller's
1231  // copy of the byval'd memory. In this case, it is safe to elide the copy and
1232  // temporary.
1233  if (CalledFunc->onlyReadsMemory()) {
1234  // If the byval argument has a specified alignment that is greater than the
1235  // passed in pointer, then we either have to round up the input pointer or
1236  // give up on this transformation.
1237  if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1238  return Arg;
1239 
1240  AssumptionCache *AC =
1241  IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1242  const DataLayout &DL = Caller->getParent()->getDataLayout();
1243 
1244  // If the pointer is already known to be sufficiently aligned, or if we can
1245  // round it up to a larger alignment, then we don't need a temporary.
1246  if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1247  ByValAlignment)
1248  return Arg;
1249 
1250  // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1251  // for code quality, but rarely happens and is required for correctness.
1252  }
1253 
1254  // Create the alloca. If we have DataLayout, use nice alignment.
1255  unsigned Align =
1256  Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
1257 
1258  // If the byval had an alignment specified, we *must* use at least that
1259  // alignment, as it is required by the byval argument (and uses of the
1260  // pointer inside the callee).
1261  Align = std::max(Align, ByValAlignment);
1262 
1263  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
1264  &*Caller->begin()->begin());
1265  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1266 
1267  // Uses of the argument in the function should use our new alloca
1268  // instead.
1269  return NewAlloca;
1270 }
1271 
1272 // Check whether this Value is used by a lifetime intrinsic.
1273 static bool isUsedByLifetimeMarker(Value *V) {
1274  for (User *U : V->users()) {
1275  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1276  switch (II->getIntrinsicID()) {
1277  default: break;
1278  case Intrinsic::lifetime_start:
1279  case Intrinsic::lifetime_end:
1280  return true;
1281  }
1282  }
1283  }
1284  return false;
1285 }
1286 
1287 // Check whether the given alloca already has
1288 // lifetime.start or lifetime.end intrinsics.
1289 static bool hasLifetimeMarkers(AllocaInst *AI) {
1290  Type *Ty = AI->getType();
1291  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1292  Ty->getPointerAddressSpace());
1293  if (Ty == Int8PtrTy)
1294  return isUsedByLifetimeMarker(AI);
1295 
1296  // Do a scan to find all the casts to i8*.
1297  for (User *U : AI->users()) {
1298  if (U->getType() != Int8PtrTy) continue;
1299  if (U->stripPointerCasts() != AI) continue;
1300  if (isUsedByLifetimeMarker(U))
1301  return true;
1302  }
1303  return false;
1304 }
1305 
1306 /// Rebuild the entire inlined-at chain for this instruction so that the top of
1307 /// the chain now is inlined-at the new call site.
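/// For illustration only (schematic): if a callee instruction carried a
/// location L1 whose inlined-at chain was L1 -> L2, and the callee is inlined
/// at call location C, the rebuilt chain becomes L1 -> L2' -> C', where L2'
/// and C' are distinct copies made with DILocation::getDistinct.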
1308 static DebugLoc
1309 updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
1310  LLVMContext &Ctx,
1311  DenseMap<const MDNode *, DILocation *> &IANodes) {
1312  SmallVector<DILocation *, 3> InlinedAtLocations;
1313  DILocation *Last = InlinedAtNode;
1314  DILocation *CurInlinedAt = DL;
1315 
1316  // Gather all the inlined-at nodes
1317  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
1318  // Skip any we've already built nodes for
1319  if (DILocation *Found = IANodes[IA]) {
1320  Last = Found;
1321  break;
1322  }
1323 
1324  InlinedAtLocations.push_back(IA);
1325  CurInlinedAt = IA;
1326  }
1327 
1328  // Starting from the top, rebuild the nodes to point to the new inlined-at
1329  // location (then rebuilding the rest of the chain behind it) and update the
1330  // map of already-constructed inlined-at nodes.
1331  for (const DILocation *MD : reverse(InlinedAtLocations)) {
1332  Last = IANodes[MD] = DILocation::getDistinct(
1333  Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
1334  }
1335 
1336  // And finally create the normal location for this instruction, referring to
1337  // the new inlined-at chain.
1338  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
1339 }
1340 
1341 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1342 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1343 /// cannot be static.
1344 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1345  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1346 }
1347 
1348 /// Update inlined instructions' line numbers to
1349 /// encode the location where these instructions are inlined.
1350 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1351  Instruction *TheCall, bool CalleeHasDebugInfo) {
1352  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1353  if (!TheCallDL)
1354  return;
1355 
1356  auto &Ctx = Fn->getContext();
1357  DILocation *InlinedAtNode = TheCallDL;
1358 
1359  // Create a unique call site, not to be confused with any other call from the
1360  // same location.
1361  InlinedAtNode = DILocation::getDistinct(
1362  Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1363  InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1364 
1365  // Cache the inlined-at nodes as they're built so they are reused, without
1366  // this every instruction's inlined-at chain would become distinct from each
1367  // other.
1368  DenseMap<const MDNode *, DILocation *> IANodes;
1369 
1370  for (; FI != Fn->end(); ++FI) {
1371  for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1372  BI != BE; ++BI) {
1373  if (DebugLoc DL = BI->getDebugLoc()) {
1374  BI->setDebugLoc(
1375  updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
1376  continue;
1377  }
1378 
1379  if (CalleeHasDebugInfo)
1380  continue;
1381 
1382  // If the inlined instruction has no line number, make it look as if it
1383  // originates from the call location. This is important for
1384  // ((__always_inline__, __nodebug__)) functions which must use caller
1385  // location for all instructions in their function body.
1386 
1387  // Don't update static allocas, as they may get moved later.
1388  if (auto *AI = dyn_cast<AllocaInst>(BI))
1389  if (allocaWouldBeStaticInEntry(AI))
1390  continue;
1391 
1392  BI->setDebugLoc(TheCallDL);
1393  }
1394  }
1395 }
1396 
1397 /// This function inlines the called function into the basic block of the
1398 /// caller. This returns false if it is not possible to inline this call.
1399 /// The program is still in a well defined state if this occurs though.
1400 ///
1401 /// Note that this only does one level of inlining. For example, if the
1402 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1403 /// exists in the instruction stream. Similarly this will inline a recursive
1404 /// function by one level.
1405 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1406  AAResults *CalleeAAR, bool InsertLifetime) {
1407  Instruction *TheCall = CS.getInstruction();
1408  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
1409  "Instruction not in function!");
1410 
1411  // If IFI has any state in it, zap it before we fill it in.
1412  IFI.reset();
1413 
1414  const Function *CalledFunc = CS.getCalledFunction();
1415  if (!CalledFunc || // Can't inline external function or indirect
1416  CalledFunc->isDeclaration() || // call, or call to a vararg function!
1417  CalledFunc->getFunctionType()->isVarArg()) return false;
1418 
1419  // The inliner does not know how to inline through calls with operand bundles
1420  // in general ...
1421  if (CS.hasOperandBundles()) {
1422  for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1423  uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1424  // ... but it knows how to inline through "deopt" operand bundles ...
1425  if (Tag == LLVMContext::OB_deopt)
1426  continue;
1427  // ... and "funclet" operand bundles.
1428  if (Tag == LLVMContext::OB_funclet)
1429  continue;
1430 
1431  return false;
1432  }
1433  }
1434 
1435  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1436  // calls that we inline.
1437  bool MarkNoUnwind = CS.doesNotThrow();
1438 
1439  BasicBlock *OrigBB = TheCall->getParent();
1440  Function *Caller = OrigBB->getParent();
1441 
1442  // GC poses two hazards to inlining, which only occur when the callee has GC:
1443  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1444  // caller.
1445  // 2. If the caller has a differing GC, it is invalid to inline.
1446  if (CalledFunc->hasGC()) {
1447  if (!Caller->hasGC())
1448  Caller->setGC(CalledFunc->getGC());
1449  else if (CalledFunc->getGC() != Caller->getGC())
1450  return false;
1451  }
1452 
1453  // Get the personality function from the callee if it contains a landing pad.
1454  Constant *CalledPersonality =
1455  CalledFunc->hasPersonalityFn()
1456  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1457  : nullptr;
1458 
1459  // Find the personality function used by the landing pads of the caller. If it
1460  // exists, then check to see that it matches the personality function used in
1461  // the callee.
1462  Constant *CallerPersonality =
1463  Caller->hasPersonalityFn()
1464  ? Caller->getPersonalityFn()->stripPointerCasts()
1465  : nullptr;
1466  if (CalledPersonality) {
1467  if (!CallerPersonality)
1468  Caller->setPersonalityFn(CalledPersonality);
1469  // If the personality functions match, then we can perform the
1470  // inlining. Otherwise, we can't inline.
1471  // TODO: This isn't 100% true. Some personality functions are proper
1472  // supersets of others and can be used in place of the other.
1473  else if (CalledPersonality != CallerPersonality)
1474  return false;
1475  }
1476 
1477  // We need to figure out which funclet the callsite was in so that we may
1478  // properly nest the callee.
1479  Instruction *CallSiteEHPad = nullptr;
1480  if (CallerPersonality) {
1481  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1482  if (isFuncletEHPersonality(Personality)) {
1483  Optional<OperandBundleUse> ParentFunclet =
1484  CS.getOperandBundle(LLVMContext::OB_funclet);
1485  if (ParentFunclet)
1486  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1487 
1488  // OK, the inlining site is legal. What about the target function?
1489 
1490  if (CallSiteEHPad) {
1491  if (Personality == EHPersonality::MSVC_CXX) {
1492  // The MSVC personality cannot tolerate catches getting inlined into
1493  // cleanup funclets.
1494  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1495  // Ok, the call site is within a cleanuppad. Let's check the callee
1496  // for catchpads.
1497  for (const BasicBlock &CalledBB : *CalledFunc) {
1498  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1499  return false;
1500  }
1501  }
1502  } else if (isAsynchronousEHPersonality(Personality)) {
1503  // SEH is even less tolerant, there may not be any sort of exceptional
1504  // funclet in the callee.
1505  for (const BasicBlock &CalledBB : *CalledFunc) {
1506  if (CalledBB.isEHPad())
1507  return false;
1508  }
1509  }
1510  }
1511  }
1512  }
1513 
1514  // Determine if we are dealing with a call in an EHPad which does not unwind
1515  // to caller.
1516  bool EHPadForCallUnwindsLocally = false;
1517  if (CallSiteEHPad && CS.isCall()) {
1518  UnwindDestMemoTy FuncletUnwindMap;
1519  Value *CallSiteUnwindDestToken =
1520  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1521 
1522  EHPadForCallUnwindsLocally =
1523  CallSiteUnwindDestToken &&
1524  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1525  }
1526 
1527  // Get an iterator to the last basic block in the function, which will have
1528  // the new function inlined after it.
1529  Function::iterator LastBlock = --Caller->end();
1530 
1531  // Make sure to capture all of the return instructions from the cloned
1532  // function.
1533  SmallVector<ReturnInst*, 8> Returns;
1534  ClonedCodeInfo InlinedFunctionInfo;
1535  Function::iterator FirstNewBlock;
1536 
1537  { // Scope to destroy VMap after cloning.
1538  ValueToValueMapTy VMap;
1539  // Keep a list of pair (dst, src) to emit byval initializations.
1540  SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1541 
1542  auto &DL = Caller->getParent()->getDataLayout();
1543 
1544  assert(CalledFunc->arg_size() == CS.arg_size() &&
1545  "No varargs calls can be inlined!");
1546 
1547  // Calculate the vector of arguments to pass into the function cloner, which
1548  // matches up the formal to the actual argument values.
1549  CallSite::arg_iterator AI = CS.arg_begin();
1550  unsigned ArgNo = 0;
1551  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
1552  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1553  Value *ActualArg = *AI;
1554 
1555  // When byval arguments are actually inlined, we need to make the copy implied
1556  // by them explicit. However, we don't do this if the callee is readonly
1557  // or readnone, because the copy would be unneeded: the callee doesn't
1558  // modify the struct.
1559  if (CS.isByValArgument(ArgNo)) {
1560  ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1561  CalledFunc->getParamAlignment(ArgNo+1));
1562  if (ActualArg != *AI)
1563  ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1564  }
1565 
1566  VMap[&*I] = ActualArg;
1567  }
1568 
1569  // Add alignment assumptions if necessary. We do this before the inlined
1570  // instructions are actually cloned into the caller so that we can easily
1571  // check what will be known at the start of the inlined code.
1572  AddAlignmentAssumptions(CS, IFI);
1573 
1574  // We want the inliner to prune the code as it copies. We would LOVE to
1575  // have no dead or constant instructions leftover after inlining occurs
1576  // (which can happen, e.g., because an argument was constant), but we'll be
1577  // happy with whatever the cloner can do.
1578  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1579  /*ModuleLevelChanges=*/false, Returns, ".i",
1580  &InlinedFunctionInfo, TheCall);
1581 
1582  // Remember the first block that is newly cloned over.
1583  FirstNewBlock = LastBlock; ++FirstNewBlock;
1584 
1585  // Inject byval arguments initialization.
1586  for (std::pair<Value*, Value*> &Init : ByValInit)
1587  HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1588  &*FirstNewBlock, IFI);
1589 
1590  Optional<OperandBundleUse> ParentDeopt =
1591  CS.getOperandBundle(LLVMContext::OB_deopt);
1592  if (ParentDeopt) {
1593  SmallVector<OperandBundleDef, 2> OpDefs;
1594 
1595  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1596  Instruction *I = dyn_cast_or_null<Instruction>(VH);
1597  if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1598 
1599  OpDefs.clear();
1600 
1601  CallSite ICS(I);
1602  OpDefs.reserve(ICS.getNumOperandBundles());
1603 
1604  for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1605  auto ChildOB = ICS.getOperandBundleAt(i);
1606  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1607  // If the inlined call has other operand bundles, let them be
1608  OpDefs.emplace_back(ChildOB);
1609  continue;
1610  }
1611 
1612  // It may be useful to separate this logic (of handling operand
1613  // bundles) out to a separate "policy" component if this gets crowded.
1614  // Prepend the parent's deoptimization continuation to the newly
1615  // inlined call's deoptimization continuation.
1616  std::vector<Value *> MergedDeoptArgs;
1617  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1618  ChildOB.Inputs.size());
1619 
1620  MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1621  ParentDeopt->Inputs.begin(),
1622  ParentDeopt->Inputs.end());
1623  MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1624  ChildOB.Inputs.end());
1625 
1626  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1627  }
1628 
1629  Instruction *NewI = nullptr;
1630  if (isa<CallInst>(I))
1631  NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1632  else
1633  NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1634 
1635  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1636  // this even if the call returns void.
1637  I->replaceAllUsesWith(NewI);
1638 
1639  VH = nullptr;
1640  I->eraseFromParent();
1641  }
1642  }
1643 
1644  // Update the callgraph if requested.
1645  if (IFI.CG)
1646  UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1647 
1648  // For 'nodebug' functions, the associated DISubprogram is always null.
1649  // Conservatively avoid propagating the callsite debug location to
1650  // instructions inlined from a function whose DISubprogram is not null.
1651  fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1652  CalledFunc->getSubprogram() != nullptr);
1653 
1654  // Clone existing noalias metadata if necessary.
1655  CloneAliasScopeMetadata(CS, VMap);
1656 
1657  // Add noalias metadata if necessary.
1658  AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1659 
1660  // Propagate llvm.mem.parallel_loop_access if necessary.
1661  PropagateParallelLoopAccessMetadata(CS, VMap);
1662 
1663  // Register any cloned assumptions.
1664  if (IFI.GetAssumptionCache)
1665  for (BasicBlock &NewBlock :
1666  make_range(FirstNewBlock->getIterator(), Caller->end()))
1667  for (Instruction &I : NewBlock) {
1668  if (auto *II = dyn_cast<IntrinsicInst>(&I))
1669  if (II->getIntrinsicID() == Intrinsic::assume)
1670  (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1671  }
1672  }
1673 
1674  // If there are any alloca instructions in the block that used to be the entry
1675  // block for the callee, move them to the entry block of the caller. First
1676  // calculate which instruction they should be inserted before. We insert the
1677  // instructions at the end of the current alloca list.
1678  {
1679  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1680  for (BasicBlock::iterator I = FirstNewBlock->begin(),
1681  E = FirstNewBlock->end(); I != E; ) {
1682  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1683  if (!AI) continue;
1684 
1685  // If the alloca is now dead, remove it. This often occurs due to code
1686  // specialization.
1687  if (AI->use_empty()) {
1688  AI->eraseFromParent();
1689  continue;
1690  }
1691 
1692  if (!allocaWouldBeStaticInEntry(AI))
1693  continue;
1694 
1695  // Keep track of the static allocas that we inline into the caller.
1696  IFI.StaticAllocas.push_back(AI);
1697 
1698  // Scan for the block of allocas that we can move over, and move them
1699  // all at once.
1700  while (isa<AllocaInst>(I) &&
1701  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1702  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1703  ++I;
1704  }
1705 
1706  // Transfer all of the allocas over in a block. Using splice means
1707  // that the instructions aren't removed from the symbol table, then
1708  // reinserted.
1709  Caller->getEntryBlock().getInstList().splice(
1710  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1711  }
1712  // Move any dbg.declares describing the allocas into the entry basic block.
1713  DIBuilder DIB(*Caller->getParent());
1714  for (auto &AI : IFI.StaticAllocas)
1715  replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
1716  }
1717 
1718  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1719  if (InlinedFunctionInfo.ContainsCalls) {
1720  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1721  if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1722  CallSiteTailKind = CI->getTailCallKind();
1723 
1724  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1725  ++BB) {
1726  for (Instruction &I : *BB) {
1727  CallInst *CI = dyn_cast<CallInst>(&I);
1728  if (!CI)
1729  continue;
1730 
1731  if (Function *F = CI->getCalledFunction())
1732  InlinedDeoptimizeCalls |=
1733  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1734 
1735  // We need to reduce the strength of any inlined tail calls. For
1736  // musttail, we have to avoid introducing potential unbounded stack
1737  // growth. For example, if functions 'f' and 'g' are mutually recursive
1738  // with musttail, we can inline 'g' into 'f' so long as we preserve
1739  // musttail on the cloned call to 'f'. If either the inlined call site
1740  // or the cloned call site is *not* musttail, the program already has
1741  // one frame of stack growth, so it's safe to remove musttail. Here is
1742  // a table of example transformations:
1743  //
1744  // f -> musttail g -> musttail f ==> f -> musttail f
1745  // f -> musttail g -> tail f ==> f -> tail f
1746  // f -> g -> musttail f ==> f -> f
1747  // f -> g -> tail f ==> f -> f
1748  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1749  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1750  CI->setTailCallKind(ChildTCK);
1751  InlinedMustTailCalls |= CI->isMustTailCall();
1752 
1753  // Calls inlined through a 'nounwind' call site should be marked
1754  // 'nounwind'.
1755  if (MarkNoUnwind)
1756  CI->setDoesNotThrow();
1757  }
1758  }
1759  }
1760 
1761  // Leave lifetime markers for the static alloca's, scoping them to the
1762  // function we just inlined.
1763  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1764  IRBuilder<> builder(&FirstNewBlock->front());
1765  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1766  AllocaInst *AI = IFI.StaticAllocas[ai];
1767  // Don't mark swifterror allocas. They can't have bitcast uses.
1768  if (AI->isSwiftError())
1769  continue;
1770 
1771  // If the alloca is already scoped to something smaller than the whole
1772  // function then there's no need to add redundant, less accurate markers.
1773  if (hasLifetimeMarkers(AI))
1774  continue;
1775 
1776  // Try to determine the size of the allocation.
1777  ConstantInt *AllocaSize = nullptr;
1778  if (ConstantInt *AIArraySize =
1779  dyn_cast<ConstantInt>(AI->getArraySize())) {
1780  auto &DL = Caller->getParent()->getDataLayout();
1781  Type *AllocaType = AI->getAllocatedType();
1782  uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1783  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1784 
1785  // Don't add markers for zero-sized allocas.
1786  if (AllocaArraySize == 0)
1787  continue;
1788 
1789  // Check that array size doesn't saturate uint64_t and doesn't
1790  // overflow when it's multiplied by type size.
1791  if (AllocaArraySize != ~0ULL &&
1792  UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1793  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1794  AllocaArraySize * AllocaTypeSize);
1795  }
1796  }
1797 
1798  builder.CreateLifetimeStart(AI, AllocaSize);
1799  for (ReturnInst *RI : Returns) {
1800  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1801  // call and a return. The return kills all local allocas.
1802  if (InlinedMustTailCalls &&
1803  RI->getParent()->getTerminatingMustTailCall())
1804  continue;
1805  if (InlinedDeoptimizeCalls &&
1806  RI->getParent()->getTerminatingDeoptimizeCall())
1807  continue;
1808  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1809  }
1810  }
1811  }
1812 
1813  // If the inlined code contained dynamic alloca instructions, wrap the inlined
1814  // code with llvm.stacksave/llvm.stackrestore intrinsics.
1815  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1816  Module *M = Caller->getParent();
1817  // Get the two intrinsics we care about.
1818  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1819  Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
1820 
1821  // Insert the llvm.stacksave.
1822  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1823  .CreateCall(StackSave, {}, "savedstack");
1824 
1825  // Insert a call to llvm.stackrestore before any return instructions in the
1826  // inlined function.
1827  for (ReturnInst *RI : Returns) {
1828  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1829  // call and a return. The return will restore the stack pointer.
1830  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1831  continue;
1832  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1833  continue;
1834  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1835  }
1836  }
1837 
1838  // If we are inlining for an invoke instruction, we must make sure to rewrite
1839  // any call instructions into invoke instructions. This is sensitive to which
1840  // funclet pads were top-level in the inlinee, so must be done before
1841  // rewriting the "parent pad" links.
1842  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1843  BasicBlock *UnwindDest = II->getUnwindDest();
1844  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
1845  if (isa<LandingPadInst>(FirstNonPHI)) {
1846  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1847  } else {
1848  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1849  }
1850  }
1851 
1852  // Update the lexical scopes of the new funclets and callsites.
1853  // Anything that had 'none' as its parent is now nested inside the callsite's
1854  // EHPad.
1855 
1856  if (CallSiteEHPad) {
1857  for (Function::iterator BB = FirstNewBlock->getIterator(),
1858  E = Caller->end();
1859  BB != E; ++BB) {
1860  // Add bundle operands to any top-level call sites.
1861  SmallVector<OperandBundleDef, 1> OpBundles;
1862  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
1863  Instruction *I = &*BBI++;
1864  CallSite CS(I);
1865  if (!CS)
1866  continue;
1867 
1868  // Skip call sites which are nounwind intrinsics.
1869  auto *CalledFn =
1870  dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1871  if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
1872  continue;
1873 
1874  // Skip call sites which already have a "funclet" bundle.
1875  if (CS.getOperandBundle(LLVMContext::OB_funclet))
1876  continue;
1877 
1878  CS.getOperandBundlesAsDefs(OpBundles);
1879  OpBundles.emplace_back("funclet", CallSiteEHPad);
1880 
1881  Instruction *NewInst;
1882  if (CS.isCall())
1883  NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1884  else
1885  NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1886  NewInst->takeName(I);
1887  I->replaceAllUsesWith(NewInst);
1888  I->eraseFromParent();
1889 
1890  OpBundles.clear();
1891  }
1892 
1893  // It is problematic if the inlinee has a cleanupret which unwinds to
1894  // caller and we inline it into a call site which doesn't unwind but into
1895  // an EH pad that does. Such an edge must be dynamically unreachable.
1896  // As such, we replace the cleanupret with unreachable.
1897  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1898  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1899  changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1900 
1901  Instruction *I = BB->getFirstNonPHI();
1902  if (!I->isEHPad())
1903  continue;
1904 
1905  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
1906  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
1907  CatchSwitch->setParentPad(CallSiteEHPad);
1908  } else {
1909  auto *FPI = cast<FuncletPadInst>(I);
1910  if (isa<ConstantTokenNone>(FPI->getParentPad()))
1911  FPI->setParentPad(CallSiteEHPad);
1912  }
1913  }
1914  }
1915 
1916  if (InlinedDeoptimizeCalls) {
1917  // We need to at least remove the deoptimizing returns from the Return set,
1918  // so that the control flow from those returns does not get merged into the
1919  // caller (but terminate it instead). If the caller's return type does not
1920  // match the callee's return type, we also need to change the return type of
1921  // the intrinsic.
1922  if (Caller->getReturnType() == TheCall->getType()) {
1923  auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
1924  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
1925  });
1926  Returns.erase(NewEnd, Returns.end());
1927  } else {
1928  SmallVector<ReturnInst *, 8> NormalReturns;
1929  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
1930  Caller->getParent(), Intrinsic::experimental_deoptimize,
1931  {Caller->getReturnType()});
1932 
1933  for (ReturnInst *RI : Returns) {
1934  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
1935  if (!DeoptCall) {
1936  NormalReturns.push_back(RI);
1937  continue;
1938  }
1939 
1940  // The calling convention on the deoptimize call itself may be bogus,
1941  // since the code we're inlining may have undefined behavior (and may
1942  // never actually execute at runtime); but all
1943  // @llvm.experimental.deoptimize declarations have to have the same
1944  // calling convention in a well-formed module.
1945  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
1946  NewDeoptIntrinsic->setCallingConv(CallingConv);
1947  auto *CurBB = RI->getParent();
1948  RI->eraseFromParent();
1949 
1950  SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
1951  DeoptCall->arg_end());
1952 
1953  SmallVector<OperandBundleDef, 1> OpBundles;
1954  DeoptCall->getOperandBundlesAsDefs(OpBundles);
1955  DeoptCall->eraseFromParent();
1956  assert(!OpBundles.empty() &&
1957  "Expected at least the deopt operand bundle");
1958 
1959  IRBuilder<> Builder(CurBB);
1960  CallInst *NewDeoptCall =
1961  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
1962  NewDeoptCall->setCallingConv(CallingConv);
1963  if (NewDeoptCall->getType()->isVoidTy())
1964  Builder.CreateRetVoid();
1965  else
1966  Builder.CreateRet(NewDeoptCall);
1967  }
1968 
1969  // Leave behind the normal returns so we can merge control flow.
1970  std::swap(Returns, NormalReturns);
1971  }
1972  }
1973 
1974  // Handle any inlined musttail call sites. In order for a new call site to be
1975  // musttail, the source of the clone and the inlined call site must have been
1976  // musttail. Therefore it's safe to return without merging control into the
1977  // phi below.
1978  if (InlinedMustTailCalls) {
1979  // Check if we need to bitcast the result of any musttail calls.
1980  Type *NewRetTy = Caller->getReturnType();
1981  bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
1982 
1983  // Handle the returns preceded by musttail calls separately.
1984  SmallVector<ReturnInst *, 8> NormalReturns;
1985  for (ReturnInst *RI : Returns) {
1986  CallInst *ReturnedMustTail =
1987  RI->getParent()->getTerminatingMustTailCall();
1988  if (!ReturnedMustTail) {
1989  NormalReturns.push_back(RI);
1990  continue;
1991  }
1992  if (!NeedBitCast)
1993  continue;
1994 
1995  // Delete the old return and any preceding bitcast.
1996  BasicBlock *CurBB = RI->getParent();
1997  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
1998  RI->eraseFromParent();
1999  if (OldCast)
2000  OldCast->eraseFromParent();
2001 
2002  // Insert a new bitcast and return with the right type.
2003  IRBuilder<> Builder(CurBB);
2004  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2005  }
2006 
2007  // Leave behind the normal returns so we can merge control flow.
2008  std::swap(Returns, NormalReturns);
2009  }
2010 
2011  // Now that all of the transforms on the inlined code have taken place but
2012  // before we splice the inlined code into the CFG and lose track of which
2013  // blocks were actually inlined, collect the call sites. We only do this if
2014  // call graph updates weren't requested, as those provide value handle based
2015  // tracking of inlined call sites instead.
2016  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2017  // Otherwise just collect the raw call sites that were inlined.
2018  for (BasicBlock &NewBB :
2019  make_range(FirstNewBlock->getIterator(), Caller->end()))
2020  for (Instruction &I : NewBB)
2021  if (auto CS = CallSite(&I))
2022  IFI.InlinedCallSites.push_back(CS);
2023  }
2024 
2025  // If we cloned in _exactly one_ basic block, and if that block ends in a
2026  // return instruction, we splice the body of the inlined callee directly into
2027  // the calling basic block.
2028  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2029  // Move all of the instructions right before the call.
2030  OrigBB->getInstList().splice(TheCall->getIterator(),
2031  FirstNewBlock->getInstList(),
2032  FirstNewBlock->begin(), FirstNewBlock->end());
2033  // Remove the cloned basic block.
2034  Caller->getBasicBlockList().pop_back();
2035 
2036  // If the call site was an invoke instruction, add a branch to the normal
2037  // destination.
2038  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2039  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2040  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2041  }
2042 
2043  // If the return instruction returned a value, replace uses of the call with
2044  // uses of the returned value.
2045  if (!TheCall->use_empty()) {
2046  ReturnInst *R = Returns[0];
2047  if (TheCall == R->getReturnValue())
2048  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2049  else
2050  TheCall->replaceAllUsesWith(R->getReturnValue());
2051  }
2052  // Since we are now done with the Call/Invoke, we can delete it.
2053  TheCall->eraseFromParent();
2054 
2055  // Since we are now done with the return instruction, delete it also.
2056  Returns[0]->eraseFromParent();
2057 
2058  // We are now done with the inlining.
2059  return true;
2060  }
2061 
2062  // Otherwise, we have the normal case, of more than one block to inline or
2063  // multiple return sites.
2064 
2065  // We want to clone the entire callee function into the hole between the
2066  // "starter" and "ender" blocks. How we accomplish this depends on whether
2067  // this is an invoke instruction or a call instruction.
2068  BasicBlock *AfterCallBB;
2069  BranchInst *CreatedBranchToNormalDest = nullptr;
2070  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2071 
2072  // Add an unconditional branch to make this look like the CallInst case...
2073  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2074 
2075  // Split the basic block. This guarantees that no PHI nodes will have to be
2076  // updated due to new incoming edges, and make the invoke case more
2077  // symmetric to the call case.
2078  AfterCallBB =
2079  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2080  CalledFunc->getName() + ".exit");
2081 
2082  } else { // It's a call
2083  // If this is a call instruction, we need to split the basic block that
2084  // the call lives in.
2085  //
2086  AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2087  CalledFunc->getName() + ".exit");
2088  }
2089 
2090  // Change the branch that used to go to AfterCallBB to branch to the first
2091  // basic block of the inlined function.
2092  //
2093  TerminatorInst *Br = OrigBB->getTerminator();
2094  assert(Br && Br->getOpcode() == Instruction::Br &&
2095  "splitBasicBlock broken!");
2096  Br->setOperand(0, &*FirstNewBlock);
2097 
2098  // Now that the function is correct, make it a little bit nicer. In
2099  // particular, move the basic blocks inserted from the end of the function
2100  // into the space made by splitting the source basic block.
2101  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2102  Caller->getBasicBlockList(), FirstNewBlock,
2103  Caller->end());
2104 
2105  // Handle all of the return instructions that we just cloned in, and eliminate
2106  // any users of the original call/invoke instruction.
2107  Type *RTy = CalledFunc->getReturnType();
2108 
2109  PHINode *PHI = nullptr;
2110  if (Returns.size() > 1) {
2111  // The PHI node should go at the front of the new basic block to merge all
2112  // possible incoming values.
2113  if (!TheCall->use_empty()) {
2114  PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2115  &AfterCallBB->front());
2116  // Anything that used the result of the function call should now use the
2117  // PHI node as their operand.
2118  TheCall->replaceAllUsesWith(PHI);
2119  }
2120 
2121  // Loop over all of the return instructions adding entries to the PHI node
2122  // as appropriate.
2123  if (PHI) {
2124  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2125  ReturnInst *RI = Returns[i];
2126  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2127  "Ret value not consistent in function!");
2128  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2129  }
2130  }
2131 
2132  // Add a branch to the merge points and remove return instructions.
2133  DebugLoc Loc;
2134  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2135  ReturnInst *RI = Returns[i];
2136  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2137  Loc = RI->getDebugLoc();
2138  BI->setDebugLoc(Loc);
2139  RI->eraseFromParent();
2140  }
2141  // We need to set the debug location to *somewhere* inside the
2142  // inlined function. The line number may be nonsensical, but the
2143  // instruction will at least be associated with the right
2144  // function.
2145  if (CreatedBranchToNormalDest)
2146  CreatedBranchToNormalDest->setDebugLoc(Loc);
2147  } else if (!Returns.empty()) {
2148  // Otherwise, if there is exactly one return value, just replace anything
2149  // using the return value of the call with the computed value.
2150  if (!TheCall->use_empty()) {
2151  if (TheCall == Returns[0]->getReturnValue())
2152  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2153  else
2154  TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2155  }
2156 
2157  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2158  BasicBlock *ReturnBB = Returns[0]->getParent();
2159  ReturnBB->replaceAllUsesWith(AfterCallBB);
2160 
2161  // Splice the code from the return block into the block that it will return
2162  // to, which contains the code that was after the call.
2163  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2164  ReturnBB->getInstList());
2165 
2166  if (CreatedBranchToNormalDest)
2167  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2168 
2169  // Delete the return instruction and the now-empty ReturnBB.
2170  Returns[0]->eraseFromParent();
2171  ReturnBB->eraseFromParent();
2172  } else if (!TheCall->use_empty()) {
2173  // No returns, but something is using the return value of the call. Just
2174  // nuke the result.
2175  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2176  }
2177 
2178  // Since we are now done with the Call/Invoke, we can delete it.
2179  TheCall->eraseFromParent();
2180 
2181  // If we inlined any musttail calls and the original return is now
2182  // unreachable, delete it. It can only contain a bitcast and ret.
2183  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2184  AfterCallBB->eraseFromParent();
2185 
2186  // We should always be able to fold the entry block of the function into the
2187  // single predecessor of the block...
2188  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2189  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2190 
2191  // Splice the code entry block into calling block, right before the
2192  // unconditional branch.
2193  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2194  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2195 
2196  // Remove the unconditional branch.
2197  OrigBB->getInstList().erase(Br);
2198 
2199  // Now we can remove the CalleeEntry block, which is now empty.
2200  Caller->getBasicBlockList().erase(CalleeEntry);
2201 
2202  // If we inserted a phi node, check to see if it has a single value (e.g. all
2203  // the entries are the same or undef). If so, remove the PHI so it doesn't
2204  // block other optimizations.
2205  if (PHI) {
2206  AssumptionCache *AC =
2207  IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2208  auto &DL = Caller->getParent()->getDataLayout();
2209  if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
2210  PHI->replaceAllUsesWith(V);
2211  PHI->eraseFromParent();
2212  }
2213  }
2214 
2215  return true;
2216 }
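For readers of this page, a minimal usage sketch (assumptions: CI is a direct CallInst whose callee is defined, not vararg, and carries no unsupported operand bundles; no call-graph or assumption-cache updates are requested):

  InlineFunctionInfo IFI;        // no CallGraph, no AssumptionCache callback
  if (InlineFunction(CI, IFI)) {
    // The call site has been replaced by the callee's body; IFI.StaticAllocas and
    // IFI.InlinedCallSites describe what was cloned into the caller.
  }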
const NoneType None
Definition: None.h:23
Return a value (possibly void), from a function.
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:76
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, bool Deref, int Offset=0)
Replaces llvm.dbg.declare instruction when the alloca it describes is replaced with a new value...
Definition: Local.cpp:1306
void setDoesNotThrow()
FunTy * getCaller() const
getCaller - Return the caller function for this call site
Definition: CallSite.h:262
void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs=false)
Notify the BasicBlock that the predecessor Pred is no longer able to reach it.
Definition: BasicBlock.cpp:281
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1019
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:226
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Definition: CallSite.h:539
unsigned getNumOperandBundles() const
Definition: CallSite.h:488
LLVM Argument representation.
Definition: Argument.h:34
bool hasName() const
Definition: Value.h:236
iterator erase(iterator where)
Definition: ilist.h:280
size_t i
MDNode * getScope() const
Definition: DebugLoc.cpp:35
CallGraph * CG
CG - If non-null, InlineFunction will update the callgraph to reflect the changes it makes...
Definition: Cloning.h:186
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:321
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:52
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1140
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:776
iterator end()
Definition: Function.h:537
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:504
CallInst * getTerminatingMustTailCall()
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:134
std::function< AssumptionCache &(Function &)> * GetAssumptionCache
Definition: Cloning.h:187
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1040
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:699
This class represents a function call, abstracting a target machine's calling convention.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:151
void setGC(std::string Str)
Definition: Function.cpp:417
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:380
A cache of .assume calls within a function.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add .assume-based alignment assumptions t...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.cpp:238
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:100
arg_iterator arg_end()
Definition: Function.h:559
A debug info location.
Definition: DebugLoc.h:34
const Instruction & front() const
Definition: BasicBlock.h:240
Metadata node.
Definition: Metadata.h:830
std::vector< WeakVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:81
An instruction for reading from memory.
Definition: Instructions.h:164
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument...
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:170
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:669
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value * > &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
Type * getElementType() const
Definition: DerivedTypes.h:462
void reserve(size_type N)
Definition: SmallVector.h:377
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap, bool PreserveLCSSA=false)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:1373
InlineFunctionInfo - This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.
Definition: Cloning.h:177
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:93
A node in the call graph for a module.
Definition: CallGraph.h:171
Tuple of metadata.
Definition: Metadata.h:1072
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:165
unsigned arg_size() const
Definition: CallSite.h:211
size_t arg_size() const
Definition: Function.cpp:327
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:191
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:345
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:228
void setCallingConv(CallingConv::ID CC)
bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true)
InlineFunction - This function inlines the called function into the basic block of the caller...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:172
void addCalledFunction(CallSite CS, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:236
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:187
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode, LLVMContext &Ctx, DenseMap< const DILocation *, DILocation * > &IANodes)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, Instruction *TheCall=nullptr)
CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, except that it does some simpl...
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:250
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:21
iterator end()
Definition: CallGraph.h:194
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
Definition: CallSite.h:102
Instruction * getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:180
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:674
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:588
bool isCall() const
isCall - true if a CallInst is enclosed.
Definition: CallSite.h:87
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1218
This file contains the simple types necessary to represent the attributes associated with functions a...
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a function that contains noalias scope metadata, this metadata needs to be cloned so th...
bool isMustTailCall() const
The only memory references in this function (if it has any) are non-volatile loads from objects point...
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:60
bool doesNotThrow() const
Determine if the call cannot unwind.
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction...
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
Definition: STLExtras.h:241
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
static void UpdateCallGraphAfterInlining(CallSite CS, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1362
#define F(x, y, z)
Definition: MD5.cpp:51
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:136
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
FunctionModRefBehavior
Summary of how a function affects memory in the program.
iterator find(const KeyT &Val)
Definition: ValueMap.h:158
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:83
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:73
static std::string utostr(uint64_t X, bool isNeg=false)
Definition: StringExtras.h:79
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:949
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
An instruction for storing to memory.
Definition: Instructions.h:300
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:401
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1119
Debug location.
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:263
iterator begin()
Definition: Function.h:535
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:96
static unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:183
BasicBlock * getNormalDest() const
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
Class to represent pointers.
Definition: DerivedTypes.h:443
bool mayReadOrWriteMemory() const
Return true if this instruction may read or write memory.
Definition: Instruction.h:416
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
bool hasOperandBundles() const
Definition: CallSite.h:492
static bool isUsedByLifetimeMarker(Value *V)
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: InstrTypes.h:1441
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(true), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
unsigned getLine() const
Definition: DebugLoc.cpp:25
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:169
SmallVector< CallSite, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:202
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:395
The landingpad instruction holds all of the information necessary to generate correct exception handl...
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Definition: CallSite.h:512
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:52
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:256
Constant * stripPointerCasts()
Definition: Constant.h:155
LLVM Basic Block Representation.
Definition: BasicBlock.h:51
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:48
Conditional or Unconditional Branch instruction.
This is an important base class in LLVM.
Definition: Constant.h:42
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1207
Resume the propagation of an exception.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:116
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:581
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:259
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS)
Return the behavior of the given call site.
SmallVector< WeakVH, 8 > InlinedCalls
InlinedCalls - InlineFunction fills this in with callsites that were inlined from the callee...
Definition: Cloning.h:195
void splice(iterator where, iplist_impl &L2)
Definition: ilist.h:342
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:249
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:121
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: CallSite.h:555
static bool hasLifetimeMarkers(AllocaInst *AI)
The only memory references in this function (if it has any) are non-volatile loads and stores from ob...
Value * getOperand(unsigned i) const
Definition: User.h:145
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:119
arg_iterator arg_begin()
Definition: Function.h:550
self_iterator getIterator()
Definition: ilist_node.h:81
op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1234
void setTailCallKind(TailCallKind TCK)
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1337
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
iterator erase(const_iterator CI)
Definition: SmallVector.h:431
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:654
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:213
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1183
OperandBundleUse getOperandBundleAt(unsigned Index) const
Definition: CallSite.h:508
unsigned getCol() const
Definition: DebugLoc.cpp:30
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is umabigously identified at the function-level.
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static InvokeInst * Create(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
iterator end()
Definition: ValueMap.h:138
IterTy arg_begin() const
Definition: CallSite.h:528
CallInst * getTerminatingDeoptimizeCall()
Returns the call instruction calling .experimental.deoptimize prior to the terminating return instruc...
Definition: BasicBlock.cpp:165
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1034
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:292
Iterator for intrusive lists based on ilist_node.
const BasicBlockListType & getBasicBlockList() const
Definition: Function.h:512
BasicBlock * getUnwindDest() const
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:132
This is the shared class of boolean and integer constants.
Definition: Constants.h:88
InstrTy * getInstruction() const
Definition: CallSite.h:93
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1406
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
iterator end()
Definition: BasicBlock.h:230
ValTy * getArgument(unsigned ArgNo) const
Definition: CallSite.h:178
static CallInst * Create(Value *Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles=None, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:230
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:175
TailCallKind getTailCallKind() const
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:382
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:490
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:558
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
const BasicBlock & getEntryBlock() const
Definition: Function.h:519
DenseMap< Instruction *, Value * > UnwindDestMemoTy
void setOperand(unsigned i, Value *Val)
Definition: User.h:150
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:122
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:586
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1458
Value * getIncomingValueForBlock(const BasicBlock *BB) const
iterator_range< user_iterator > users()
Definition: Value.h:370
bool ContainsCalls
ContainsCalls - This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:70
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:453
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1132
SmallVector< AllocaInst *, 4 > StaticAllocas
StaticAllocas - InlineFunction fills this in with all static allocas that get copied into the caller...
Definition: Cloning.h:191
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:384
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:76
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:119
void emplace_back(ArgTypes &&...Args)
Definition: SmallVector.h:635
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
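A short, hedged example of how a transform might keep the cache coherent when it emits a new assumption (the builder position and cache pointer are assumed to be supplied by the caller):
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Emit llvm.assume(Cond) and register it so later queries can find it.
static void emitAndRegisterAssumption(IRBuilder<> &Builder, Value *Cond,
                                      AssumptionCache *AC) {
  CallInst *Assume = Builder.CreateAssumption(Cond);
  if (AC)
    AC->registerAssumption(Assume);
}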
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit.
Definition: Globals.cpp:188
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:665
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:856
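As a hedged illustration of metadata merging (the helper name and metadata kind are chosen for the example, not taken from this file):
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;
// Append the scopes in Extra to whatever !alias.scope metadata I already has;
// concatenate treats a null operand as an empty list.
static void appendAliasScopes(Instruction *I, MDNode *Extra) {
  I->setMetadata(LLVMContext::MD_alias_scope,
                 MDNode::concatenate(
                     I->getMetadata(LLVMContext::MD_alias_scope), Extra));
}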
const std::string & getGC() const
Definition: Function.cpp:412
static void PropagateParallelLoopAccessMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a call site that has !llvm.mem.parallel_loop_access metadata, that metadata should be propagated to all memory-accessing cloned instructions.
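A hedged sketch of what that propagation can look like, assuming the cloned instructions are reachable through the value map; this approximates the idea rather than reproducing the helper verbatim.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
// Copy the call's parallel-loop-access metadata onto every cloned instruction
// that may touch memory, merging with any metadata already present.
static void propagateParallelLoopMD(Instruction *Call, ValueToValueMapTy &VMap) {
  MDNode *M = Call->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VME = VMap.end();
       VMI != VME; ++VMI) {
    if (!VMI->second)
      continue;
    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI || !NI->mayReadOrWriteMemory())
      continue;
    MDNode *Existing =
        NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
    NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                    MDNode::concatenate(Existing, M));
  }
}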
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:97
#define I(x, y, z)
Definition: MD5.cpp:54
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.cpp:124
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.cpp:230
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.
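Conceptually, the caller gets its own stack copy of the byval object plus an explicit memcpy right before the inlined body; the following is a rough, hypothetical sketch of that transformation, not the helper above (which also handles capture checks and alignment assumptions).
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;
// Replace an implicit byval copy with an explicit alloca + memcpy in the caller.
static Value *makeExplicitByValCopy(Value *Arg, Instruction *TheCall,
                                    unsigned ByValAlignment) {
  Type *EltTy = cast<PointerType>(Arg->getType())->getElementType();
  Function *Caller = TheCall->getParent()->getParent();
  const DataLayout &DL = Caller->getParent()->getDataLayout();
  // The copy lives in a static alloca in the caller's entry block.
  IRBuilder<> EntryBuilder(&Caller->getEntryBlock(),
                           Caller->getEntryBlock().begin());
  AllocaInst *Copy =
      EntryBuilder.CreateAlloca(EltTy, nullptr, Arg->getName() + ".byval");
  if (ByValAlignment)
    Copy->setAlignment(ByValAlignment);
  // Fill the copy immediately before the call that is being inlined.
  IRBuilder<> CallBuilder(TheCall);
  CallBuilder.CreateMemCpy(Copy, Arg, DL.getTypeStoreSize(EltTy),
                           ByValAlignment);
  return Copy;
}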
iterator end()
Definition: DenseMap.h:69
ClonedCodeInfo - This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:67
iterator find(const KeyT &Val)
Definition: DenseMap.h:127
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:287
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1045
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:374
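A small, hedged usage example (names are illustrative): everything from the split point onwards moves into a new block, and the original block ends with an unconditional branch to it.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Split BB right before SplitPt; the returned block holds SplitPt and the
// instructions that followed it.
static BasicBlock *splitBefore(BasicBlock *BB, Instruction *SplitPt) {
  return BB->splitBasicBlock(SplitPt->getIterator(), BB->getName() + ".split");
}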
LLVMContext & getContext() const
Definition: Metadata.h:889
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:391
bool isVarArg() const
Definition: DerivedTypes.h:122
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, DominatorTree *DT, bool IncludeI=false, OrderedBasicBlock *OBB=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
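A hedged example of the typical query (the helper name is hypothetical): ask, conservatively, whether an alloca may already have escaped by the time a given call executes.
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
// Returns true if AI may be captured before CallI runs (conservative answer).
static bool mayEscapeBeforeCall(const AllocaInst *AI, const Instruction *CallI,
                                DominatorTree *DT) {
  return PointerMayBeCapturedBefore(AI, /*ReturnCaptures=*/true,
                                    /*StoreCaptures=*/true, CallI, DT);
}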
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
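One plausible way to answer that without actually moving the alloca is to check the two properties isStaticAlloca() would test in the entry block; this is a hedged guess at the helper's shape, not a verified copy of it.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
// Static allocas have a constant size and are not used with inalloca; the
// block they currently sit in does not matter for this question.
static bool wouldBeStaticInEntryBlock(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}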
bool use_empty() const
Definition: Value.h:299
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool ContainsDynamicAllocas
ContainsDynamicAllocas - This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:76
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
unsigned getParamAlignment(unsigned i) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:296
FunTy * getCalledFunction() const
getCalledFunction - Return the function being called if this is a direct call, otherwise return null (if it's an indirect call).
Definition: CallSite.h:110
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:537
LLVM Value Representation.
Definition: Value.h:71
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:111
A vector that has set insertion semantics.
Definition: SetVector.h:41
void removeCallEdgeFor(CallSite CS)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:203
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:1424
Invoke instruction.
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
iterator begin()
Definition: CallGraph.h:193
void recalculate(FT &F)
recalculate - compute a dominator tree for the given function
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:178
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
See if we can compute a simplified version of this instruction.
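The usual pattern around this API, shown as a hedged sketch: if a simpler value already exists, redirect all uses to it and delete the original instruction.
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Fold I away if instsimplify can produce an equivalent existing value.
static bool replaceWithSimplifiedValue(Instruction *I, const DataLayout &DL) {
  if (Value *V = SimplifyInstruction(I, DL)) {
    I->replaceAllUsesWith(V);
    I->eraseFromParent();
    return true;
  }
  return false;
}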
void setPersonalityFn(Constant *Fn)
Definition: Function.cpp:1223
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.
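At a high level this means walking the freshly cloned blocks and rewriting every call that might unwind into an invoke whose unwind edge reaches the caller's EH pad. Below is a simplified, hypothetical sketch of that loop built on the changeToInvokeAndSplitBasicBlock utility listed above; the real helper also filters intrinsics and handles funclet operand bundles.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Rewrite potentially-throwing calls in [FirstNewBlock, end) into invokes
// that unwind to UnwindEdge.
static void convertInlinedCallsToInvokes(Function &Caller,
                                         Function::iterator FirstNewBlock,
                                         BasicBlock *UnwindEdge) {
  for (Function::iterator BB = FirstNewBlock, E = Caller.end(); BB != E; ++BB) {
    for (Instruction &I : *BB) {
      auto *CI = dyn_cast<CallInst>(&I);
      if (!CI || CI->doesNotThrow())
        continue;
      changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
      // The remainder of this block now lives in a newly created block,
      // which the outer loop will visit next.
      break;
    }
  }
}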
void pop_back()
Definition: ilist.h:331
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:102
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:479
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
Root of the metadata hierarchy.
Definition: Metadata.h:55
const BasicBlock * getParent() const
Definition: Instruction.h:62
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: CallSite.h:462
iterator begin()
Definition: ValueMap.h:137
iterator_range< arg_iterator > args()
Definition: Function.h:568
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
an instruction to allocate memory on the stack
Definition: Instructions.h:60