//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

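// Both knobs above are internal cl::opt flags, so they can be toggled when
// driving the optimizer by hand. For illustration only (hypothetical
// invocations, not part of this file):
//
//   opt -inline -enable-noalias-to-md-conversion=false in.ll -S
//   clang -mllvm -preserve-alignment-assumptions-during-inlining=false ...
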
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

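// For illustration, a sketch of the split performed above (hypothetical IR,
// not taken from a real test case). A caller landing pad such as:
//
//   lpad:
//     %lp = landingpad { i8*, i32 } cleanup
//     ...
//
// is split after the landingpad instruction into:
//
//   lpad:                                ; OuterResumeDest
//     %lp = landingpad { i8*, i32 } cleanup
//     br label %lpad.body
//   lpad.body:                           ; InnerResumeDest
//     %eh.lpad-body = phi { i8*, i32 } [ %lp, %lpad ], ...
//     ...
//
// so that forwarded 'resume' instructions can branch to %lpad.body and feed
// their exception values into the PHI.
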
/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to the unwind destination recorded
/// in Invoke, and fills in the PHI nodes of that block with the appropriate
/// incoming values.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}

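// For illustration, the rewrite performed above in IR terms (hypothetical
// names): a potentially-throwing call inside the inlined body, e.g.
//
//   %r = call i32 @g(i32 %x)
//
// becomes an invoke whose unwind edge targets the caller's outer resume
// destination, with the rest of the block split off as the normal successor:
//
//   %r = invoke i32 @g(i32 %x)
//           to label %r.noexc unwind label %lpad
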
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

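// For illustration, the shape of the metadata being cloned (a hypothetical
// sketch following MDBuilder's anonymous domain/scope form):
//
//   %v = load float, float* %p, !alias.scope !2
//   ...
//   !0 = distinct !{!0, !"callee"}          ; scope domain
//   !1 = distinct !{!1, !0, !"callee: %a"}  ; scope
//   !2 = !{!1}                              ; scope list
//
// Each time the callee is inlined, the distinct nodes !0 and !1 (and the list
// !2 that refers to them) are re-created, so two inlined copies of the same
// function never share a scope.
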
/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

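// For illustration, the net effect on a simple callee (hypothetical IR):
//
//   define void @f(float* noalias %a, float* %b) { ... }
//
// After inlining, an access derived only from %a is placed in %a's new scope,
// and an access provably not derived from %a is tagged noalias against it:
//
//   store float %t, float* %a.i, !alias.scope !3
//   %u = load float, float* %b.i, !noalias !3
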
/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CalledFunc),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}

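// For illustration, the IR materialized by CreateAlignmentAssumption above
// for a parameter declared 'i32* align 32 %arg' (value names hypothetical):
//
//   %ptrint    = ptrtoint i32* %arg to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond  = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
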
/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

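// For illustration (hypothetical IR, assuming a 16-byte %struct.S): the
// initialization emitted above is a plain memcpy with alignment 1, since the
// source alignment is unknown at this point:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst.i8, i8* %src.i8, i64 16,
//                                        i32 1, i1 false)
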
/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

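// For illustration: when a copy is required, the result is a new static
// alloca in the caller's entry block (hypothetical IR),
//
//   %b.copy = alloca %struct.S, align 16
//
// which HandleByValArgumentInit above fills in with a memcpy from the
// original pointer; for readonly callees the copy is elided entirely whenever
// the incoming pointer is, or can be made, sufficiently aligned.
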
// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

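// For illustration, the marker pattern being detected (hypothetical IR):
//
//   %buf = alloca [64 x i8]
//   %p = bitcast [64 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 64, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 64, i8* %p)
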
/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (auto I = InlinedAtLocations.rbegin(), E = InlinedAtLocations.rend();
       I != E; ++I) {
    const DILocation *MD = *I;
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}

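// For illustration (hypothetical metadata): if an instruction in the callee
// carries !DILocation(line: 5, scope: !g), and the call being inlined sits at
// !DILocation(line: 42, scope: !f), the instruction ends up with
//
//   !DILocation(line: 5, scope: !g, inlinedAt: !DILocation(line: 42, scope: !f))
//
// with any pre-existing inlinedAt links re-rooted the same way, one distinct
// call-site node per inlining.
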
/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode,
                                            BI->getContext(), IANodes));
      }
    }
  }
}

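// A minimal usage sketch for the entry point below (hypothetical caller code;
// assumes analysis results are already available -- see InlineFunctionInfo in
// Cloning.h):
//
//   InlineFunctionInfo IFI(/*CG=*/nullptr, AA, ACT);
//   if (InlineFunction(CS, IFI))
//     ;  // CS has been erased; IFI.StaticAllocas and IFI.InlinedCalls
//        // describe what was inlined.
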
/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well-defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn() ? CalledFunc->getPersonalityFn() : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn() ? Caller->getPersonalityFn() : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

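  // For illustration, the wrapping this produces (hypothetical IR):
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined body containing the dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)
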
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}
Return the enclosing method, or null if none.
Definition: BasicBlock.h:111
arg_iterator arg_end()
Definition: Function.h:480
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:740
F(f)
FunTy * getCaller() const
getCaller - Return the caller function for this call site
Definition: CallSite.h:170
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:240
AtomicRMWInst - an instruction that atomically reads a memory location, combines it with another valu...
Definition: Instructions.h:674
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value * > &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
User::const_op_iterator arg_iterator
arg_iterator - The type of iterator to use when looping over actual arguments at this call site...
Definition: CallSite.h:147
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: Type.cpp:216
InlineFunctionInfo - This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.
Definition: Cloning.h:193
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:79
A node in the call graph for a module.
Definition: CallGraph.h:166
Tuple of metadata.
Definition: Metadata.h:972
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, const DataLayout &DL, AliasAnalysis *AA)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument...
size_t arg_size() const
Definition: Function.cpp:301
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:188
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:242
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:231
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:169
void addCalledFunction(CallSite CS, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:231
IterTy arg_end() const
Definition: CallSite.h:157
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:182
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, Instruction *TheCall=nullptr)
CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, except that it does some simpl...
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:22
iterator end()
Definition: CallGraph.h:189
A Use represents the edge between a Value definition and its users.
Definition: Use.h:69
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:517
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: CallSite.h:311
This file contains the simple types necessary to represent the attributes associated with functions a...
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a function that contains noalias scope metadata, this metadata needs to be cloned so th...
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
bool isMustTailCall() const
LandingPadInst * getLandingPadInst() const
getLandingPadInst - Get the landingpad instruction from the landing pad block (the unwind destination...
bool doesNotThrow() const
Determine if the call cannot unwind.
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
static void UpdateCallGraphAfterInlining(CallSite CS, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:102
LLVMContext & getContext() const
getContext - Return the LLVMContext in which this type was uniqued.
Definition: Type.h:125
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: SmallVector.h:57
const BasicBlock & back() const
Definition: Function.h:466
iterator find(const KeyT &Val)
Definition: ValueMap.h:132
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:69
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:59
static std::string utostr(uint64_t X, bool isNeg=false)
Definition: StringExtras.h:93
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, InvokeInliningInfo &Invoke)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:866
unsigned getNumClauses() const
getNumClauses - Get the number of clauses for this landing pad.
StoreInst - an instruction for storing to memory.
Definition: Instructions.h:316
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:351
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition: Metadata.h:824
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1017
Debug location.
iterator begin()
Definition: Function.h:457
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:67
Type * getElementType() const
Definition: DerivedTypes.h:323
static unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
getKnownAlignment - Try to infer an alignment for the specified pointer.
Definition: Local.h:180
BasicBlock * getNormalDest() const
PointerType - Class to represent pointers.
Definition: DerivedTypes.h:449
bool mayReadOrWriteMemory() const
mayReadOrWriteMemory - Return true if this instruction may read or write memory.
Definition: Instruction.h:362
static bool isUsedByLifetimeMarker(Value *V)
AliasAnalysis * AA
Definition: Cloning.h:203
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(true), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
unsigned getLine() const
Definition: DebugLoc.cpp:26
void clear()
Clear the cache of .assume intrinsics for a function.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:325
LandingPadInst - The landingpad instruction holds all of the information necessary to generate correc...
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:35
void setDebugLoc(DebugLoc Loc)
setDebugLoc - Set the debug location information for this instruction.
Definition: Instruction.h:227
LLVM Basic Block Representation.
Definition: BasicBlock.h:65
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:41
BranchInst - Conditional or Unconditional Branch instruction.
FunTy * getCalledFunction() const
getCalledFunction - Return the function being called if this is a direct call, otherwise return null ...
Definition: CallSite.h:99
OnlyAccessesArgumentPointees - The only memory references in this function (if it has any) are non-vo...
This is an important base class in LLVM.
Definition: Constant.h:41
PointerType * getType() const
getType - Overload to return most specific pointer type
Definition: Instructions.h:115
ResumeInst - Resume the propagation of an exception.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: CallSite.h:327
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:597
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:264
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:114
bool hasPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.h:132
const DebugLoc & getDebugLoc() const
getDebugLoc - Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:230
SmallVector< WeakVH, 8 > InlinedCalls
InlinedCalls - InlineFunction fills this in with callsites that were inlined from the callee...
Definition: Cloning.h:212
AssumptionCacheTracker * ACT
Definition: Cloning.h:204
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:252
static bool hasLifetimeMarkers(AllocaInst *AI)
Value * getOperand(unsigned i) const
Definition: User.h:118
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:117
arg_iterator arg_begin()
Definition: Function.h:472
void setTailCallKind(TailCallKind TCK)
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1473
VAArgInst - This class represents the va_arg llvm instruction, which returns an argument of the speci...
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:519
OnlyReadsArgumentPointees - The only memory references in this function (if it has any) are non-volat...
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:283
virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS)
getModRefBehavior - Return the behavior when calling the given call site.
iterator erase(iterator where)
Definition: ilist.h:465
void setMetadata(unsigned KindID, MDNode *Node)
setMetadata - Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1083
unsigned getCol() const
Definition: DebugLoc.cpp:31
bool isIdentifiedFunctionLocal(const Value *V)
isIdentifiedFunctionLocal - Return true if V is umabigously identified at the function-level.
iterator end()
Definition: ValueMap.h:112
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:936
static InvokeInst * Create(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:217
const BasicBlockListType & getBasicBlockList() const
Definition: Function.h:436
BasicBlock * getUnwindDest() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:299
This is the shared class of boolean and integer constants.
Definition: Constants.h:47
iterator end()
Definition: BasicBlock.h:233
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1253
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
MDNode * getMetadata(unsigned KindID) const
getMetadata - Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:167
TailCallKind getTailCallKind() const
unsigned arg_size() const
Definition: CallSite.h:162
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:582
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall)
Update inlined instructions' line numbers to to encode location where these instructions are inlined...
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
const BasicBlock & getEntryBlock() const
Definition: Function.h:442
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
void splice(iterator where, iplist &L2)
Definition: ilist.h:570
void setOperand(unsigned i, Value *Val)
Definition: User.h:122
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:576
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, bool Deref)
Replaces llvm.dbg.declare instruction when an alloca is replaced with a new value.
Definition: Local.cpp:1080
Value * getIncomingValueForBlock(const BasicBlock *BB) const
iterator_range< user_iterator > users()
Definition: Value.h:300
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.cpp:379
void setGC(const char *Str)
Definition: Function.cpp:390
const AttributeSet & getAttributes() const
getAttributes - Return the parameter attributes for this call.
bool ContainsCalls
ContainsCalls - This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:60
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1030
SmallVector< AllocaInst *, 4 > StaticAllocas
StaticAllocas - InlineFunction fills this in with all static allocas that get copied into the caller...
Definition: Cloning.h:208
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:372
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:75
Constant * getPersonalityFn() const
Definition: Function.h:133
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:128
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:418
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:780
iplist< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:97
#define I(x, y, z)
Definition: MD5.cpp:54
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:124
FunctionType * getFunctionType() const
Definition: Function.cpp:227
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
ClonedCodeInfo - This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:57
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:348
LLVMContext & getContext() const
Definition: Metadata.h:799
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:371
bool isVarArg() const
Definition: DerivedTypes.h:120
bool use_empty() const
Definition: Value.h:275
bool ContainsDynamicAllocas
ContainsDynamicAllocas - This is set to true if the cloned code contains a 'dynamic' alloca...
Definition: Cloning.h:66
unsigned getParamAlignment(unsigned i) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:261
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:365
LLVM Value Representation.
Definition: Value.h:69
void pop_front()
Definition: ilist.h:555
vector_type::const_iterator iterator
Definition: SetVector.h:45
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:112
A vector that has set insertion semantics.
Definition: SetVector.h:37
void removeCallEdgeFor(CallSite CS)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:200
const Value * getArraySize() const
getArraySize - Get the number of elements allocated.
Definition: Instructions.h:110
InvokeInst - Invoke instruction.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
IterTy arg_begin() const
arg_begin/arg_end - Return iterators corresponding to the actual argument list for a call site...
Definition: CallSite.h:151
static DebugLoc updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx, DenseMap< const DILocation *, DILocation * > &IANodes)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
bool isCleanup() const
isCleanup - Return 'true' if this landingpad instruction is a cleanup.
iterator begin()
Definition: CallGraph.h:188
void recalculate(FT &F)
recalculate - compute a dominator tree for the given function
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:173
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
SimplifyInstruction - See if we can compute a simplified version of this instruction.
static void Split(std::vector< std::string > &V, StringRef S)
Split - Splits a string of comma separated items in to a vector of strings.
Type * getAllocatedType() const
getAllocatedType - Return the type that is being allocated by the instruction.
Definition: Instructions.h:122
Root of the metadata hierarchy.
Definition: Metadata.h:45
const BasicBlock * getParent() const
Definition: Instruction.h:72
iterator begin()
Definition: ValueMap.h:111
IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:37
AllocaInst - an instruction to allocate memory on the stack.
Definition: Instructions.h:76
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, DominatorTree *DT, bool IncludeI=false)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...