LLVM 20.0.0git
InlineFunction.cpp
Go to the documentation of this file.
1//===- InlineFunction.cpp - Code to perform function inlining -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameters and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/SetVector.h"
35#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/CFG.h"
40#include "llvm/IR/Constant.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/IR/InlineAsm.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
61#include "llvm/IR/Metadata.h"
62#include "llvm/IR/Module.h"
65#include "llvm/IR/Type.h"
66#include "llvm/IR/User.h"
67#include "llvm/IR/Value.h"
75#include <algorithm>
76#include <cassert>
77#include <cstdint>
78#include <deque>
79#include <iterator>
80#include <limits>
81#include <optional>
82#include <string>
83#include <utility>
84#include <vector>
85
86#define DEBUG_TYPE "inline-function"
87
88using namespace llvm;
89using namespace llvm::memprof;
91
92static cl::opt<bool>
93EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
95 cl::desc("Convert noalias attributes to metadata during inlining."));
96
97static cl::opt<bool>
98 UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
99 cl::init(true),
100 cl::desc("Use the llvm.experimental.noalias.scope.decl "
101 "intrinsic during inlining."));
102
103// Disabled by default, because the added alignment assumptions may increase
104// compile-time and block optimizations. This option is not suitable for use
105// with frontends that emit comprehensive parameter alignment annotations.
106static cl::opt<bool>
107PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
108 cl::init(false), cl::Hidden,
109 cl::desc("Convert align attributes to assumptions during inlining."));
110
112 "max-inst-checked-for-throw-during-inlining", cl::Hidden,
113 cl::desc("the maximum number of instructions analyzed for may throw during "
114 "attribute inference in inlined body"),
115 cl::init(4));
116
117namespace {
118
119 /// A class for recording information about inlining a landing pad.
120 class LandingPadInliningInfo {
121 /// Destination of the invoke's unwind.
122 BasicBlock *OuterResumeDest;
123
124 /// Destination for the callee's resume.
125 BasicBlock *InnerResumeDest = nullptr;
126
127 /// LandingPadInst associated with the invoke.
128 LandingPadInst *CallerLPad = nullptr;
129
130 /// PHI for EH values from landingpad insts.
131 PHINode *InnerEHValuesPHI = nullptr;
132
133 SmallVector<Value*, 8> UnwindDestPHIValues;
134
135 public:
136 LandingPadInliningInfo(InvokeInst *II)
137 : OuterResumeDest(II->getUnwindDest()) {
138 // If there are PHI nodes in the unwind destination block, we need to keep
139 // track of which values came into them from the invoke before removing
140 // the edge from this block.
141 BasicBlock *InvokeBB = II->getParent();
142 BasicBlock::iterator I = OuterResumeDest->begin();
143 for (; isa<PHINode>(I); ++I) {
144 // Save the value to use for this edge.
145 PHINode *PHI = cast<PHINode>(I);
146 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
147 }
148
149 CallerLPad = cast<LandingPadInst>(I);
150 }
151
152 /// The outer unwind destination is the target of
153 /// unwind edges introduced for calls within the inlined function.
154 BasicBlock *getOuterResumeDest() const {
155 return OuterResumeDest;
156 }
157
158 BasicBlock *getInnerResumeDest();
159
160 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
161
162 /// Forward the 'resume' instruction to the caller's landing pad block.
163 /// When the landing pad block has only one predecessor, this is
164 /// a simple branch. When there is more than one predecessor, we need to
165 /// split the landing pad block after the landingpad instruction and jump
166 /// to there.
167 void forwardResume(ResumeInst *RI,
169
170 /// Add incoming-PHI values to the unwind destination block for the given
171 /// basic block, using the values for the original invoke's source block.
172 void addIncomingPHIValuesFor(BasicBlock *BB) const {
173 addIncomingPHIValuesForInto(BB, OuterResumeDest);
174 }
175
176 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
177 BasicBlock::iterator I = dest->begin();
178 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
179 PHINode *phi = cast<PHINode>(I);
180 phi->addIncoming(UnwindDestPHIValues[i], src);
181 }
182 }
183 };
184} // end anonymous namespace
185
187 auto *I = BB.getFirstNonPHI();
188 while (I) {
189 if (auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(I)) {
190 if (IntrinsicCall->isEntry()) {
191 return IntrinsicCall;
192 }
193 }
194 I = I->getNextNode();
195 }
196 return nullptr;
197}
198
199/// Get or create a target for the branch from ResumeInsts.
200BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
201 if (InnerResumeDest) return InnerResumeDest;
202
203 // Split the landing pad.
204 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
205 InnerResumeDest =
206 OuterResumeDest->splitBasicBlock(SplitPoint,
207 OuterResumeDest->getName() + ".body");
208
209 // The number of incoming edges we expect to the inner landing pad.
210 const unsigned PHICapacity = 2;
211
212 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
213 BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
214 BasicBlock::iterator I = OuterResumeDest->begin();
215 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
216 PHINode *OuterPHI = cast<PHINode>(I);
217 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
218 OuterPHI->getName() + ".lpad-body");
219 InnerPHI->insertBefore(InsertPoint);
220 OuterPHI->replaceAllUsesWith(InnerPHI);
221 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
222 }
223
224 // Create a PHI for the exception values.
225 InnerEHValuesPHI =
226 PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
227 InnerEHValuesPHI->insertBefore(InsertPoint);
228 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
229 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
230
231 // All done.
232 return InnerResumeDest;
233}
234
235/// Forward the 'resume' instruction to the caller's landing pad block.
236/// When the landing pad block has only one predecessor, this is a simple
237/// branch. When there is more than one predecessor, we need to split the
238/// landing pad block after the landingpad instruction and jump to there.
239void LandingPadInliningInfo::forwardResume(
241 BasicBlock *Dest = getInnerResumeDest();
242 BasicBlock *Src = RI->getParent();
243
244 BranchInst::Create(Dest, Src);
245
246 // Update the PHIs in the destination. They were inserted in an order which
247 // makes this work.
248 addIncomingPHIValuesForInto(Src, Dest);
249
250 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
251 RI->eraseFromParent();
252}
253
254/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
255static Value *getParentPad(Value *EHPad) {
256 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257 return FPI->getParentPad();
258 return cast<CatchSwitchInst>(EHPad)->getParentPad();
259}
260
262
263/// Helper for getUnwindDestToken that does the descendant-ward part of
264/// the search.
266 UnwindDestMemoTy &MemoMap) {
267 SmallVector<Instruction *, 8> Worklist(1, EHPad);
268
269 while (!Worklist.empty()) {
270 Instruction *CurrentPad = Worklist.pop_back_val();
271 // We only put pads on the worklist that aren't in the MemoMap. When
272 // we find an unwind dest for a pad we may update its ancestors, but
273 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
274 // so they should never get updated while queued on the worklist.
275 assert(!MemoMap.count(CurrentPad));
276 Value *UnwindDestToken = nullptr;
277 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
278 if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
280 } else {
281 // Catchswitch doesn't have a 'nounwind' variant, and one might be
282 // annotated as "unwinds to caller" when really it's nounwind (see
283 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
284 // parent's unwind dest from this. We can check its catchpads'
285 // descendants, since they might include a cleanuppad with an
286 // "unwinds to caller" cleanupret, which can be trusted.
287 for (auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
290 BasicBlock *HandlerBlock = *HI;
291 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
292 for (User *Child : CatchPad->users()) {
293 // Intentionally ignore invokes here -- since the catchswitch is
294 // marked "unwind to caller", it would be a verifier error if it
295 // contained an invoke which unwinds out of it, so any invoke we'd
296 // encounter must unwind to some child of the catch.
297 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
298 continue;
299
300 Instruction *ChildPad = cast<Instruction>(Child);
301 auto Memo = MemoMap.find(ChildPad);
302 if (Memo == MemoMap.end()) {
303 // Haven't figured out this child pad yet; queue it.
304 Worklist.push_back(ChildPad);
305 continue;
306 }
307 // We've already checked this child, but might have found that
308 // it offers no proof either way.
309 Value *ChildUnwindDestToken = Memo->second;
310 if (!ChildUnwindDestToken)
311 continue;
312 // We already know the child's unwind dest, which can either
313 // be ConstantTokenNone to indicate unwind to caller, or can
314 // be another child of the catchpad. Only the former indicates
315 // the unwind dest of the catchswitch.
316 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
317 UnwindDestToken = ChildUnwindDestToken;
318 break;
319 }
320 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
321 }
322 }
323 }
324 } else {
325 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
326 for (User *U : CleanupPad->users()) {
327 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
328 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
329 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
330 else
331 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
332 break;
333 }
334 Value *ChildUnwindDestToken;
335 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
336 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
337 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
338 Instruction *ChildPad = cast<Instruction>(U);
339 auto Memo = MemoMap.find(ChildPad);
340 if (Memo == MemoMap.end()) {
341 // Haven't resolved this child yet; queue it and keep searching.
342 Worklist.push_back(ChildPad);
343 continue;
344 }
345 // We've checked this child, but still need to ignore it if it
346 // had no proof either way.
347 ChildUnwindDestToken = Memo->second;
348 if (!ChildUnwindDestToken)
349 continue;
350 } else {
351 // Not a relevant user of the cleanuppad
352 continue;
353 }
354 // In a well-formed program, the child/invoke must either unwind to
355 // an(other) child of the cleanup, or exit the cleanup. In the
356 // first case, continue searching.
357 if (isa<Instruction>(ChildUnwindDestToken) &&
358 getParentPad(ChildUnwindDestToken) == CleanupPad)
359 continue;
360 UnwindDestToken = ChildUnwindDestToken;
361 break;
362 }
363 }
364 // If we haven't found an unwind dest for CurrentPad, we may have queued its
365 // children, so move on to the next in the worklist.
366 if (!UnwindDestToken)
367 continue;
368
369 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
370 // any ancestors of CurrentPad up to but not including UnwindDestToken's
371 // parent pad. Record this in the memo map, and check to see if the
372 // original EHPad being queried is one of the ones exited.
373 Value *UnwindParent;
374 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
375 UnwindParent = getParentPad(UnwindPad);
376 else
377 UnwindParent = nullptr;
378 bool ExitedOriginalPad = false;
379 for (Instruction *ExitedPad = CurrentPad;
380 ExitedPad && ExitedPad != UnwindParent;
381 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
382 // Skip over catchpads since they just follow their catchswitches.
383 if (isa<CatchPadInst>(ExitedPad))
384 continue;
385 MemoMap[ExitedPad] = UnwindDestToken;
386 ExitedOriginalPad |= (ExitedPad == EHPad);
387 }
388
389 if (ExitedOriginalPad)
390 return UnwindDestToken;
391
392 // Continue the search.
393 }
394
395 // No definitive information is contained within this funclet.
396 return nullptr;
397}
398
399/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
400/// return that pad instruction. If it unwinds to caller, return
401/// ConstantTokenNone. If it does not have a definitive unwind destination,
402/// return nullptr.
403///
404/// This routine gets invoked for calls in funclets in inlinees when inlining
405/// an invoke. Since many funclets don't have calls inside them, it's queried
406/// on-demand rather than building a map of pads to unwind dests up front.
407/// Determining a funclet's unwind dest may require recursively searching its
408/// descendants, and also ancestors and cousins if the descendants don't provide
409/// an answer. Since most funclets will have their unwind dest immediately
410/// available as the unwind dest of a catchswitch or cleanupret, this routine
411/// searches top-down from the given pad and then up. To avoid worst-case
412/// quadratic run-time given that approach, it uses a memo map to avoid
413/// re-processing funclet trees. The callers that rewrite the IR as they go
414/// take advantage of this, for correctness, by checking/forcing rewritten
415/// pads' entries to match the original callee view.
417 UnwindDestMemoTy &MemoMap) {
418 // Catchpads unwind to the same place as their catchswitch;
419 // redirct any queries on catchpads so the code below can
420 // deal with just catchswitches and cleanuppads.
421 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
422 EHPad = CPI->getCatchSwitch();
423
424 // Check if we've already determined the unwind dest for this pad.
425 auto Memo = MemoMap.find(EHPad);
426 if (Memo != MemoMap.end())
427 return Memo->second;
428
429 // Search EHPad and, if necessary, its descendants.
430 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
431 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
432 if (UnwindDestToken)
433 return UnwindDestToken;
434
435 // No information is available for this EHPad from itself or any of its
436 // descendants. An unwind all the way out to a pad in the caller would
437 // need also to agree with the unwind dest of the parent funclet, so
438 // search up the chain to try to find a funclet with information. Put
439 // null entries in the memo map to avoid re-processing as we go up.
440 MemoMap[EHPad] = nullptr;
441#ifndef NDEBUG
443 TempMemos.insert(EHPad);
444#endif
445 Instruction *LastUselessPad = EHPad;
446 Value *AncestorToken;
447 for (AncestorToken = getParentPad(EHPad);
448 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
449 AncestorToken = getParentPad(AncestorToken)) {
450 // Skip over catchpads since they just follow their catchswitches.
451 if (isa<CatchPadInst>(AncestorPad))
452 continue;
453 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
454 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
455 // call to getUnwindDestToken, that would mean that AncestorPad had no
456 // information in itself, its descendants, or its ancestors. If that
457 // were the case, then we should also have recorded the lack of information
458 // for the descendant that we're coming from. So assert that we don't
459 // find a null entry in the MemoMap for AncestorPad.
460 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
461 auto AncestorMemo = MemoMap.find(AncestorPad);
462 if (AncestorMemo == MemoMap.end()) {
463 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
464 } else {
465 UnwindDestToken = AncestorMemo->second;
466 }
467 if (UnwindDestToken)
468 break;
469 LastUselessPad = AncestorPad;
470 MemoMap[LastUselessPad] = nullptr;
471#ifndef NDEBUG
472 TempMemos.insert(LastUselessPad);
473#endif
474 }
475
476 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
477 // returned nullptr (and likewise for EHPad and any of its ancestors up to
478 // LastUselessPad), so LastUselessPad has no information from below. Since
479 // getUnwindDestTokenHelper must investigate all downward paths through
480 // no-information nodes to prove that a node has no information like this,
481 // and since any time it finds information it records it in the MemoMap for
482 // not just the immediately-containing funclet but also any ancestors also
483 // exited, it must be the case that, walking downward from LastUselessPad,
484 // visiting just those nodes which have not been mapped to an unwind dest
485 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
486 // they are just used to keep getUnwindDestTokenHelper from repeating work),
487 // any node visited must have been exhaustively searched with no information
488 // for it found.
489 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
490 while (!Worklist.empty()) {
491 Instruction *UselessPad = Worklist.pop_back_val();
492 auto Memo = MemoMap.find(UselessPad);
493 if (Memo != MemoMap.end() && Memo->second) {
494 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
495 // that it is a funclet that does have information about unwinding to
496 // a particular destination; its parent was a useless pad.
497 // Since its parent has no information, the unwind edge must not escape
498 // the parent, and must target a sibling of this pad. This local unwind
499 // gives us no information about EHPad. Leave it and the subtree rooted
500 // at it alone.
501 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
502 continue;
503 }
504 // We know we don't have information for UselesPad. If it has an entry in
505 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
506 // added on this invocation of getUnwindDestToken; if a previous invocation
507 // recorded nullptr, it would have had to prove that the ancestors of
508 // UselessPad, which include LastUselessPad, had no information, and that
509 // in turn would have required proving that the descendants of
510 // LastUselesPad, which include EHPad, have no information about
511 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
512 // the MemoMap on that invocation, which isn't the case if we got here.
513 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
514 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
515 // information that we'd be contradicting by making a map entry for it
516 // (which is something that getUnwindDestTokenHelper must have proved for
517 // us to get here). Just assert on is direct users here; the checks in
518 // this downward walk at its descendants will verify that they don't have
519 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
520 // unwind edges or unwind to a sibling).
521 MemoMap[UselessPad] = UnwindDestToken;
522 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
523 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
524 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
525 auto *CatchPad = HandlerBlock->getFirstNonPHI();
526 for (User *U : CatchPad->users()) {
527 assert(
528 (!isa<InvokeInst>(U) ||
530 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
531 CatchPad)) &&
532 "Expected useless pad");
533 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
534 Worklist.push_back(cast<Instruction>(U));
535 }
536 }
537 } else {
538 assert(isa<CleanupPadInst>(UselessPad));
539 for (User *U : UselessPad->users()) {
540 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
541 assert((!isa<InvokeInst>(U) ||
543 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
544 UselessPad)) &&
545 "Expected useless pad");
546 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
547 Worklist.push_back(cast<Instruction>(U));
548 }
549 }
550 }
551
552 return UnwindDestToken;
553}
554
555/// When we inline a basic block into an invoke,
556/// we have to turn all of the calls that can throw into invokes.
557/// This function analyze BB to see if there are any calls, and if so,
558/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
559/// nodes in that block with the values specified in InvokeDestPHIValues.
561 BasicBlock *BB, BasicBlock *UnwindEdge,
562 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
564 // We only need to check for function calls: inlined invoke
565 // instructions require no special handling.
566 CallInst *CI = dyn_cast<CallInst>(&I);
567
568 if (!CI || CI->doesNotThrow())
569 continue;
570
571 // We do not need to (and in fact, cannot) convert possibly throwing calls
572 // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
573 // invokes. The caller's "segment" of the deoptimization continuation
574 // attached to the newly inlined @llvm.experimental_deoptimize
575 // (resp. @llvm.experimental.guard) call should contain the exception
576 // handling logic, if any.
577 if (auto *F = CI->getCalledFunction())
578 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
579 F->getIntrinsicID() == Intrinsic::experimental_guard)
580 continue;
581
582 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
583 // This call is nested inside a funclet. If that funclet has an unwind
584 // destination within the inlinee, then unwinding out of this call would
585 // be UB. Rewriting this call to an invoke which targets the inlined
586 // invoke's unwind dest would give the call's parent funclet multiple
587 // unwind destinations, which is something that subsequent EH table
588 // generation can't handle and that the veirifer rejects. So when we
589 // see such a call, leave it as a call.
590 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
591 Value *UnwindDestToken =
592 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
593 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
594 continue;
595#ifndef NDEBUG
596 Instruction *MemoKey;
597 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
598 MemoKey = CatchPad->getCatchSwitch();
599 else
600 MemoKey = FuncletPad;
601 assert(FuncletUnwindMap->count(MemoKey) &&
602 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
603 "must get memoized to avoid confusing later searches");
604#endif // NDEBUG
605 }
606
607 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
608 return BB;
609 }
610 return nullptr;
611}
612
613/// If we inlined an invoke site, we need to convert calls
614/// in the body of the inlined function into invokes.
615///
616/// II is the invoke instruction being inlined. FirstNewBlock is the first
617/// block of the inlined code (the last block is the end of the function),
618/// and InlineCodeInfo is information about the code that got inlined.
619static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
620 ClonedCodeInfo &InlinedCodeInfo) {
621 BasicBlock *InvokeDest = II->getUnwindDest();
622
623 Function *Caller = FirstNewBlock->getParent();
624
625 // The inlined code is currently at the end of the function, scan from the
626 // start of the inlined code to its end, checking for stuff we need to
627 // rewrite.
628 LandingPadInliningInfo Invoke(II);
629
630 // Get all of the inlined landing pad instructions.
632 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
633 I != E; ++I)
634 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
635 InlinedLPads.insert(II->getLandingPadInst());
636
637 // Append the clauses from the outer landing pad instruction into the inlined
638 // landing pad instructions.
639 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
640 for (LandingPadInst *InlinedLPad : InlinedLPads) {
641 unsigned OuterNum = OuterLPad->getNumClauses();
642 InlinedLPad->reserveClauses(OuterNum);
643 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
644 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
645 if (OuterLPad->isCleanup())
646 InlinedLPad->setCleanup(true);
647 }
648
649 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
650 BB != E; ++BB) {
651 if (InlinedCodeInfo.ContainsCalls)
653 &*BB, Invoke.getOuterResumeDest()))
654 // Update any PHI nodes in the exceptional block to indicate that there
655 // is now a new entry in them.
656 Invoke.addIncomingPHIValuesFor(NewBB);
657
658 // Forward any resumes that are remaining here.
659 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
660 Invoke.forwardResume(RI, InlinedLPads);
661 }
662
663 // Now that everything is happy, we have one final detail. The PHI nodes in
664 // the exception destination block still have entries due to the original
665 // invoke instruction. Eliminate these entries (which might even delete the
666 // PHI node) now.
667 InvokeDest->removePredecessor(II->getParent());
668}
669
670/// If we inlined an invoke site, we need to convert calls
671/// in the body of the inlined function into invokes.
672///
673/// II is the invoke instruction being inlined. FirstNewBlock is the first
674/// block of the inlined code (the last block is the end of the function),
675/// and InlineCodeInfo is information about the code that got inlined.
676static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
677 ClonedCodeInfo &InlinedCodeInfo) {
678 BasicBlock *UnwindDest = II->getUnwindDest();
679 Function *Caller = FirstNewBlock->getParent();
680
681 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
682
683 // If there are PHI nodes in the unwind destination block, we need to keep
684 // track of which values came into them from the invoke before removing the
685 // edge from this block.
686 SmallVector<Value *, 8> UnwindDestPHIValues;
687 BasicBlock *InvokeBB = II->getParent();
688 for (PHINode &PHI : UnwindDest->phis()) {
689 // Save the value to use for this edge.
690 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
691 }
692
693 // Add incoming-PHI values to the unwind destination block for the given basic
694 // block, using the values for the original invoke's source block.
695 auto UpdatePHINodes = [&](BasicBlock *Src) {
696 BasicBlock::iterator I = UnwindDest->begin();
697 for (Value *V : UnwindDestPHIValues) {
698 PHINode *PHI = cast<PHINode>(I);
699 PHI->addIncoming(V, Src);
700 ++I;
701 }
702 };
703
704 // This connects all the instructions which 'unwind to caller' to the invoke
705 // destination.
706 UnwindDestMemoTy FuncletUnwindMap;
707 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
708 BB != E; ++BB) {
709 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
710 if (CRI->unwindsToCaller()) {
711 auto *CleanupPad = CRI->getCleanupPad();
712 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI->getIterator());
713 CRI->eraseFromParent();
714 UpdatePHINodes(&*BB);
715 // Finding a cleanupret with an unwind destination would confuse
716 // subsequent calls to getUnwindDestToken, so map the cleanuppad
717 // to short-circuit any such calls and recognize this as an "unwind
718 // to caller" cleanup.
719 assert(!FuncletUnwindMap.count(CleanupPad) ||
720 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
721 FuncletUnwindMap[CleanupPad] =
722 ConstantTokenNone::get(Caller->getContext());
723 }
724 }
725
726 Instruction *I = BB->getFirstNonPHI();
727 if (!I->isEHPad())
728 continue;
729
730 Instruction *Replacement = nullptr;
731 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
732 if (CatchSwitch->unwindsToCaller()) {
733 Value *UnwindDestToken;
734 if (auto *ParentPad =
735 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
736 // This catchswitch is nested inside another funclet. If that
737 // funclet has an unwind destination within the inlinee, then
738 // unwinding out of this catchswitch would be UB. Rewriting this
739 // catchswitch to unwind to the inlined invoke's unwind dest would
740 // give the parent funclet multiple unwind destinations, which is
741 // something that subsequent EH table generation can't handle and
742 // that the veirifer rejects. So when we see such a call, leave it
743 // as "unwind to caller".
744 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
745 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
746 continue;
747 } else {
748 // This catchswitch has no parent to inherit constraints from, and
749 // none of its descendants can have an unwind edge that exits it and
750 // targets another funclet in the inlinee. It may or may not have a
751 // descendant that definitively has an unwind to caller. In either
752 // case, we'll have to assume that any unwinds out of it may need to
753 // be routed to the caller, so treat it as though it has a definitive
754 // unwind to caller.
755 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
756 }
757 auto *NewCatchSwitch = CatchSwitchInst::Create(
758 CatchSwitch->getParentPad(), UnwindDest,
759 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
760 CatchSwitch->getIterator());
761 for (BasicBlock *PadBB : CatchSwitch->handlers())
762 NewCatchSwitch->addHandler(PadBB);
763 // Propagate info for the old catchswitch over to the new one in
764 // the unwind map. This also serves to short-circuit any subsequent
765 // checks for the unwind dest of this catchswitch, which would get
766 // confused if they found the outer handler in the callee.
767 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
768 Replacement = NewCatchSwitch;
769 }
770 } else if (!isa<FuncletPadInst>(I)) {
771 llvm_unreachable("unexpected EHPad!");
772 }
773
774 if (Replacement) {
775 Replacement->takeName(I);
776 I->replaceAllUsesWith(Replacement);
777 I->eraseFromParent();
778 UpdatePHINodes(&*BB);
779 }
780 }
781
782 if (InlinedCodeInfo.ContainsCalls)
783 for (Function::iterator BB = FirstNewBlock->getIterator(),
784 E = Caller->end();
785 BB != E; ++BB)
787 &*BB, UnwindDest, &FuncletUnwindMap))
788 // Update any PHI nodes in the exceptional block to indicate that there
789 // is now a new entry in them.
790 UpdatePHINodes(NewBB);
791
792 // Now that everything is happy, we have one final detail. The PHI nodes in
793 // the exception destination block still have entries due to the original
794 // invoke instruction. Eliminate these entries (which might even delete the
795 // PHI node) now.
796 UnwindDest->removePredecessor(InvokeBB);
797}
798
799static bool haveCommonPrefix(MDNode *MIBStackContext,
800 MDNode *CallsiteStackContext) {
801 assert(MIBStackContext->getNumOperands() > 0 &&
802 CallsiteStackContext->getNumOperands() > 0);
803 // Because of the context trimming performed during matching, the callsite
804 // context could have more stack ids than the MIB. We match up to the end of
805 // the shortest stack context.
806 for (auto MIBStackIter = MIBStackContext->op_begin(),
807 CallsiteStackIter = CallsiteStackContext->op_begin();
808 MIBStackIter != MIBStackContext->op_end() &&
809 CallsiteStackIter != CallsiteStackContext->op_end();
810 MIBStackIter++, CallsiteStackIter++) {
811 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
812 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
813 assert(Val1 && Val2);
814 if (Val1->getZExtValue() != Val2->getZExtValue())
815 return false;
816 }
817 return true;
818}
819
820static void removeMemProfMetadata(CallBase *Call) {
821 Call->setMetadata(LLVMContext::MD_memprof, nullptr);
822}
823
825 Call->setMetadata(LLVMContext::MD_callsite, nullptr);
826}
827
829 const std::vector<Metadata *> &MIBList) {
830 assert(!MIBList.empty());
831 // Remove existing memprof, which will either be replaced or may not be needed
832 // if we are able to use a single allocation type function attribute.
835 for (Metadata *MIB : MIBList)
836 CallStack.addCallStack(cast<MDNode>(MIB));
837 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
838 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
839 if (!MemprofMDAttached)
840 // If we used a function attribute remove the callsite metadata as well.
842}
843
844// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
845// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
846// the call that was inlined.
847static void propagateMemProfHelper(const CallBase *OrigCall,
848 CallBase *ClonedCall,
849 MDNode *InlinedCallsiteMD) {
850 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
851 MDNode *ClonedCallsiteMD = nullptr;
852 // Check if the call originally had callsite metadata, and update it for the
853 // new call in the inlined body.
854 if (OrigCallsiteMD) {
855 // The cloned call's context is now the concatenation of the original call's
856 // callsite metadata and the callsite metadata on the call where it was
857 // inlined.
858 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
859 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
860 }
861
862 // Update any memprof metadata on the cloned call.
863 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
864 if (!OrigMemProfMD)
865 return;
866 // We currently expect that allocations with memprof metadata also have
867 // callsite metadata for the allocation's part of the context.
868 assert(OrigCallsiteMD);
869
870 // New call's MIB list.
871 std::vector<Metadata *> NewMIBList;
872
873 // For each MIB metadata, check if its call stack context starts with the
874 // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
875 // the inlined body. If not, it stays on the out-of-line original call.
876 for (auto &MIBOp : OrigMemProfMD->operands()) {
877 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
878 // Stack is first operand of MIB.
879 MDNode *StackMD = getMIBStackNode(MIB);
880 assert(StackMD);
881 // See if the new cloned callsite context matches this profiled context.
882 if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
883 // Add it to the cloned call's MIB list.
884 NewMIBList.push_back(MIB);
885 }
886 if (NewMIBList.empty()) {
887 removeMemProfMetadata(ClonedCall);
888 removeCallsiteMetadata(ClonedCall);
889 return;
890 }
891 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
892 updateMemprofMetadata(ClonedCall, NewMIBList);
893}
894
895// Update memprof related metadata (!memprof and !callsite) based on the
896// inlining of Callee into the callsite at CB. The updates include merging the
897// inlined callee's callsite metadata with that of the inlined call,
898// and moving the subset of any memprof contexts to the inlined callee
899// allocations if they match the new inlined call stack.
900static void
902 bool ContainsMemProfMetadata,
904 MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
905 // Only need to update if the inlined callsite had callsite metadata, or if
906 // there was any memprof metadata inlined.
907 if (!CallsiteMD && !ContainsMemProfMetadata)
908 return;
909
910 // Propagate metadata onto the cloned calls in the inlined callee.
911 for (const auto &Entry : VMap) {
912 // See if this is a call that has been inlined and remapped, and not
913 // simplified away in the process.
914 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
915 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
916 if (!OrigCall || !ClonedCall)
917 continue;
918 // If the inlined callsite did not have any callsite metadata, then it isn't
919 // involved in any profiled call contexts, and we can remove any memprof
920 // metadata on the cloned call.
921 if (!CallsiteMD) {
922 removeMemProfMetadata(ClonedCall);
923 removeCallsiteMetadata(ClonedCall);
924 continue;
925 }
926 propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
927 }
928}
929
930/// When inlining a call site that has !llvm.mem.parallel_loop_access,
931/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
932/// be propagated to all memory-accessing cloned instructions.
934 Function::iterator FEnd) {
935 MDNode *MemParallelLoopAccess =
936 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
937 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
938 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
939 MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
940 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
941 return;
942
943 for (BasicBlock &BB : make_range(FStart, FEnd)) {
944 for (Instruction &I : BB) {
945 // This metadata is only relevant for instructions that access memory.
946 if (!I.mayReadOrWriteMemory())
947 continue;
948
949 if (MemParallelLoopAccess) {
950 // TODO: This probably should not overwrite MemParalleLoopAccess.
951 MemParallelLoopAccess = MDNode::concatenate(
952 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
953 MemParallelLoopAccess);
954 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
955 MemParallelLoopAccess);
956 }
957
958 if (AccessGroup)
959 I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
960 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
961
962 if (AliasScope)
963 I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
964 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
965
966 if (NoAlias)
967 I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
968 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
969 }
970 }
971}
972
973/// Bundle operands of the inlined function must be added to inlined call sites.
975 Instruction *CallSiteEHPad) {
976 for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
977 CallBase *I = dyn_cast<CallBase>(&II);
978 if (!I)
979 continue;
980 // Skip call sites which already have a "funclet" bundle.
981 if (I->getOperandBundle(LLVMContext::OB_funclet))
982 continue;
983 // Skip call sites which are nounwind intrinsics (as long as they don't
984 // lower into regular function calls in the course of IR transformations).
985 auto *CalledFn =
986 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
987 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
988 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
989 continue;
990
992 I->getOperandBundlesAsDefs(OpBundles);
993 OpBundles.emplace_back("funclet", CallSiteEHPad);
994
995 Instruction *NewInst = CallBase::Create(I, OpBundles, I->getIterator());
996 NewInst->takeName(I);
997 I->replaceAllUsesWith(NewInst);
998 I->eraseFromParent();
999 }
1000}
1001
1002namespace {
1003/// Utility for cloning !noalias and !alias.scope metadata. When a code region
1004/// using scoped alias metadata is inlined, the aliasing relationships may not
1005/// hold between the two version. It is necessary to create a deep clone of the
1006/// metadata, putting the two versions in separate scope domains.
1007class ScopedAliasMetadataDeepCloner {
1010 MetadataMap MDMap;
1011 void addRecursiveMetadataUses();
1012
1013public:
1014 ScopedAliasMetadataDeepCloner(const Function *F);
1015
1016 /// Create a new clone of the scoped alias metadata, which will be used by
1017 /// subsequent remap() calls.
1018 void clone();
1019
1020 /// Remap instructions in the given range from the original to the cloned
1021 /// metadata.
1022 void remap(Function::iterator FStart, Function::iterator FEnd);
1023};
1024} // namespace
1025
1026ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1027 const Function *F) {
1028 for (const BasicBlock &BB : *F) {
1029 for (const Instruction &I : BB) {
1030 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1031 MD.insert(M);
1032 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1033 MD.insert(M);
1034
1035 // We also need to clone the metadata in noalias intrinsics.
1036 if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1037 MD.insert(Decl->getScopeList());
1038 }
1039 }
1040 addRecursiveMetadataUses();
1041}
1042
1043void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1044 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1045 while (!Queue.empty()) {
1046 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1047 for (const Metadata *Op : M->operands())
1048 if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
1049 if (MD.insert(OpMD))
1050 Queue.push_back(OpMD);
1051 }
1052}
1053
1054void ScopedAliasMetadataDeepCloner::clone() {
1055 assert(MDMap.empty() && "clone() already called ?");
1056
1058 for (const MDNode *I : MD) {
1059 DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), {}));
1060 MDMap[I].reset(DummyNodes.back().get());
1061 }
1062
1063 // Create new metadata nodes to replace the dummy nodes, replacing old
1064 // metadata references with either a dummy node or an already-created new
1065 // node.
1067 for (const MDNode *I : MD) {
1068 for (const Metadata *Op : I->operands()) {
1069 if (const MDNode *M = dyn_cast<MDNode>(Op))
1070 NewOps.push_back(MDMap[M]);
1071 else
1072 NewOps.push_back(const_cast<Metadata *>(Op));
1073 }
1074
1075 MDNode *NewM = MDNode::get(I->getContext(), NewOps);
1076 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
1077 assert(TempM->isTemporary() && "Expected temporary node");
1078
1079 TempM->replaceAllUsesWith(NewM);
1080 NewOps.clear();
1081 }
1082}
1083
1084void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
1085 Function::iterator FEnd) {
1086 if (MDMap.empty())
1087 return; // Nothing to do.
1088
1089 for (BasicBlock &BB : make_range(FStart, FEnd)) {
1090 for (Instruction &I : BB) {
1091 // TODO: The null checks for the MDMap.lookup() results should no longer
1092 // be necessary.
1093 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1094 if (MDNode *MNew = MDMap.lookup(M))
1095 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1096
1097 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1098 if (MDNode *MNew = MDMap.lookup(M))
1099 I.setMetadata(LLVMContext::MD_noalias, MNew);
1100
1101 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1102 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1103 Decl->setScopeList(MNew);
1104 }
1105 }
1106}
1107
1108/// If the inlined function has noalias arguments,
1109/// then add new alias scopes for each noalias argument, tag the mapped noalias
1110/// parameters with noalias metadata specifying the new scope, and tag all
1111/// non-derived loads, stores and memory intrinsics with the new alias scopes.
1113 const DataLayout &DL, AAResults *CalleeAAR,
1114 ClonedCodeInfo &InlinedFunctionInfo) {
1116 return;
1117
1118 const Function *CalledFunc = CB.getCalledFunction();
1120
1121 for (const Argument &Arg : CalledFunc->args())
1122 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1123 NoAliasArgs.push_back(&Arg);
1124
1125 if (NoAliasArgs.empty())
1126 return;
1127
1128 // To do a good job, if a noalias variable is captured, we need to know if
1129 // the capture point dominates the particular use we're considering.
1130 DominatorTree DT;
1131 DT.recalculate(const_cast<Function&>(*CalledFunc));
1132
1133 // noalias indicates that pointer values based on the argument do not alias
1134 // pointer values which are not based on it. So we add a new "scope" for each
1135 // noalias function argument. Accesses using pointers based on that argument
1136 // become part of that alias scope, accesses using pointers not based on that
1137 // argument are tagged as noalias with that scope.
1138
1140 MDBuilder MDB(CalledFunc->getContext());
1141
1142 // Create a new scope domain for this function.
1143 MDNode *NewDomain =
1144 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
1145 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
1146 const Argument *A = NoAliasArgs[i];
1147
1148 std::string Name = std::string(CalledFunc->getName());
1149 if (A->hasName()) {
1150 Name += ": %";
1151 Name += A->getName();
1152 } else {
1153 Name += ": argument ";
1154 Name += utostr(i);
1155 }
1156
1157 // Note: We always create a new anonymous root here. This is true regardless
1158 // of the linkage of the callee because the aliasing "scope" is not just a
1159 // property of the callee, but also all control dependencies in the caller.
1160 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
1161 NewScopes.insert(std::make_pair(A, NewScope));
1162
1163 if (UseNoAliasIntrinsic) {
1164 // Introduce a llvm.experimental.noalias.scope.decl for the noalias
1165 // argument.
1166 MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
1167 auto *NoAliasDecl =
1169 // Ignore the result for now. The result will be used when the
1170 // llvm.noalias intrinsic is introduced.
1171 (void)NoAliasDecl;
1172 }
1173 }
1174
1175 // Iterate over all new instructions in the map; for all memory-access
1176 // instructions, add the alias scope metadata.
1177 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
1178 VMI != VMIE; ++VMI) {
1179 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
1180 if (!VMI->second)
1181 continue;
1182
1183 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1184 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
1185 continue;
1186
1187 bool IsArgMemOnlyCall = false, IsFuncCall = false;
1189
1190 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1191 PtrArgs.push_back(LI->getPointerOperand());
1192 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1193 PtrArgs.push_back(SI->getPointerOperand());
1194 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1195 PtrArgs.push_back(VAAI->getPointerOperand());
1196 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1197 PtrArgs.push_back(CXI->getPointerOperand());
1198 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1199 PtrArgs.push_back(RMWI->getPointerOperand());
1200 else if (const auto *Call = dyn_cast<CallBase>(I)) {
1201 // If we know that the call does not access memory, then we'll still
1202 // know that about the inlined clone of this call site, and we don't
1203 // need to add metadata.
1204 if (Call->doesNotAccessMemory())
1205 continue;
1206
1207 IsFuncCall = true;
1208 if (CalleeAAR) {
1209 MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);
1210
1211 // We'll retain this knowledge without additional metadata.
1213 continue;
1214
1215 if (ME.onlyAccessesArgPointees())
1216 IsArgMemOnlyCall = true;
1217 }
1218
1219 for (Value *Arg : Call->args()) {
1220 // Only care about pointer arguments. If a noalias argument is
1221 // accessed through a non-pointer argument, it must be captured
1222 // first (e.g. via ptrtoint), and we protect against captures below.
1223 if (!Arg->getType()->isPointerTy())
1224 continue;
1225
1226 PtrArgs.push_back(Arg);
1227 }
1228 }
1229
1230 // If we found no pointers, then this instruction is not suitable for
1231 // pairing with an instruction to receive aliasing metadata.
1232 // However, if this is a call, this we might just alias with none of the
1233 // noalias arguments.
1234 if (PtrArgs.empty() && !IsFuncCall)
1235 continue;
1236
1237 // It is possible that there is only one underlying object, but you
1238 // need to go through several PHIs to see it, and thus could be
1239 // repeated in the Objects list.
1242
1243 for (const Value *V : PtrArgs) {
1245 getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1246
1247 for (const Value *O : Objects)
1248 ObjSet.insert(O);
1249 }
1250
1251 // Figure out if we're derived from anything that is not a noalias
1252 // argument.
1253 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
1254 UsesUnknownObject = false;
1255 for (const Value *V : ObjSet) {
1256 // Is this value a constant that cannot be derived from any pointer
1257 // value (we need to exclude constant expressions, for example, that
1258 // are formed from arithmetic on global symbols).
1259 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1260 isa<ConstantPointerNull>(V) ||
1261 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1262 if (IsNonPtrConst)
1263 continue;
1264
1265 // If this is anything other than a noalias argument, then we cannot
1266 // completely describe the aliasing properties using alias.scope
1267 // metadata (and, thus, won't add any).
1268 if (const Argument *A = dyn_cast<Argument>(V)) {
1269 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1270 UsesAliasingPtr = true;
1271 } else {
1272 UsesAliasingPtr = true;
1273 }
1274
1275 if (isEscapeSource(V)) {
1276 // An escape source can only alias with a noalias argument if it has
1277 // been captured beforehand.
1278 RequiresNoCaptureBefore = true;
1279 } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
1280 // If this is neither an escape source, nor some identified object
1281 // (which cannot directly alias a noalias argument), nor some other
1282 // argument (which, by definition, also cannot alias a noalias
1283 // argument), conservatively do not make any assumptions.
1284 UsesUnknownObject = true;
1285 }
1286 }
1287
1288 // Nothing we can do if the used underlying object cannot be reliably
1289 // determined.
1290 if (UsesUnknownObject)
1291 continue;
1292
1293 // A function call can always get captured noalias pointers (via other
1294 // parameters, globals, etc.).
1295 if (IsFuncCall && !IsArgMemOnlyCall)
1296 RequiresNoCaptureBefore = true;
1297
1298 // First, we want to figure out all of the sets with which we definitely
1299 // don't alias. Iterate over all noalias set, and add those for which:
1300 // 1. The noalias argument is not in the set of objects from which we
1301 // definitely derive.
1302 // 2. The noalias argument has not yet been captured.
1303 // An arbitrary function that might load pointers could see captured
1304 // noalias arguments via other noalias arguments or globals, and so we
1305 // must always check for prior capture.
1306 for (const Argument *A : NoAliasArgs) {
1307 if (ObjSet.contains(A))
1308 continue; // May be based on a noalias argument.
1309
1310 // It might be tempting to skip the PointerMayBeCapturedBefore check if
1311 // A->hasNoCaptureAttr() is true, but this is incorrect because
1312 // nocapture only guarantees that no copies outlive the function, not
1313 // that the value cannot be locally captured.
1314 if (!RequiresNoCaptureBefore ||
1315 !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
1316 /* StoreCaptures */ false, I, &DT))
1317 NoAliases.push_back(NewScopes[A]);
1318 }
1319
1320 if (!NoAliases.empty())
1321 NI->setMetadata(LLVMContext::MD_noalias,
1323 NI->getMetadata(LLVMContext::MD_noalias),
1324 MDNode::get(CalledFunc->getContext(), NoAliases)));
1325
1326 // Next, we want to figure out all of the sets to which we might belong.
1327 // We might belong to a set if the noalias argument is in the set of
1328 // underlying objects. If there is some non-noalias argument in our list
1329 // of underlying objects, then we cannot add a scope because the fact
1330 // that some access does not alias with any set of our noalias arguments
1331 // cannot itself guarantee that it does not alias with this access
1332 // (because there is some pointer of unknown origin involved and the
1333 // other access might also depend on this pointer). We also cannot add
1334 // scopes to arbitrary functions unless we know they don't access any
1335 // non-parameter pointer-values.
1336 bool CanAddScopes = !UsesAliasingPtr;
1337 if (CanAddScopes && IsFuncCall)
1338 CanAddScopes = IsArgMemOnlyCall;
1339
1340 if (CanAddScopes)
1341 for (const Argument *A : NoAliasArgs) {
1342 if (ObjSet.count(A))
1343 Scopes.push_back(NewScopes[A]);
1344 }
1345
1346 if (!Scopes.empty())
1347 NI->setMetadata(
1348 LLVMContext::MD_alias_scope,
1349 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1350 MDNode::get(CalledFunc->getContext(), Scopes)));
1351 }
1352 }
1353}
1354
1356 ReturnInst *End) {
1357
1358 assert(Begin->getParent() == End->getParent() &&
1359 "Expected to be in same basic block!");
1360 auto BeginIt = Begin->getIterator();
1361 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
1363 ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
1364}
1365
1366// Add attributes from CB params and Fn attributes that can always be propagated
1367// to the corresponding argument / inner callbases.
1369 ValueToValueMapTy &VMap,
1370 ClonedCodeInfo &InlinedFunctionInfo) {
1371 auto *CalledFunction = CB.getCalledFunction();
1372 auto &Context = CalledFunction->getContext();
1373
1374 // Collect valid attributes for all params.
1375 SmallVector<AttrBuilder> ValidObjParamAttrs, ValidExactParamAttrs;
1376 bool HasAttrToPropagate = false;
1377
1378 // Attributes we can only propagate if the exact parameter is forwarded.
1379 // We can propagate both poison generating and UB generating attributes
1380 // without any extra checks. The only attribute that is tricky to propagate
1381 // is `noundef` (skipped for now) as that can create new UB where previous
1382 // behavior was just using a poison value.
1383 static const Attribute::AttrKind ExactAttrsToPropagate[] = {
1384 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
1385 Attribute::NonNull, Attribute::Alignment, Attribute::Range};
1386
1387 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {
1388 ValidObjParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1389 ValidExactParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1390 // Access attributes can be propagated to any param with the same underlying
1391 // object as the argument.
1392 if (CB.paramHasAttr(I, Attribute::ReadNone))
1393 ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);
1394 if (CB.paramHasAttr(I, Attribute::ReadOnly))
1395 ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);
1396
1397 for (Attribute::AttrKind AK : ExactAttrsToPropagate) {
1398 Attribute Attr = CB.getParamAttr(I, AK);
1399 if (Attr.isValid())
1400 ValidExactParamAttrs.back().addAttribute(Attr);
1401 }
1402
1403 HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();
1404 HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();
1405 }
1406
1407 // Won't be able to propagate anything.
1408 if (!HasAttrToPropagate)
1409 return;
1410
1411 for (BasicBlock &BB : *CalledFunction) {
1412 for (Instruction &Ins : BB) {
1413 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1414 if (!InnerCB)
1415 continue;
1416 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));
1417 if (!NewInnerCB)
1418 continue;
1419 // The InnerCB might have be simplified during the inlining
1420 // process which can make propagation incorrect.
1421 if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))
1422 continue;
1423
1424 AttributeList AL = NewInnerCB->getAttributes();
1425 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {
1426 // It's unsound or requires special handling to propagate
1427 // attributes to byval arguments. Even if CalledFunction
1428 // doesn't e.g. write to the argument (readonly), the call to
1429 // NewInnerCB may write to its by-value copy.
1430 if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))
1431 continue;
1432
1433 // Don't bother propagating attrs to constants.
1434 if (match(NewInnerCB->getArgOperand(I),
1436 continue;
1437
1438 // Check if the underlying value for the parameter is an argument.
1439 const Argument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(I));
1440 unsigned ArgNo;
1441 if (Arg) {
1442 ArgNo = Arg->getArgNo();
1443 // For dereferenceable, dereferenceable_or_null, align, etc...
1444 // we don't want to propagate if the existing param has the same
1445 // attribute with "better" constraints. So remove from the
1446 // new AL if the region of the existing param is larger than
1447 // what we can propagate.
1448 AttrBuilder NewAB{
1449 Context, AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};
1450 if (AL.getParamDereferenceableBytes(I) >
1451 NewAB.getDereferenceableBytes())
1452 NewAB.removeAttribute(Attribute::Dereferenceable);
1453 if (AL.getParamDereferenceableOrNullBytes(I) >
1454 NewAB.getDereferenceableOrNullBytes())
1455 NewAB.removeAttribute(Attribute::DereferenceableOrNull);
1456 if (AL.getParamAlignment(I).valueOrOne() >
1457 NewAB.getAlignment().valueOrOne())
1458 NewAB.removeAttribute(Attribute::Alignment);
1459 if (auto ExistingRange = AL.getParamRange(I)) {
1460 if (auto NewRange = NewAB.getRange()) {
1461 ConstantRange CombinedRange =
1462 ExistingRange->intersectWith(*NewRange);
1463 NewAB.removeAttribute(Attribute::Range);
1464 NewAB.addRangeAttr(CombinedRange);
1465 }
1466 }
1467 AL = AL.addParamAttributes(Context, I, NewAB);
1468 } else if (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {
1469 // Check if the underlying value for the parameter is an argument.
1470 const Value *UnderlyingV =
1471 getUnderlyingObject(InnerCB->getArgOperand(I));
1472 Arg = dyn_cast<Argument>(UnderlyingV);
1473 if (!Arg)
1474 continue;
1475 ArgNo = Arg->getArgNo();
1476 } else {
1477 continue;
1478 }
1479
1480 // If so, propagate its access attributes.
1481 AL = AL.addParamAttributes(Context, I, ValidObjParamAttrs[ArgNo]);
1482
1483 // We can have conflicting attributes from the inner callsite and
1484 // to-be-inlined callsite. In that case, choose the most
1485 // restrictive.
1486
1487 // readonly + writeonly means we can never deref so make readnone.
1488 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&
1489 AL.hasParamAttr(I, Attribute::WriteOnly))
1490 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);
1491
1492 // If have readnone, need to clear readonly/writeonly
1493 if (AL.hasParamAttr(I, Attribute::ReadNone)) {
1494 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);
1495 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);
1496 }
1497
1498 // Writable cannot exist in conjunction w/ readonly/readnone
1499 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||
1500 AL.hasParamAttr(I, Attribute::ReadNone))
1501 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);
1502 }
1503 NewInnerCB->setAttributes(AL);
1504 }
1505 }
1506}
1507
1508// Only allow these white listed attributes to be propagated back to the
1509// callee. This is because other attributes may only be valid on the call
1510// itself, i.e. attributes such as signext and zeroext.
1511
1512// Attributes that are always okay to propagate as if they are violated its
1513// immediate UB.
1515 AttrBuilder Valid(CB.getContext());
1516 if (auto DerefBytes = CB.getRetDereferenceableBytes())
1517 Valid.addDereferenceableAttr(DerefBytes);
1518 if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
1519 Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1520 if (CB.hasRetAttr(Attribute::NoAlias))
1521 Valid.addAttribute(Attribute::NoAlias);
1522 if (CB.hasRetAttr(Attribute::NoUndef))
1523 Valid.addAttribute(Attribute::NoUndef);
1524 return Valid;
1525}
1526
1527// Attributes that need additional checks as propagating them may change
1528// behavior or cause new UB.
1530 AttrBuilder Valid(CB.getContext());
1531 if (CB.hasRetAttr(Attribute::NonNull))
1532 Valid.addAttribute(Attribute::NonNull);
1533 if (CB.hasRetAttr(Attribute::Alignment))
1534 Valid.addAlignmentAttr(CB.getRetAlign());
1535 if (std::optional<ConstantRange> Range = CB.getRange())
1536 Valid.addRangeAttr(*Range);
1537 return Valid;
1538}
1539
1541 ClonedCodeInfo &InlinedFunctionInfo) {
1544 if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
1545 return;
1546 auto *CalledFunction = CB.getCalledFunction();
1547 auto &Context = CalledFunction->getContext();
1548
1549 for (auto &BB : *CalledFunction) {
1550 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1551 if (!RI || !isa<CallBase>(RI->getOperand(0)))
1552 continue;
1553 auto *RetVal = cast<CallBase>(RI->getOperand(0));
1554 // Check that the cloned RetVal exists and is a call, otherwise we cannot
1555 // add the attributes on the cloned RetVal. Simplification during inlining
1556 // could have transformed the cloned instruction.
1557 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1558 if (!NewRetVal)
1559 continue;
1560
1561 // The RetVal might have be simplified during the inlining
1562 // process which can make propagation incorrect.
1563 if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))
1564 continue;
1565 // Backward propagation of attributes to the returned value may be incorrect
1566 // if it is control flow dependent.
1567 // Consider:
1568 // @callee {
1569 // %rv = call @foo()
1570 // %rv2 = call @bar()
1571 // if (%rv2 != null)
1572 // return %rv2
1573 // if (%rv == null)
1574 // exit()
1575 // return %rv
1576 // }
1577 // caller() {
1578 // %val = call nonnull @callee()
1579 // }
1580 // Here we cannot add the nonnull attribute on either foo or bar. So, we
1581 // limit the check to both RetVal and RI are in the same basic block and
1582 // there are no throwing/exiting instructions between these instructions.
1583 if (RI->getParent() != RetVal->getParent() ||
1585 continue;
1586 // Add to the existing attributes of NewRetVal, i.e. the cloned call
1587 // instruction.
1588 // NB! When we have the same attribute already existing on NewRetVal, but
1589 // with a differing value, the AttributeList's merge API honours the already
1590 // existing attribute value (i.e. attributes such as dereferenceable,
1591 // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1592 AttributeList AL = NewRetVal->getAttributes();
1593 if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
1594 ValidUB.removeAttribute(Attribute::Dereferenceable);
1595 if (ValidUB.getDereferenceableOrNullBytes() <
1596 AL.getRetDereferenceableOrNullBytes())
1597 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
1598 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1599 // Attributes that may generate poison returns are a bit tricky. If we
1600 // propagate them, other uses of the callsite might have their behavior
1601 // change or cause UB (if they have noundef) b.c of the new potential
1602 // poison.
1603 // Take the following three cases:
1604 //
1605 // 1)
1606 // define nonnull ptr @foo() {
1607 // %p = call ptr @bar()
1608 // call void @use(ptr %p) willreturn nounwind
1609 // ret ptr %p
1610 // }
1611 //
1612 // 2)
1613 // define noundef nonnull ptr @foo() {
1614 // %p = call ptr @bar()
1615 // call void @use(ptr %p) willreturn nounwind
1616 // ret ptr %p
1617 // }
1618 //
1619 // 3)
1620 // define nonnull ptr @foo() {
1621 // %p = call noundef ptr @bar()
1622 // ret ptr %p
1623 // }
1624 //
1625 // In case 1, we can't propagate nonnull because poison value in @use may
1626 // change behavior or trigger UB.
1627 // In case 2, we don't need to be concerned about propagating nonnull, as
1628 // any new poison at @use will trigger UB anyways.
1629 // In case 3, we can never propagate nonnull because it may create UB due to
1630 // the noundef on @bar.
1631 if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
1632 ValidPG.removeAttribute(Attribute::Alignment);
1633 if (ValidPG.hasAttributes()) {
1634 Attribute CBRange = ValidPG.getAttribute(Attribute::Range);
1635 if (CBRange.isValid()) {
1636 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1637 if (NewRange.isValid()) {
1638 ValidPG.addRangeAttr(
1639 CBRange.getRange().intersectWith(NewRange.getRange()));
1640 }
1641 }
1642 // Three checks.
1643 // If the callsite has `noundef`, then a poison due to violating the
1644 // return attribute will create UB anyways so we can always propagate.
1645 // Otherwise, if the return value (callee to be inlined) has `noundef`, we
1646 // can't propagate as a new poison return will cause UB.
1647 // Finally, check if the return value has no uses whose behavior may
1648 // change/may cause UB if we potentially return poison. At the moment this
1649 // is implemented overly conservatively with a single-use check.
1650 // TODO: Update the single-use check to iterate through uses and only bail
1651 // if we have a potentially dangerous use.
1652
1653 if (CB.hasRetAttr(Attribute::NoUndef) ||
1654 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1655 NewAL = NewAL.addRetAttributes(Context, ValidPG);
1656 }
1657 NewRetVal->setAttributes(NewAL);
1658 }
1659}
1660
1661/// If the inlined function has non-byval align arguments, then
1662/// add @llvm.assume-based alignment assumptions to preserve this information.
1665 return;
1666
1668 auto &DL = CB.getDataLayout();
1669
1670 // To avoid inserting redundant assumptions, we should check for assumptions
1671 // already in the caller. To do this, we might need a DT of the caller.
1672 DominatorTree DT;
1673 bool DTCalculated = false;
1674
1675 Function *CalledFunc = CB.getCalledFunction();
1676 for (Argument &Arg : CalledFunc->args()) {
1677 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1678 Arg.hasNUses(0))
1679 continue;
1680 MaybeAlign Alignment = Arg.getParamAlign();
1681 if (!Alignment)
1682 continue;
1683
1684 if (!DTCalculated) {
1685 DT.recalculate(*CB.getCaller());
1686 DTCalculated = true;
1687 }
1688 // If we can already prove the asserted alignment in the context of the
1689 // caller, then don't bother inserting the assumption.
1690 Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1691 if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
1692 continue;
1693
1695 DL, ArgVal, Alignment->value());
1696 AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1697 }
1698}
1699
1700static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1701 Module *M, BasicBlock *InsertBlock,
1702 InlineFunctionInfo &IFI,
1703 Function *CalledFunc) {
1704 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1705
1706 Value *Size =
1707 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1708
1709 // Always generate a memcpy of alignment 1 here because we don't know
1710 // the alignment of the src pointer. Other optimizations can infer
1711 // better alignment.
1712 CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1713 /*SrcAlign*/ Align(1), Size);
1714
1715 // The verifier requires that all calls of debug-info-bearing functions
1716 // from debug-info-bearing functions have a debug location (for inlining
1717 // purposes). Assign a dummy location to satisfy the constraint.
1718 if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
1719 if (DISubprogram *SP = CalledFunc->getSubprogram())
1720 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1721}
1722
1723/// When inlining a call site that has a byval argument,
1724/// we have to make the implicit memcpy explicit by adding it.
1725static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1726 Instruction *TheCall,
1727 const Function *CalledFunc,
1728 InlineFunctionInfo &IFI,
1729 MaybeAlign ByValAlignment) {
1730 Function *Caller = TheCall->getFunction();
1731 const DataLayout &DL = Caller->getDataLayout();
1732
1733 // If the called function is readonly, then it could not mutate the caller's
1734 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1735 // temporary.
1736 if (CalledFunc->onlyReadsMemory()) {
1737 // If the byval argument has a specified alignment that is greater than the
1738 // passed in pointer, then we either have to round up the input pointer or
1739 // give up on this transformation.
1740 if (ByValAlignment.valueOrOne() == 1)
1741 return Arg;
1742
1743 AssumptionCache *AC =
1744 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1745
1746 // If the pointer is already known to be sufficiently aligned, or if we can
1747 // round it up to a larger alignment, then we don't need a temporary.
1748 if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
1749 *ByValAlignment)
1750 return Arg;
1751
1752 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1753 // for code quality, but rarely happens and is required for correctness.
1754 }
1755
1756 // Create the alloca. If we have DataLayout, use nice alignment.
1757 Align Alignment = DL.getPrefTypeAlign(ByValType);
1758
1759 // If the byval had an alignment specified, we *must* use at least that
1760 // alignment, as it is required by the byval argument (and uses of the
1761 // pointer inside the callee).
1762 if (ByValAlignment)
1763 Alignment = std::max(Alignment, *ByValAlignment);
1764
1765 AllocaInst *NewAlloca =
1766 new AllocaInst(ByValType, Arg->getType()->getPointerAddressSpace(),
1767 nullptr, Alignment, Arg->getName());
1768 NewAlloca->insertBefore(Caller->begin()->begin());
1769 IFI.StaticAllocas.push_back(NewAlloca);
1770
1771 // Uses of the argument in the function should use our new alloca
1772 // instead.
1773 return NewAlloca;
1774}
1775
1776// Check whether this Value is used by a lifetime intrinsic.
1778 for (User *U : V->users())
1779 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1780 if (II->isLifetimeStartOrEnd())
1781 return true;
1782 return false;
1783}
1784
1785// Check whether the given alloca already has
1786// lifetime.start or lifetime.end intrinsics.
1788 Type *Ty = AI->getType();
1789 Type *Int8PtrTy =
1790 PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
1791 if (Ty == Int8PtrTy)
1792 return isUsedByLifetimeMarker(AI);
1793
1794 // Do a scan to find all the casts to i8*.
1795 for (User *U : AI->users()) {
1796 if (U->getType() != Int8PtrTy) continue;
1797 if (U->stripPointerCasts() != AI) continue;
1799 return true;
1800 }
1801 return false;
1802}
1803
1804/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1805/// block. Allocas used in inalloca calls and allocas of dynamic array size
1806/// cannot be static.
1808 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1809}
1810
1811/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1812/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1813static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1814 LLVMContext &Ctx,
1816 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1817 return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1818 OrigDL.getScope(), IA);
1819}
1820
1821/// Update inlined instructions' line numbers to
1822/// to encode location where these instructions are inlined.
1824 Instruction *TheCall, bool CalleeHasDebugInfo) {
1825 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1826 if (!TheCallDL)
1827 return;
1828
1829 auto &Ctx = Fn->getContext();
1830 DILocation *InlinedAtNode = TheCallDL;
1831
1832 // Create a unique call site, not to be confused with any other call from the
1833 // same location.
1834 InlinedAtNode = DILocation::getDistinct(
1835 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1836 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1837
1838 // Cache the inlined-at nodes as they're built so they are reused, without
1839 // this every instruction's inlined-at chain would become distinct from each
1840 // other.
1842
1843 // Check if we are not generating inline line tables and want to use
1844 // the call site location instead.
1845 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1846
1847 // Helper-util for updating the metadata attached to an instruction.
1848 auto UpdateInst = [&](Instruction &I) {
1849 // Loop metadata needs to be updated so that the start and end locs
1850 // reference inlined-at locations.
1851 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1852 &IANodes](Metadata *MD) -> Metadata * {
1853 if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1854 return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1855 return MD;
1856 };
1857 updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
1858
1859 if (!NoInlineLineTables)
1860 if (DebugLoc DL = I.getDebugLoc()) {
1861 DebugLoc IDL =
1862 inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
1863 I.setDebugLoc(IDL);
1864 return;
1865 }
1866
1867 if (CalleeHasDebugInfo && !NoInlineLineTables)
1868 return;
1869
1870 // If the inlined instruction has no line number, or if inline info
1871 // is not being generated, make it look as if it originates from the call
1872 // location. This is important for ((__always_inline, __nodebug__))
1873 // functions which must use caller location for all instructions in their
1874 // function body.
1875
1876 // Don't update static allocas, as they may get moved later.
1877 if (auto *AI = dyn_cast<AllocaInst>(&I))
1879 return;
1880
1881 // Do not force a debug loc for pseudo probes, since they do not need to
1882 // be debuggable, and also they are expected to have a zero/null dwarf
1883 // discriminator at this point which could be violated otherwise.
1884 if (isa<PseudoProbeInst>(I))
1885 return;
1886
1887 I.setDebugLoc(TheCallDL);
1888 };
1889
1890 // Helper-util for updating debug-info records attached to instructions.
1891 auto UpdateDVR = [&](DbgRecord *DVR) {
1892 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");
1893 if (NoInlineLineTables) {
1894 DVR->setDebugLoc(TheCallDL);
1895 return;
1896 }
1897 DebugLoc DL = DVR->getDebugLoc();
1898 DebugLoc IDL =
1899 inlineDebugLoc(DL, InlinedAtNode,
1900 DVR->getMarker()->getParent()->getContext(), IANodes);
1901 DVR->setDebugLoc(IDL);
1902 };
1903
1904 // Iterate over all instructions, updating metadata and debug-info records.
1905 for (; FI != Fn->end(); ++FI) {
1906 for (Instruction &I : *FI) {
1907 UpdateInst(I);
1908 for (DbgRecord &DVR : I.getDbgRecordRange()) {
1909 UpdateDVR(&DVR);
1910 }
1911 }
1912
1913 // Remove debug info intrinsics if we're not keeping inline info.
1914 if (NoInlineLineTables) {
1915 BasicBlock::iterator BI = FI->begin();
1916 while (BI != FI->end()) {
1917 if (isa<DbgInfoIntrinsic>(BI)) {
1918 BI = BI->eraseFromParent();
1919 continue;
1920 } else {
1921 BI->dropDbgRecords();
1922 }
1923 ++BI;
1924 }
1925 }
1926 }
1927}
1928
1929#undef DEBUG_TYPE
1930#define DEBUG_TYPE "assignment-tracking"
1931/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
1933 const CallBase &CB) {
1934 at::StorageToVarsMap EscapedLocals;
1936
1937 LLVM_DEBUG(
1938 errs() << "# Finding caller local variables escaped by callee\n");
1939 for (const Value *Arg : CB.args()) {
1940 LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
1941 if (!Arg->getType()->isPointerTy()) {
1942 LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
1943 continue;
1944 }
1945
1946 const Instruction *I = dyn_cast<Instruction>(Arg);
1947 if (!I) {
1948 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
1949 continue;
1950 }
1951
1952 // Walk back to the base storage.
1953 assert(Arg->getType()->isPtrOrPtrVectorTy());
1954 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
1955 const AllocaInst *Base = dyn_cast<AllocaInst>(
1956 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
1957 if (!Base) {
1958 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
1959 continue;
1960 }
1961
1962 assert(Base);
1963 LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
1964 // We only need to process each base address once - skip any duplicates.
1965 if (!SeenBases.insert(Base).second)
1966 continue;
1967
1968 // Find all local variables associated with the backing storage.
1969 auto CollectAssignsForStorage = [&](auto *DbgAssign) {
1970 // Skip variables from inlined functions - they are not local variables.
1971 if (DbgAssign->getDebugLoc().getInlinedAt())
1972 return;
1973 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
1974 EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
1975 };
1976 for_each(at::getAssignmentMarkers(Base), CollectAssignsForStorage);
1977 for_each(at::getDVRAssignmentMarkers(Base), CollectAssignsForStorage);
1978 }
1979 return EscapedLocals;
1980}
1981
1983 const CallBase &CB) {
1984 LLVM_DEBUG(errs() << "trackInlinedStores into "
1985 << Start->getParent()->getName() << " from "
1986 << CB.getCalledFunction()->getName() << "\n");
1987 const DataLayout &DL = CB.getDataLayout();
1989}
1990
1991/// Update inlined instructions' DIAssignID metadata. We need to do this
1992/// otherwise a function inlined more than once into the same function
1993/// will cause DIAssignID to be shared by many instructions.
1996 // Loop over all the inlined instructions. If we find a DIAssignID
1997 // attachment or use, replace it with a new version.
1998 for (auto BBI = Start; BBI != End; ++BBI) {
1999 for (Instruction &I : *BBI)
2000 at::remapAssignID(Map, I);
2001 }
2002}
2003#undef DEBUG_TYPE
2004#define DEBUG_TYPE "inline-function"
2005
2006/// Update the block frequencies of the caller after a callee has been inlined.
2007///
2008/// Each block cloned into the caller has its block frequency scaled by the
2009/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
2010/// callee's entry block gets the same frequency as the callsite block and the
2011/// relative frequencies of all cloned blocks remain the same after cloning.
2012static void updateCallerBFI(BasicBlock *CallSiteBlock,
2013 const ValueToValueMapTy &VMap,
2014 BlockFrequencyInfo *CallerBFI,
2015 BlockFrequencyInfo *CalleeBFI,
2016 const BasicBlock &CalleeEntryBlock) {
2018 for (auto Entry : VMap) {
2019 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
2020 continue;
2021 auto *OrigBB = cast<BasicBlock>(Entry.first);
2022 auto *ClonedBB = cast<BasicBlock>(Entry.second);
2023 BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
2024 if (!ClonedBBs.insert(ClonedBB).second) {
2025 // Multiple blocks in the callee might get mapped to one cloned block in
2026 // the caller since we prune the callee as we clone it. When that happens,
2027 // we want to use the maximum among the original blocks' frequencies.
2028 BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
2029 if (NewFreq > Freq)
2030 Freq = NewFreq;
2031 }
2032 CallerBFI->setBlockFreq(ClonedBB, Freq);
2033 }
2034 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
2035 CallerBFI->setBlockFreqAndScale(
2036 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
2037}
2038
2039/// Update the branch metadata for cloned call instructions.
2040static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
2041 const ProfileCount &CalleeEntryCount,
2042 const CallBase &TheCall, ProfileSummaryInfo *PSI,
2043 BlockFrequencyInfo *CallerBFI) {
2044 if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
2045 return;
2046 auto CallSiteCount =
2047 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
2048 int64_t CallCount =
2049 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
2050 updateProfileCallee(Callee, -CallCount, &VMap);
2051}
2052
2054 Function *Callee, int64_t EntryDelta,
2056 auto CalleeCount = Callee->getEntryCount();
2057 if (!CalleeCount)
2058 return;
2059
2060 const uint64_t PriorEntryCount = CalleeCount->getCount();
2061
2062 // Since CallSiteCount is an estimate, it could exceed the original callee
2063 // count and has to be set to 0 so guard against underflow.
2064 const uint64_t NewEntryCount =
2065 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
2066 ? 0
2067 : PriorEntryCount + EntryDelta;
2068
2069 auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,
2070 const uint64_t PriorEntryCount) {
2072 if (VPtr)
2073 scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);
2074 };
2075
2076 // During inlining ?
2077 if (VMap) {
2078 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
2079 for (auto Entry : *VMap) {
2080 if (isa<CallInst>(Entry.first))
2081 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
2082 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
2083 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
2084 }
2085
2086 if (isa<InvokeInst>(Entry.first))
2087 if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
2088 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2089 updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);
2090 }
2091 }
2092 }
2093
2094 if (EntryDelta) {
2095 Callee->setEntryCount(NewEntryCount);
2096
2097 for (BasicBlock &BB : *Callee)
2098 // No need to update the callsite if it is pruned during inlining.
2099 if (!VMap || VMap->count(&BB))
2100 for (Instruction &I : BB) {
2101 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2102 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2103 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2104 }
2105 if (InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
2106 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2107 updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);
2108 }
2109 }
2110 }
2111}
2112
2113/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
2114/// result is implicitly consumed by a call to retainRV or claimRV immediately
2115/// after the call. This function inlines the retainRV/claimRV calls.
2116///
2117/// There are three cases to consider:
2118///
2119/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
2120/// object in the callee return block, the autoreleaseRV call and the
2121/// retainRV/claimRV call in the caller cancel out. If the call in the caller
2122/// is a claimRV call, a call to objc_release is emitted.
2123///
2124/// 2. If there is a call in the callee return block that doesn't have operand
2125/// bundle "clang.arc.attachedcall", the operand bundle on the original call
2126/// is transferred to the call in the callee.
2127///
2128/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
2129/// a retainRV call.
2130static void
2132 const SmallVectorImpl<ReturnInst *> &Returns) {
2133 assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
2134 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2135 IsUnsafeClaimRV = !IsRetainRV;
2136
2137 for (auto *RI : Returns) {
2138 Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
2139 bool InsertRetainCall = IsRetainRV;
2140 IRBuilder<> Builder(RI->getContext());
2141
2142 // Walk backwards through the basic block looking for either a matching
2143 // autoreleaseRV call or an unannotated call.
2144 auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
2145 RI->getParent()->rend());
2146 for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
2147 // Ignore casts.
2148 if (isa<CastInst>(I))
2149 continue;
2150
2151 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
2152 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2153 !II->hasNUses(0) ||
2154 objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
2155 break;
2156
2157 // If we've found a matching authoreleaseRV call:
2158 // - If claimRV is attached to the call, insert a call to objc_release
2159 // and erase the autoreleaseRV call.
2160 // - If retainRV is attached to the call, just erase the autoreleaseRV
2161 // call.
2162 if (IsUnsafeClaimRV) {
2163 Builder.SetInsertPoint(II);
2164 Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd);
2165 }
2166 II->eraseFromParent();
2167 InsertRetainCall = false;
2168 break;
2169 }
2170
2171 auto *CI = dyn_cast<CallInst>(&I);
2172
2173 if (!CI)
2174 break;
2175
2176 if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
2178 break;
2179
2180 // If we've found an unannotated call that defines RetOpnd, add a
2181 // "clang.arc.attachedcall" operand bundle.
2182 Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
2183 OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
2184 auto *NewCall = CallBase::addOperandBundle(
2185 CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI->getIterator());
2186 NewCall->copyMetadata(*CI);
2187 CI->replaceAllUsesWith(NewCall);
2188 CI->eraseFromParent();
2189 InsertRetainCall = false;
2190 break;
2191 }
2192
2193 if (InsertRetainCall) {
2194 // The retainRV is attached to the call and we've failed to find a
2195 // matching autoreleaseRV or an annotated call in the callee. Emit a call
2196 // to objc_retain.
2197 Builder.SetInsertPoint(RI);
2198 Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd);
2199 }
2200 }
2201}
2202
2203// In contextual profiling, when an inline succeeds, we want to remap the
2204// indices of the callee into the index space of the caller. We can't just leave
2205// them as-is because the same callee may appear in other places in this caller
2206// (other callsites), and its (callee's) counters and sub-contextual profile
2207// tree would be potentially different.
2208// Not all BBs of the callee may survive the opportunistic DCE InlineFunction
2209// does (same goes for callsites in the callee).
2210// We will return a pair of vectors, one for basic block IDs and one for
2211// callsites. For such a vector V, V[Idx] will be -1 if the callee
2212// instrumentation with index Idx did not survive inlining, and a new value
2213// otherwise.
2214// This function will update the caller's instrumentation intrinsics
2215// accordingly, mapping indices as described above. We also replace the "name"
2216// operand because we use it to distinguish between "own" instrumentation and
2217// "from callee" instrumentation when performing the traversal of the CFG of the
2218// caller. We traverse depth-first from the callsite's BB and up to the point we
2219// hit BBs owned by the caller.
2220// The return values will be then used to update the contextual
2221// profile. Note: we only update the "name" and "index" operands in the
2222// instrumentation intrinsics, we leave the hash and total nr of indices as-is,
2223// it's not worth updating those.
2224static const std::pair<std::vector<int64_t>, std::vector<int64_t>>
2226 PGOContextualProfile &CtxProf, uint32_t CalleeCounters,
2227 uint32_t CalleeCallsites) {
2228 // We'll allocate a new ID to imported callsite counters and callsites. We're
2229 // using -1 to indicate a counter we delete. Most likely the entry ID, for
2230 // example, will be deleted - we don't want 2 IDs in the same BB, and the
2231 // entry would have been cloned in the callsite's old BB.
2232 std::vector<int64_t> CalleeCounterMap;
2233 std::vector<int64_t> CalleeCallsiteMap;
2234 CalleeCounterMap.resize(CalleeCounters, -1);
2235 CalleeCallsiteMap.resize(CalleeCallsites, -1);
2236
2237 auto RewriteInstrIfNeeded = [&](InstrProfIncrementInst &Ins) -> bool {
2238 if (Ins.getNameValue() == &Caller)
2239 return false;
2240 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2241 if (CalleeCounterMap[OldID] == -1)
2242 CalleeCounterMap[OldID] = CtxProf.allocateNextCounterIndex(Caller);
2243 const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);
2244
2245 Ins.setNameValue(&Caller);
2246 Ins.setIndex(NewID);
2247 return true;
2248 };
2249
2250 auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {
2251 if (Ins.getNameValue() == &Caller)
2252 return false;
2253 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2254 if (CalleeCallsiteMap[OldID] == -1)
2255 CalleeCallsiteMap[OldID] = CtxProf.allocateNextCallsiteIndex(Caller);
2256 const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);
2257
2258 Ins.setNameValue(&Caller);
2259 Ins.setIndex(NewID);
2260 return true;
2261 };
2262
2263 std::deque<BasicBlock *> Worklist;
2265 // We will traverse the BBs starting from the callsite BB. The callsite BB
2266 // will have at least a BB ID - maybe its own, and in any case the one coming
2267 // from the cloned function's entry BB. The other BBs we'll start seeing from
2268 // there on may or may not have BB IDs. BBs with IDs belonging to our caller
2269 // are definitely not coming from the imported function and form a boundary
2270 // past which we don't need to traverse anymore. BBs may have no
2271 // instrumentation (because we originally inserted instrumentation as per
2272 // MST), in which case we'll traverse past them. An invariant we'll keep is
2273 // that a BB will have at most 1 BB ID. For example, in the callsite BB, we
2274 // will delete the callee BB's instrumentation. This doesn't result in
2275 // information loss: the entry BB of the callee will have the same count as
2276 // the callsite's BB. At the end of this traversal, all the callee's
2277 // instrumentation would be mapped into the caller's instrumentation index
2278 // space. Some of the callee's counters may be deleted (as mentioned, this
2279 // should result in no loss of information).
2280 Worklist.push_back(StartBB);
2281 while (!Worklist.empty()) {
2282 auto *BB = Worklist.front();
2283 Worklist.pop_front();
2284 bool Changed = false;
2285 auto *BBID = CtxProfAnalysis::getBBInstrumentation(*BB);
2286 if (BBID) {
2287 Changed |= RewriteInstrIfNeeded(*BBID);
2288 // this may be the entryblock from the inlined callee, coming into a BB
2289 // that didn't have instrumentation because of MST decisions. Let's make
2290 // sure it's placed accordingly. This is a noop elsewhere.
2291 BBID->moveBefore(&*BB->getFirstInsertionPt());
2292 }
2293 for (auto &I : llvm::make_early_inc_range(*BB)) {
2294 if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
2295 if (isa<InstrProfIncrementInstStep>(Inc)) {
2296 // Step instrumentation is used for select instructions. Inlining may
2297 // have propagated a constant resulting in the condition of the select
2298 // being resolved, case in which function cloning resolves the value
2299 // of the select, and elides the select instruction. If that is the
2300 // case, the step parameter of the instrumentation will reflect that.
2301 // We can delete the instrumentation in that case.
2302 if (isa<Constant>(Inc->getStep())) {
2303 assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
2304 Inc->eraseFromParent();
2305 } else {
2306 assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
2307 RewriteInstrIfNeeded(*Inc);
2308 }
2309 } else if (Inc != BBID) {
2310 // If we're here it means that the BB had more than 1 IDs, presumably
2311 // some coming from the callee. We "made up our mind" to keep the
2312 // first one (which may or may not have been originally the caller's).
2313 // All the others are superfluous and we delete them.
2314 Inc->eraseFromParent();
2315 Changed = true;
2316 }
2317 } else if (auto *CS = dyn_cast<InstrProfCallsite>(&I)) {
2318 Changed |= RewriteCallsiteInsIfNeeded(*CS);
2319 }
2320 }
2321 if (!BBID || Changed)
2322 for (auto *Succ : successors(BB))
2323 if (Seen.insert(Succ).second)
2324 Worklist.push_back(Succ);
2325 }
2326
2327 assert(
2328 llvm::all_of(CalleeCounterMap, [&](const auto &V) { return V != 0; }) &&
2329 "Counter index mapping should be either to -1 or to non-zero index, "
2330 "because the 0 "
2331 "index corresponds to the entry BB of the caller");
2332 assert(
2333 llvm::all_of(CalleeCallsiteMap, [&](const auto &V) { return V != 0; }) &&
2334 "Callsite index mapping should be either to -1 or to non-zero index, "
2335 "because there should have been at least a callsite - the inlined one "
2336 "- which would have had a 0 index.");
2337
2338 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
2339}
2340
2341// Inline. If successful, update the contextual profile (if a valid one is
2342// given).
2343// The contextual profile data is organized in trees, as follows:
2344// - each node corresponds to a function
2345// - the root of each tree corresponds to an "entrypoint" - e.g.
2346// RPC handler for server side
2347// - the path from the root to a node is a particular call path
2348// - the counters stored in a node are counter values observed in that
2349// particular call path ("context")
2350// - the edges between nodes are annotated with callsite IDs.
2351//
2352// Updating the contextual profile after an inlining means, at a high level,
2353// copying over the data of the callee, **intentionally without any value
2354// scaling**, and copying over the callees of the inlined callee.
2356 PGOContextualProfile &CtxProf,
2357 bool MergeAttributes,
2358 AAResults *CalleeAAR,
2359 bool InsertLifetime,
2360 Function *ForwardVarArgsTo) {
2361 if (!CtxProf)
2362 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2363 ForwardVarArgsTo);
2364
2365 auto &Caller = *CB.getCaller();
2366 auto &Callee = *CB.getCalledFunction();
2367 auto *StartBB = CB.getParent();
2368
2369 // Get some preliminary data about the callsite before it might get inlined.
2370 // Inlining shouldn't delete the callee, but it's cleaner (and low-cost) to
2371 // get this data upfront and rely less on InlineFunction's behavior.
2372 const auto CalleeGUID = AssignGUIDPass::getGUID(Callee);
2373 auto *CallsiteIDIns = CtxProfAnalysis::getCallsiteInstrumentation(CB);
2374 const auto CallsiteID =
2375 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2376
2377 const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);
2378 const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);
2379
2380 auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2381 ForwardVarArgsTo);
2382 if (!Ret.isSuccess())
2383 return Ret;
2384
2385 // Inlining succeeded, we don't need the instrumentation of the inlined
2386 // callsite.
2387 CallsiteIDIns->eraseFromParent();
2388
2389 // Assinging Maps and then capturing references into it in the lambda because
2390 // captured structured bindings are a C++20 extension. We do also need a
2391 // capture here, though.
2392 const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,
2393 NumCalleeCounters, NumCalleeCallsites);
2394 const uint32_t NewCountersSize = CtxProf.getNumCounters(Caller);
2395
2396 auto Updater = [&](PGOCtxProfContext &Ctx) {
2397 assert(Ctx.guid() == AssignGUIDPass::getGUID(Caller));
2398 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2399 assert(
2400 (Ctx.counters().size() +
2401 llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==
2402 NewCountersSize) &&
2403 "The caller's counters size should have grown by the number of new "
2404 "distinct counters inherited from the inlined callee.");
2405 Ctx.resizeCounters(NewCountersSize);
2406 // If the callsite wasn't exercised in this context, the value of the
2407 // counters coming from it is 0 - which it is right now, after resizing them
2408 // - and so we're done.
2409 auto CSIt = Ctx.callsites().find(CallsiteID);
2410 if (CSIt == Ctx.callsites().end())
2411 return;
2412 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2413 // The callsite was exercised, but not with this callee (so presumably this
2414 // is an indirect callsite). Again, we're done here.
2415 if (CalleeCtxIt == CSIt->second.end())
2416 return;
2417
2418 // Let's pull in the counter values and the subcontexts coming from the
2419 // inlined callee.
2420 auto &CalleeCtx = CalleeCtxIt->second;
2421 assert(CalleeCtx.guid() == CalleeGUID);
2422
2423 for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {
2424 const int64_t NewIndex = CalleeCounterMap[I];
2425 if (NewIndex >= 0) {
2426 assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "
2427 "index, that's the caller's entry BB");
2428 Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];
2429 }
2430 }
2431 for (auto &[I, OtherSet] : CalleeCtx.callsites()) {
2432 const int64_t NewCSIdx = CalleeCallsiteMap[I];
2433 if (NewCSIdx >= 0) {
2434 assert(NewCSIdx != 0 &&
2435 "callsite index mapping shouldn't happen to a 0 index, the "
2436 "caller must've had at least one callsite (with such an index)");
2437 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2438 }
2439 }
2440 // We know the traversal is preorder, so it wouldn't have yet looked at the
2441 // sub-contexts of this context that it's currently visiting. Meaning, the
2442 // erase below invalidates no iterators.
2443 auto Deleted = Ctx.callsites().erase(CallsiteID);
2444 assert(Deleted);
2445 (void)Deleted;
2446 };
2447 CtxProf.update(Updater, Caller);
2448 return Ret;
2449}
2450
2451/// This function inlines the called function into the basic block of the
2452/// caller. This returns false if it is not possible to inline this call.
2453/// The program is still in a well defined state if this occurs though.
2454///
2455/// Note that this only does one level of inlining. For example, if the
2456/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2457/// exists in the instruction stream. Similarly this will inline a recursive
2458/// function by one level.
2460 bool MergeAttributes,
2461 AAResults *CalleeAAR,
2462 bool InsertLifetime,
2463 Function *ForwardVarArgsTo) {
2464 assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
2465
2466 // FIXME: we don't inline callbr yet.
2467 if (isa<CallBrInst>(CB))
2468 return InlineResult::failure("We don't inline callbr yet.");
2469
2470 // If IFI has any state in it, zap it before we fill it in.
2471 IFI.reset();
2472
2473 Function *CalledFunc = CB.getCalledFunction();
2474 if (!CalledFunc || // Can't inline external function or indirect
2475 CalledFunc->isDeclaration()) // call!
2476 return InlineResult::failure("external or indirect");
2477
2478 // The inliner does not know how to inline through calls with operand bundles
2479 // in general ...
2480 Value *ConvergenceControlToken = nullptr;
2481 if (CB.hasOperandBundles()) {
2482 for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2483 auto OBUse = CB.getOperandBundleAt(i);
2484 uint32_t Tag = OBUse.getTagID();
2485 // ... but it knows how to inline through "deopt" operand bundles ...
2486 if (Tag == LLVMContext::OB_deopt)
2487 continue;
2488 // ... and "funclet" operand bundles.
2489 if (Tag == LLVMContext::OB_funclet)
2490 continue;
2492 continue;
2493 if (Tag == LLVMContext::OB_kcfi)
2494 continue;
2496 ConvergenceControlToken = OBUse.Inputs[0].get();
2497 continue;
2498 }
2499
2500 return InlineResult::failure("unsupported operand bundle");
2501 }
2502 }
2503
2504 // FIXME: The check below is redundant and incomplete. According to spec, if a
2505 // convergent call is missing a token, then the caller is using uncontrolled
2506 // convergence. If the callee has an entry intrinsic, then the callee is using
2507 // controlled convergence, and the call cannot be inlined. A proper
2508 // implemenation of this check requires a whole new analysis that identifies
2509 // convergence in every function. For now, we skip that and just do this one
2510 // cursory check. The underlying assumption is that in a compiler flow that
2511 // fully implements convergence control tokens, there is no mixing of
2512 // controlled and uncontrolled convergent operations in the whole program.
2513 if (CB.isConvergent()) {
2514 if (!ConvergenceControlToken &&
2515 getConvergenceEntry(CalledFunc->getEntryBlock())) {
2516 return InlineResult::failure(
2517 "convergent call needs convergencectrl operand");
2518 }
2519 }
2520
2521 // If the call to the callee cannot throw, set the 'nounwind' flag on any
2522 // calls that we inline.
2523 bool MarkNoUnwind = CB.doesNotThrow();
2524
2525 BasicBlock *OrigBB = CB.getParent();
2526 Function *Caller = OrigBB->getParent();
2527
2528 // GC poses two hazards to inlining, which only occur when the callee has GC:
2529 // 1. If the caller has no GC, then the callee's GC must be propagated to the
2530 // caller.
2531 // 2. If the caller has a differing GC, it is invalid to inline.
2532 if (CalledFunc->hasGC()) {
2533 if (!Caller->hasGC())
2534 Caller->setGC(CalledFunc->getGC());
2535 else if (CalledFunc->getGC() != Caller->getGC())
2536 return InlineResult::failure("incompatible GC");
2537 }
2538
2539 // Get the personality function from the callee if it contains a landing pad.
2540 Constant *CalledPersonality =
2541 CalledFunc->hasPersonalityFn()
2542 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2543 : nullptr;
2544
2545 // Find the personality function used by the landing pads of the caller. If it
2546 // exists, then check to see that it matches the personality function used in
2547 // the callee.
2548 Constant *CallerPersonality =
2549 Caller->hasPersonalityFn()
2550 ? Caller->getPersonalityFn()->stripPointerCasts()
2551 : nullptr;
2552 if (CalledPersonality) {
2553 if (!CallerPersonality)
2554 Caller->setPersonalityFn(CalledPersonality);
2555 // If the personality functions match, then we can perform the
2556 // inlining. Otherwise, we can't inline.
2557 // TODO: This isn't 100% true. Some personality functions are proper
2558 // supersets of others and can be used in place of the other.
2559 else if (CalledPersonality != CallerPersonality)
2560 return InlineResult::failure("incompatible personality");
2561 }
2562
2563 // We need to figure out which funclet the callsite was in so that we may
2564 // properly nest the callee.
2565 Instruction *CallSiteEHPad = nullptr;
2566 if (CallerPersonality) {
2567 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
2568 if (isScopedEHPersonality(Personality)) {
2569 std::optional<OperandBundleUse> ParentFunclet =
2571 if (ParentFunclet)
2572 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2573
2574 // OK, the inlining site is legal. What about the target function?
2575
2576 if (CallSiteEHPad) {
2577 if (Personality == EHPersonality::MSVC_CXX) {
2578 // The MSVC personality cannot tolerate catches getting inlined into
2579 // cleanup funclets.
2580 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2581 // Ok, the call site is within a cleanuppad. Let's check the callee
2582 // for catchpads.
2583 for (const BasicBlock &CalledBB : *CalledFunc) {
2584 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2585 return InlineResult::failure("catch in cleanup funclet");
2586 }
2587 }
2588 } else if (isAsynchronousEHPersonality(Personality)) {
2589 // SEH is even less tolerant, there may not be any sort of exceptional
2590 // funclet in the callee.
2591 for (const BasicBlock &CalledBB : *CalledFunc) {
2592 if (CalledBB.isEHPad())
2593 return InlineResult::failure("SEH in cleanup funclet");
2594 }
2595 }
2596 }
2597 }
2598 }
2599
2600 // Determine if we are dealing with a call in an EHPad which does not unwind
2601 // to caller.
2602 bool EHPadForCallUnwindsLocally = false;
2603 if (CallSiteEHPad && isa<CallInst>(CB)) {
2604 UnwindDestMemoTy FuncletUnwindMap;
2605 Value *CallSiteUnwindDestToken =
2606 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
2607
2608 EHPadForCallUnwindsLocally =
2609 CallSiteUnwindDestToken &&
2610 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2611 }
2612
2613 // Get an iterator to the last basic block in the function, which will have
2614 // the new function inlined after it.
2615 Function::iterator LastBlock = --Caller->end();
2616
2617 // Make sure to capture all of the return instructions from the cloned
2618 // function.
2620 ClonedCodeInfo InlinedFunctionInfo;
2621 Function::iterator FirstNewBlock;
2622
2623 { // Scope to destroy VMap after cloning.
2624 ValueToValueMapTy VMap;
2625 struct ByValInit {
2626 Value *Dst;
2627 Value *Src;
2628 Type *Ty;
2629 };
2630 // Keep a list of pair (dst, src) to emit byval initializations.
2631 SmallVector<ByValInit, 4> ByValInits;
2632
2633 // When inlining a function that contains noalias scope metadata,
2634 // this metadata needs to be cloned so that the inlined blocks
2635 // have different "unique scopes" at every call site.
2636 // Track the metadata that must be cloned. Do this before other changes to
2637 // the function, so that we do not get in trouble when inlining caller ==
2638 // callee.
2639 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2640
2641 auto &DL = Caller->getDataLayout();
2642
2643 // Calculate the vector of arguments to pass into the function cloner, which
2644 // matches up the formal to the actual argument values.
2645 auto AI = CB.arg_begin();
2646 unsigned ArgNo = 0;
2647 for (Function::arg_iterator I = CalledFunc->arg_begin(),
2648 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
2649 Value *ActualArg = *AI;
2650
2651 // When byval arguments actually inlined, we need to make the copy implied
2652 // by them explicit. However, we don't do this if the callee is readonly
2653 // or readnone, because the copy would be unneeded: the callee doesn't
2654 // modify the struct.
2655 if (CB.isByValArgument(ArgNo)) {
2656 ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2657 &CB, CalledFunc, IFI,
2658 CalledFunc->getParamAlign(ArgNo));
2659 if (ActualArg != *AI)
2660 ByValInits.push_back(
2661 {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
2662 }
2663
2664 VMap[&*I] = ActualArg;
2665 }
2666
2667 // TODO: Remove this when users have been updated to the assume bundles.
2668 // Add alignment assumptions if necessary. We do this before the inlined
2669 // instructions are actually cloned into the caller so that we can easily
2670 // check what will be known at the start of the inlined code.
2671 AddAlignmentAssumptions(CB, IFI);
2672
2673 AssumptionCache *AC =
2674 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2675
2676 /// Preserve all attributes on of the call and its parameters.
2677 salvageKnowledge(&CB, AC);
2678
2679 // We want the inliner to prune the code as it copies. We would LOVE to
2680 // have no dead or constant instructions leftover after inlining occurs
2681 // (which can happen, e.g., because an argument was constant), but we'll be
2682 // happy with whatever the cloner can do.
2683 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2684 /*ModuleLevelChanges=*/false, Returns, ".i",
2685 &InlinedFunctionInfo);
2686 // Remember the first block that is newly cloned over.
2687 FirstNewBlock = LastBlock; ++FirstNewBlock;
2688
2689 // Insert retainRV/clainRV runtime calls.
2691 if (RVCallKind != objcarc::ARCInstKind::None)
2692 inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2693
2694 // Updated caller/callee profiles only when requested. For sample loader
2695 // inlining, the context-sensitive inlinee profile doesn't need to be
2696 // subtracted from callee profile, and the inlined clone also doesn't need
2697 // to be scaled based on call site count.
2698 if (IFI.UpdateProfile) {
2699 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2700 // Update the BFI of blocks cloned into the caller.
2701 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2702 CalledFunc->front());
2703
2704 if (auto Profile = CalledFunc->getEntryCount())
2705 updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2706 IFI.CallerBFI);
2707 }
2708
2709 // Inject byval arguments initialization.
2710 for (ByValInit &Init : ByValInits)
2711 HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
2712 &*FirstNewBlock, IFI, CalledFunc);
2713
2714 std::optional<OperandBundleUse> ParentDeopt =
2716 if (ParentDeopt) {
2718
2719 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2720 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2721 if (!ICS)
2722 continue; // instruction was DCE'd or RAUW'ed to undef
2723
2724 OpDefs.clear();
2725
2726 OpDefs.reserve(ICS->getNumOperandBundles());
2727
2728 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2729 ++COBi) {
2730 auto ChildOB = ICS->getOperandBundleAt(COBi);
2731 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2732 // If the inlined call has other operand bundles, let them be
2733 OpDefs.emplace_back(ChildOB);
2734 continue;
2735 }
2736
2737 // It may be useful to separate this logic (of handling operand
2738 // bundles) out to a separate "policy" component if this gets crowded.
2739 // Prepend the parent's deoptimization continuation to the newly
2740 // inlined call's deoptimization continuation.
2741 std::vector<Value *> MergedDeoptArgs;
2742 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2743 ChildOB.Inputs.size());
2744
2745 llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2746 llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2747
2748 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2749 }
2750
2751 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS->getIterator());
2752
2753 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2754 // this even if the call returns void.
2755 ICS->replaceAllUsesWith(NewI);
2756
2757 VH = nullptr;
2758 ICS->eraseFromParent();
2759 }
2760 }
2761
2762 // For 'nodebug' functions, the associated DISubprogram is always null.
2763 // Conservatively avoid propagating the callsite debug location to
2764 // instructions inlined from a function whose DISubprogram is not null.
2765 fixupLineNumbers(Caller, FirstNewBlock, &CB,
2766 CalledFunc->getSubprogram() != nullptr);
2767
2768 if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2769 // Interpret inlined stores to caller-local variables as assignments.
2770 trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2771
2772 // Update DIAssignID metadata attachments and uses so that they are
2773 // unique to this inlined instance.
2774 fixupAssignments(FirstNewBlock, Caller->end());
2775 }
2776
2777 // Now clone the inlined noalias scope metadata.
2778 SAMetadataCloner.clone();
2779 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2780
2781 // Add noalias metadata if necessary.
2782 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2783
2784 // Clone return attributes on the callsite into the calls within the inlined
2785 // function which feed into its return value.
2786 AddReturnAttributes(CB, VMap, InlinedFunctionInfo);
2787
2788 // Clone attributes on the params of the callsite to calls within the
2789 // inlined function which use the same param.
2790 AddParamAndFnBasicAttributes(CB, VMap, InlinedFunctionInfo);
2791
2792 propagateMemProfMetadata(CalledFunc, CB,
2793 InlinedFunctionInfo.ContainsMemProfMetadata, VMap);
2794
2795 // Propagate metadata on the callsite if necessary.
2796 PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2797
2798 // Register any cloned assumptions.
2799 if (IFI.GetAssumptionCache)
2800 for (BasicBlock &NewBlock :
2801 make_range(FirstNewBlock->getIterator(), Caller->end()))
2802 for (Instruction &I : NewBlock)
2803 if (auto *II = dyn_cast<AssumeInst>(&I))
2804 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2805 }
2806
2807 if (ConvergenceControlToken) {
2808 IntrinsicInst *IntrinsicCall = getConvergenceEntry(*FirstNewBlock);
2809 if (IntrinsicCall) {
2810 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2811 IntrinsicCall->eraseFromParent();
2812 }
2813 }
2814
2815 // If there are any alloca instructions in the block that used to be the entry
2816 // block for the callee, move them to the entry block of the caller. First
2817 // calculate which instruction they should be inserted before. We insert the
2818 // instructions at the end of the current alloca list.
2819 {
2820 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2821 for (BasicBlock::iterator I = FirstNewBlock->begin(),
2822 E = FirstNewBlock->end(); I != E; ) {
2823 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2824 if (!AI) continue;
2825
2826 // If the alloca is now dead, remove it. This often occurs due to code
2827 // specialization.
2828 if (AI->use_empty()) {
2829 AI->eraseFromParent();
2830 continue;
2831 }
2832
2834 continue;
2835
2836 // Keep track of the static allocas that we inline into the caller.
2837 IFI.StaticAllocas.push_back(AI);
2838
2839 // Scan for the block of allocas that we can move over, and move them
2840 // all at once.
2841 while (isa<AllocaInst>(I) &&
2842 !cast<AllocaInst>(I)->use_empty() &&
2843 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2844 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2845 ++I;
2846 }
2847
2848 // Transfer all of the allocas over in a block. Using splice means
2849 // that the instructions aren't removed from the symbol table, then
2850 // reinserted.
2851 I.setTailBit(true);
2852 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2853 AI->getIterator(), I);
2854 }
2855 }
2856
2857 SmallVector<Value*,4> VarArgsToForward;
2858 SmallVector<AttributeSet, 4> VarArgsAttrs;
2859 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2860 i < CB.arg_size(); i++) {
2861 VarArgsToForward.push_back(CB.getArgOperand(i));
2862 VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2863 }
2864
2865 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2866 if (InlinedFunctionInfo.ContainsCalls) {
2867 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2868 if (CallInst *CI = dyn_cast<CallInst>(&CB))
2869 CallSiteTailKind = CI->getTailCallKind();
2870
2871 // For inlining purposes, the "notail" marker is the same as no marker.
2872 if (CallSiteTailKind == CallInst::TCK_NoTail)
2873 CallSiteTailKind = CallInst::TCK_None;
2874
2875 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2876 ++BB) {
2878 CallInst *CI = dyn_cast<CallInst>(&I);
2879 if (!CI)
2880 continue;
2881
2882 // Forward varargs from inlined call site to calls to the
2883 // ForwardVarArgsTo function, if requested, and to musttail calls.
2884 if (!VarArgsToForward.empty() &&
2885 ((ForwardVarArgsTo &&
2886 CI->getCalledFunction() == ForwardVarArgsTo) ||
2887 CI->isMustTailCall())) {
2888 // Collect attributes for non-vararg parameters.
2889 AttributeList Attrs = CI->getAttributes();
2891 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2892 for (unsigned ArgNo = 0;
2893 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2894 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2895 }
2896
2897 // Add VarArg attributes.
2898 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2899 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2900 Attrs.getRetAttrs(), ArgAttrs);
2901 // Add VarArgs to existing parameters.
2902 SmallVector<Value *, 6> Params(CI->args());
2903 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2904 CallInst *NewCI = CallInst::Create(
2905 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI->getIterator());
2906 NewCI->setDebugLoc(CI->getDebugLoc());
2907 NewCI->setAttributes(Attrs);
2908 NewCI->setCallingConv(CI->getCallingConv());
2909 CI->replaceAllUsesWith(NewCI);
2910 CI->eraseFromParent();
2911 CI = NewCI;
2912 }
2913
2914 if (Function *F = CI->getCalledFunction())
2915 InlinedDeoptimizeCalls |=
2916 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2917
2918 // We need to reduce the strength of any inlined tail calls. For
2919 // musttail, we have to avoid introducing potential unbounded stack
2920 // growth. For example, if functions 'f' and 'g' are mutually recursive
2921 // with musttail, we can inline 'g' into 'f' so long as we preserve
2922 // musttail on the cloned call to 'f'. If either the inlined call site
2923 // or the cloned call site is *not* musttail, the program already has
2924 // one frame of stack growth, so it's safe to remove musttail. Here is
2925 // a table of example transformations:
2926 //
2927 // f -> musttail g -> musttail f ==> f -> musttail f
2928 // f -> musttail g -> tail f ==> f -> tail f
2929 // f -> g -> musttail f ==> f -> f
2930 // f -> g -> tail f ==> f -> f
2931 //
2932 // Inlined notail calls should remain notail calls.
2933 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2934 if (ChildTCK != CallInst::TCK_NoTail)
2935 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2936 CI->setTailCallKind(ChildTCK);
2937 InlinedMustTailCalls |= CI->isMustTailCall();
2938
2939 // Call sites inlined through a 'nounwind' call site should be
2940 // 'nounwind' as well. However, avoid marking call sites explicitly
2941 // where possible. This helps expose more opportunities for CSE after
2942 // inlining, commonly when the callee is an intrinsic.
2943 if (MarkNoUnwind && !CI->doesNotThrow())
2944 CI->setDoesNotThrow();
2945 }
2946 }
2947 }
2948
2949 // Leave lifetime markers for the static alloca's, scoping them to the
2950 // function we just inlined.
2951 // We need to insert lifetime intrinsics even at O0 to avoid invalid
2952 // access caused by multithreaded coroutines. The check
2953 // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2954 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2955 !IFI.StaticAllocas.empty()) {
2956 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2957 for (AllocaInst *AI : IFI.StaticAllocas) {
2958 // Don't mark swifterror allocas. They can't have bitcast uses.
2959 if (AI->isSwiftError())
2960 continue;
2961
2962 // If the alloca is already scoped to something smaller than the whole
2963 // function then there's no need to add redundant, less accurate markers.
2964 if (hasLifetimeMarkers(AI))
2965 continue;
2966
2967 // Try to determine the size of the allocation.
2968 ConstantInt *AllocaSize = nullptr;
2969 if (ConstantInt *AIArraySize =
2970 dyn_cast<ConstantInt>(AI->getArraySize())) {
2971 auto &DL = Caller->getDataLayout();
2972 Type *AllocaType = AI->getAllocatedType();
2973 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2974 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2975
2976 // Don't add markers for zero-sized allocas.
2977 if (AllocaArraySize == 0)
2978 continue;
2979
2980 // Check that array size doesn't saturate uint64_t and doesn't
2981 // overflow when it's multiplied by type size.
2982 if (!AllocaTypeSize.isScalable() &&
2983 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2984 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2985 AllocaTypeSize.getFixedValue()) {
2986 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2987 AllocaArraySize * AllocaTypeSize);
2988 }
2989 }
2990
2991 builder.CreateLifetimeStart(AI, AllocaSize);
2992 for (ReturnInst *RI : Returns) {
2993 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2994 // call and a return. The return kills all local allocas.
2995 if (InlinedMustTailCalls &&
2996 RI->getParent()->getTerminatingMustTailCall())
2997 continue;
2998 if (InlinedDeoptimizeCalls &&
2999 RI->getParent()->getTerminatingDeoptimizeCall())
3000 continue;
3001 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
3002 }
3003 }
3004 }
3005
3006 // If the inlined code contained dynamic alloca instructions, wrap the inlined
3007 // code with llvm.stacksave/llvm.stackrestore intrinsics.
3008 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
3009 // Insert the llvm.stacksave.
3010 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
3011 .CreateStackSave("savedstack");
3012
3013 // Insert a call to llvm.stackrestore before any return instructions in the
3014 // inlined function.
3015 for (ReturnInst *RI : Returns) {
3016 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
3017 // call and a return. The return will restore the stack pointer.
3018 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
3019 continue;
3020 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
3021 continue;
3022 IRBuilder<>(RI).CreateStackRestore(SavedPtr);
3023 }
3024 }
3025
3026 // If we are inlining for an invoke instruction, we must make sure to rewrite
3027 // any call instructions into invoke instructions. This is sensitive to which
3028 // funclet pads were top-level in the inlinee, so must be done before
3029 // rewriting the "parent pad" links.
3030 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
3031 BasicBlock *UnwindDest = II->getUnwindDest();
3032 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
3033 if (isa<LandingPadInst>(FirstNonPHI)) {
3034 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3035 } else {
3036 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3037 }
3038 }
3039
3040 // Update the lexical scopes of the new funclets and callsites.
3041 // Anything that had 'none' as its parent is now nested inside the callsite's
3042 // EHPad.
3043 if (CallSiteEHPad) {
3044 for (Function::iterator BB = FirstNewBlock->getIterator(),
3045 E = Caller->end();
3046 BB != E; ++BB) {
3047 // Add bundle operands to inlined call sites.
3048 PropagateOperandBundles(BB, CallSiteEHPad);
3049
3050 // It is problematic if the inlinee has a cleanupret which unwinds to
3051 // caller and we inline it into a call site which doesn't unwind but into
3052 // an EH pad that does. Such an edge must be dynamically unreachable.
3053 // As such, we replace the cleanupret with unreachable.
3054 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
3055 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3056 changeToUnreachable(CleanupRet);
3057
3058 Instruction *I = BB->getFirstNonPHI();
3059 if (!I->isEHPad())
3060 continue;
3061
3062 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
3063 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3064 CatchSwitch->setParentPad(CallSiteEHPad);
3065 } else {
3066 auto *FPI = cast<FuncletPadInst>(I);
3067 if (isa<ConstantTokenNone>(FPI->getParentPad()))
3068 FPI->setParentPad(CallSiteEHPad);
3069 }
3070 }
3071 }
3072
3073 if (InlinedDeoptimizeCalls) {
3074 // We need to at least remove the deoptimizing returns from the Return set,
3075 // so that the control flow from those returns does not get merged into the
3076 // caller (but terminate it instead). If the caller's return type does not
3077 // match the callee's return type, we also need to change the return type of
3078 // the intrinsic.
3079 if (Caller->getReturnType() == CB.getType()) {
3080 llvm::erase_if(Returns, [](ReturnInst *RI) {
3081 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
3082 });
3083 } else {
3084 SmallVector<ReturnInst *, 8> NormalReturns;
3085 Function *NewDeoptIntrinsic = Intrinsic::getOrInsertDeclaration(
3086 Caller->getParent(), Intrinsic::experimental_deoptimize,
3087 {Caller->getReturnType()});
3088
3089 for (ReturnInst *RI : Returns) {
3090 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
3091 if (!DeoptCall) {
3092 NormalReturns.push_back(RI);
3093 continue;
3094 }
3095
3096 // The calling convention on the deoptimize call itself may be bogus,
3097 // since the code we're inlining may have undefined behavior (and may
3098 // never actually execute at runtime); but all
3099 // @llvm.experimental.deoptimize declarations have to have the same
3100 // calling convention in a well-formed module.
3101 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
3102 NewDeoptIntrinsic->setCallingConv(CallingConv);
3103 auto *CurBB = RI->getParent();
3104 RI->eraseFromParent();
3105
3106 SmallVector<Value *, 4> CallArgs(DeoptCall->args());
3107
3109 DeoptCall->getOperandBundlesAsDefs(OpBundles);
3110 auto DeoptAttributes = DeoptCall->getAttributes();
3111 DeoptCall->eraseFromParent();
3112 assert(!OpBundles.empty() &&
3113 "Expected at least the deopt operand bundle");
3114
3115 IRBuilder<> Builder(CurBB);
3116 CallInst *NewDeoptCall =
3117 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3118 NewDeoptCall->setCallingConv(CallingConv);
3119 NewDeoptCall->setAttributes(DeoptAttributes);
3120 if (NewDeoptCall->getType()->isVoidTy())
3121 Builder.CreateRetVoid();
3122 else
3123 Builder.CreateRet(NewDeoptCall);
3124 // Since the ret type is changed, remove the incompatible attributes.
3126 NewDeoptCall->getType(), NewDeoptCall->getRetAttributes()));
3127 }
3128
3129 // Leave behind the normal returns so we can merge control flow.
3130 std::swap(Returns, NormalReturns);
3131 }
3132 }
3133
3134 // Handle any inlined musttail call sites. In order for a new call site to be
3135 // musttail, the source of the clone and the inlined call site must have been
3136 // musttail. Therefore it's safe to return without merging control into the
3137 // phi below.
3138 if (InlinedMustTailCalls) {
3139 // Check if we need to bitcast the result of any musttail calls.
3140 Type *NewRetTy = Caller->getReturnType();
3141 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
3142
3143 // Handle the returns preceded by musttail calls separately.
3144 SmallVector<ReturnInst *, 8> NormalReturns;
3145 for (ReturnInst *RI : Returns) {
3146 CallInst *ReturnedMustTail =
3147 RI->getParent()->getTerminatingMustTailCall();
3148 if (!ReturnedMustTail) {
3149 NormalReturns.push_back(RI);
3150 continue;
3151 }
3152 if (!NeedBitCast)
3153 continue;
3154
3155 // Delete the old return and any preceding bitcast.
3156 BasicBlock *CurBB = RI->getParent();
3157 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
3158 RI->eraseFromParent();
3159 if (OldCast)
3160 OldCast->eraseFromParent();
3161
3162 // Insert a new bitcast and return with the right type.
3163 IRBuilder<> Builder(CurBB);
3164 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
3165 }
3166
3167 // Leave behind the normal returns so we can merge control flow.
3168 std::swap(Returns, NormalReturns);
3169 }
3170
3171 // Now that all of the transforms on the inlined code have taken place but
3172 // before we splice the inlined code into the CFG and lose track of which
3173 // blocks were actually inlined, collect the call sites. We only do this if
3174 // call graph updates weren't requested, as those provide value handle based
3175 // tracking of inlined call sites instead. Calls to intrinsics are not
3176 // collected because they are not inlineable.
3177 if (InlinedFunctionInfo.ContainsCalls) {
3178 // Otherwise just collect the raw call sites that were inlined.
3179 for (BasicBlock &NewBB :
3180 make_range(FirstNewBlock->getIterator(), Caller->end()))
3181 for (Instruction &I : NewBB)
3182 if (auto *CB = dyn_cast<CallBase>(&I))
3183 if (!(CB->getCalledFunction() &&
3185 IFI.InlinedCallSites.push_back(CB);
3186 }
3187
3188 // If we cloned in _exactly one_ basic block, and if that block ends in a
3189 // return instruction, we splice the body of the inlined callee directly into
3190 // the calling basic block.
3191 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3192 // Move all of the instructions right before the call.
3193 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
3194 FirstNewBlock->end());
3195 // Remove the cloned basic block.
3196 Caller->back().eraseFromParent();
3197
3198 // If the call site was an invoke instruction, add a branch to the normal
3199 // destination.
3200 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3201 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), CB.getIterator());
3202 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
3203 }
3204
3205 // If the return instruction returned a value, replace uses of the call with
3206 // uses of the returned value.
3207 if (!CB.use_empty()) {
3208 ReturnInst *R = Returns[0];
3209 if (&CB == R->getReturnValue())
3211 else
3212 CB.replaceAllUsesWith(R->getReturnValue());
3213 }
3214 // Since we are now done with the Call/Invoke, we can delete it.
3215 CB.eraseFromParent();
3216
3217 // Since we are now done with the return instruction, delete it also.
3218 Returns[0]->eraseFromParent();
3219
3220 if (MergeAttributes)
3221 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3222
3223 // We are now done with the inlining.
3224 return InlineResult::success();
3225 }
3226
3227 // Otherwise, we have the normal case, of more than one block to inline or
3228 // multiple return sites.
3229
3230 // We want to clone the entire callee function into the hole between the
3231 // "starter" and "ender" blocks. How we accomplish this depends on whether
3232 // this is an invoke instruction or a call instruction.
3233 BasicBlock *AfterCallBB;
3234 BranchInst *CreatedBranchToNormalDest = nullptr;
3235 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3236
3237 // Add an unconditional branch to make this look like the CallInst case...
3238 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), CB.getIterator());
3239
3240 // Split the basic block. This guarantees that no PHI nodes will have to be
3241 // updated due to new incoming edges, and make the invoke case more
3242 // symmetric to the call case.
3243 AfterCallBB =
3244 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
3245 CalledFunc->getName() + ".exit");
3246
3247 } else { // It's a call
3248 // If this is a call instruction, we need to split the basic block that
3249 // the call lives in.
3250 //
3251 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
3252 CalledFunc->getName() + ".exit");
3253 }
3254
3255 if (IFI.CallerBFI) {
3256 // Copy original BB's block frequency to AfterCallBB
3257 IFI.CallerBFI->setBlockFreq(AfterCallBB,
3258 IFI.CallerBFI->getBlockFreq(OrigBB));
3259 }
3260
3261 // Change the branch that used to go to AfterCallBB to branch to the first
3262 // basic block of the inlined function.
3263 //
3264 Instruction *Br = OrigBB->getTerminator();
3265 assert(Br && Br->getOpcode() == Instruction::Br &&
3266 "splitBasicBlock broken!");
3267 Br->setOperand(0, &*FirstNewBlock);
3268
3269 // Now that the function is correct, make it a little bit nicer. In
3270 // particular, move the basic blocks inserted from the end of the function
3271 // into the space made by splitting the source basic block.
3272 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
3273 Caller->end());
3274
3275 // Handle all of the return instructions that we just cloned in, and eliminate
3276 // any users of the original call/invoke instruction.
3277 Type *RTy = CalledFunc->getReturnType();
3278
3279 PHINode *PHI = nullptr;
3280 if (Returns.size() > 1) {
3281 // The PHI node should go at the front of the new basic block to merge all
3282 // possible incoming values.
3283 if (!CB.use_empty()) {
3284 PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
3285 PHI->insertBefore(AfterCallBB->begin());
3286 // Anything that used the result of the function call should now use the
3287 // PHI node as their operand.
3289 }
3290
3291 // Loop over all of the return instructions adding entries to the PHI node
3292 // as appropriate.
3293 if (PHI) {
3294 for (ReturnInst *RI : Returns) {
3295 assert(RI->getReturnValue()->getType() == PHI->getType() &&
3296 "Ret value not consistent in function!");
3297 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
3298 }
3299 }
3300
3301 // Add a branch to the merge points and remove return instructions.
3302 DebugLoc Loc;
3303 for (ReturnInst *RI : Returns) {
3304 BranchInst *BI = BranchInst::Create(AfterCallBB, RI->getIterator());
3305 Loc = RI->getDebugLoc();
3306 BI->setDebugLoc(Loc);
3307 RI->eraseFromParent();
3308 }
3309 // We need to set the debug location to *somewhere* inside the
3310 // inlined function. The line number may be nonsensical, but the
3311 // instruction will at least be associated with the right
3312 // function.
3313 if (CreatedBranchToNormalDest)
3314 CreatedBranchToNormalDest->setDebugLoc(Loc);
3315 } else if (!Returns.empty()) {
3316 // Otherwise, if there is exactly one return value, just replace anything
3317 // using the return value of the call with the computed value.
3318 if (!CB.use_empty()) {
3319 if (&CB == Returns[0]->getReturnValue())
3321 else
3322 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
3323 }
3324
3325 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
3326 BasicBlock *ReturnBB = Returns[0]->getParent();
3327 ReturnBB->replaceAllUsesWith(AfterCallBB);
3328
3329 // Splice the code from the return block into the block that it will return
3330 // to, which contains the code that was after the call.
3331 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
3332
3333 if (CreatedBranchToNormalDest)
3334 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
3335
3336 // Delete the return instruction now and empty ReturnBB now.
3337 Returns[0]->eraseFromParent();
3338 ReturnBB->eraseFromParent();
3339 } else if (!CB.use_empty()) {
3340 // No returns, but something is using the return value of the call. Just
3341 // nuke the result.
3343 }
3344
3345 // Since we are now done with the Call/Invoke, we can delete it.
3346 CB.eraseFromParent();
3347
3348 // If we inlined any musttail calls and the original return is now
3349 // unreachable, delete it. It can only contain a bitcast and ret.
3350 if (InlinedMustTailCalls && pred_empty(AfterCallBB))
3351 AfterCallBB->eraseFromParent();
3352
3353 // We should always be able to fold the entry block of the function into the
3354 // single predecessor of the block...
3355 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
3356 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3357
3358 // Splice the code entry block into calling block, right before the
3359 // unconditional branch.
3360 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
3361 OrigBB->splice(Br->getIterator(), CalleeEntry);
3362
3363 // Remove the unconditional branch.
3364 Br->eraseFromParent();
3365
3366 // Now we can remove the CalleeEntry block, which is now empty.
3367 CalleeEntry->eraseFromParent();
3368
3369 // If we inserted a phi node, check to see if it has a single value (e.g. all
3370 // the entries are the same or undef). If so, remove the PHI so it doesn't
3371 // block other optimizations.
3372 if (PHI) {
3373 AssumptionCache *AC =
3374 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
3375 auto &DL = Caller->getDataLayout();
3376 if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
3377 PHI->replaceAllUsesWith(V);
3378 PHI->eraseFromParent();
3379 }
3380 }
3381
3382 if (MergeAttributes)
3383 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3384
3385 return InlineResult::success();
3386}
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines the DenseMap class.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Load MIR Sample Profile
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
Definition: APInt.h:78
an instruction to allocate memory on the stack
Definition: Instructions.h:63
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:139
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:49
static uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
Definition: Attributes.h:1149
bool hasAttributes() const
Return true if the builder has IR-level attributes.
Definition: Attributes.h:1119
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
Definition: Attributes.h:1138
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
Definition: Attributes.h:1155
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
Definition: Attributes.h:616
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Remove the specified attribute from this set.
Definition: Attributes.cpp:933
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
Definition: Attributes.cpp:897
const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:496
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:517
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:577
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:279
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition: BasicBlock.h:631
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:516
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getblockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1411
void setDoesNotThrow()
Definition: InstrTypes.h:1924
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
Definition: InstrTypes.h:1737
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2020
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2051
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1349
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
Definition: InstrTypes.h:1544
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1581
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:1964
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1407
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1269
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
Definition: InstrTypes.h:1629
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1682
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Definition: InstrTypes.h:1431
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition: InstrTypes.h:1764
Value * getCalledOperand() const
Definition: InstrTypes.h:1342
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1428
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1923
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1294
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:1808
bool isConvergent() const
Determine if the invoke is convergent.
Definition: InstrTypes.h:1935
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1207
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
Definition: InstrTypes.h:1823
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1285
unsigned arg_size() const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1425
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:1969
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This class represents a range of values.
Definition: ConstantRange.h:47
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1522
This is an important base class in LLVM.
Definition: Constant.h:42
const Constant * stripPointerCasts() const
Definition: Constant.h:218
static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
Debug location.
Subprogram description.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Base class for non-instruction debug metadata records that have positions within IR.
A debug info location.
Definition: DebugLoc.h:33
unsigned getLine() const
Definition: DebugLoc.cpp:24
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
MDNode * getScope() const
Definition: DebugLoc.cpp:34
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition: DebugLoc.cpp:110
unsigned getCol() const
Definition: DebugLoc.cpp:29
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:152
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:144
Class to represent profile counts.
Definition: Function.h:292
uint64_t getCount() const
Definition: Function.h:300
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
BasicBlockListType::iterator iterator
Definition: Function.h:68
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:216
const BasicBlock & front() const
Definition: Function.h:860
iterator_range< arg_iterator > args()
Definition: Function.h:892
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1874
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:345
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:905
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1048
arg_iterator arg_end()
Definition: Function.h:877
arg_iterator arg_begin()
Definition: Function.h:868
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:256
MaybeAlign getParamAlign(unsigned ArgNo) const
Definition: Function.h:488
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
const std::string & getGC() const
Definition: Function.cpp:835
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Definition: Function.cpp:1133
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:221
iterator end()
Definition: Function.h:855
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:281
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.cpp:892
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:296
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Definition: IRBuilder.h:1068
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
Definition: IRBuilder.cpp:460
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:890
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
Definition: IRBuilder.cpp:1254
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1119
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition: IRBuilder.h:488
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2155
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
Definition: IRBuilder.h:1114
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2444
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Definition: IRBuilder.cpp:472
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
Definition: IRBuilder.h:1075
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:655
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
Definition: IRBuilder.cpp:532
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:255
ProfileSummaryInfo * PSI
Definition: Cloning.h:268
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition: Cloning.h:288
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition: Cloning.h:267
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:269
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition: Cloning.h:273
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:269
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:284
InlineResult is basically true or false.
Definition: InlineCost.h:179
static InlineResult success()
Definition: InlineCost.h:184
static InlineResult failure(const char *Reason)
Definition: InlineCost.h:185
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:471
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:368
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:829
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:386
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1679
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:468
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:176
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition: MDBuilder.h:174
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition: MDBuilder.h:167
Metadata node.
Definition: Metadata.h:1069
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition: Metadata.h:1266
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:1114
bool isTemporary() const
Definition: Metadata.h:1253
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1428
op_iterator op_end() const
Definition: Metadata.h:1424
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1543
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
op_iterator op_begin() const
Definition: Metadata.h:1420
LLVMContext & getContext() const
Definition: Metadata.h:1233
Tuple of metadata.
Definition: Metadata.h:1473
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1520
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1073
The instrumented contextual profile, produced by the CtxProfAnalysis.
void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
Analysis providing profile information.
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
Definition: SetVector.h:57
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
void setOperand(unsigned i, Value *Val)
Definition: User.h:233
Value * getOperand(unsigned i) const
Definition: User.h:228
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
See the file comment.
Definition: ValueMap.h:84
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:164
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: ValueMap.h:151
iterator begin()
Definition: ValueMap.h:134
iterator end()
Definition: ValueMap.h:135
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:731
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
Definition: DebugInfo.cpp:1866
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
Definition: DebugInfo.cpp:2112
void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
Definition: DebugInfo.cpp:1982
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Definition: DebugInfo.h:240
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
constexpr double phi
Definition: MathExtras.h:61
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
Definition: ObjCARCUtil.h:60
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
Definition: ObjCARCUtil.h:43
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
Definition: ObjCARCUtil.h:52
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
Definition: ObjCARCUtil.h:29
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1732
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2992
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:242
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1578
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2906
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
Definition: DebugInfo.cpp:2298
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1945
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2099
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
Definition: DebugInfo.cpp:439
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:63
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:74
bool isSimplified(const Value *From, const Value *To) const
Definition: Cloning.h:88
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:65
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
Definition: Cloning.h:69
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:79
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.
Definition: DebugInfo.h:283