LLVM 23.0.0git
InlineFunction.cpp
Go to the documentation of this file.
1//===- InlineFunction.cpp - Code to perform function inlining -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameters and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <deque>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
84
85#define DEBUG_TYPE "inline-function"
86
87using namespace llvm;
88using namespace llvm::memprof;
90
91static cl::opt<bool>
92EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
94 cl::desc("Convert noalias attributes to metadata during inlining."));
95
96static cl::opt<bool>
97 UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
98 cl::init(true),
99 cl::desc("Use the llvm.experimental.noalias.scope.decl "
100 "intrinsic during inlining."));
101
102// Disabled by default, because the added alignment assumptions may increase
103// compile-time and block optimizations. This option is not suitable for use
104// with frontends that emit comprehensive parameter alignment annotations.
105static cl::opt<bool>
106PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
107 cl::init(false), cl::Hidden,
108 cl::desc("Convert align attributes to assumptions during inlining."));
109
111 "max-inst-checked-for-throw-during-inlining", cl::Hidden,
112 cl::desc("the maximum number of instructions analyzed for may throw during "
113 "attribute inference in inlined body"),
114 cl::init(4));
115
116namespace {
117
118 /// A class for recording information about inlining a landing pad.
119 class LandingPadInliningInfo {
120 /// Destination of the invoke's unwind.
121 BasicBlock *OuterResumeDest;
122
123 /// Destination for the callee's resume.
124 BasicBlock *InnerResumeDest = nullptr;
125
126 /// LandingPadInst associated with the invoke.
127 LandingPadInst *CallerLPad = nullptr;
128
129 /// PHI for EH values from landingpad insts.
130 PHINode *InnerEHValuesPHI = nullptr;
131
132 SmallVector<Value*, 8> UnwindDestPHIValues;
133
134 public:
135 LandingPadInliningInfo(InvokeInst *II)
136 : OuterResumeDest(II->getUnwindDest()) {
137 // If there are PHI nodes in the unwind destination block, we need to keep
138 // track of which values came into them from the invoke before removing
139 // the edge from this block.
140 BasicBlock *InvokeBB = II->getParent();
141 BasicBlock::iterator I = OuterResumeDest->begin();
142 for (; isa<PHINode>(I); ++I) {
143 // Save the value to use for this edge.
145 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
146 }
147
148 CallerLPad = cast<LandingPadInst>(I);
149 }
150
151 /// The outer unwind destination is the target of
152 /// unwind edges introduced for calls within the inlined function.
153 BasicBlock *getOuterResumeDest() const {
154 return OuterResumeDest;
155 }
156
157 BasicBlock *getInnerResumeDest();
158
159 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
160
161 /// Forward the 'resume' instruction to the caller's landing pad block.
162 /// When the landing pad block has only one predecessor, this is
163 /// a simple branch. When there is more than one predecessor, we need to
164 /// split the landing pad block after the landingpad instruction and jump
165 /// to there.
166 void forwardResume(ResumeInst *RI,
167 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
168
169 /// Add incoming-PHI values to the unwind destination block for the given
170 /// basic block, using the values for the original invoke's source block.
171 void addIncomingPHIValuesFor(BasicBlock *BB) const {
172 addIncomingPHIValuesForInto(BB, OuterResumeDest);
173 }
174
175 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
176 BasicBlock::iterator I = dest->begin();
177 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
178 PHINode *phi = cast<PHINode>(I);
179 phi->addIncoming(UnwindDestPHIValues[i], src);
180 }
181 }
182 };
183} // end anonymous namespace
184
187 while (It != BB.end()) {
188 if (auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(It)) {
189 if (IntrinsicCall->isEntry()) {
190 return IntrinsicCall;
191 }
192 }
193 It = std::next(It);
194 }
195 return nullptr;
196}
197
/// Get or create a target for the branch from ResumeInsts.
///
/// Lazily splits the caller's landing pad block just after the landingpad
/// instruction, so that forwarded 'resume's can branch to the split-off
/// "body" block. PHIs are introduced so values defined by the landingpad
/// (and any pre-existing PHIs) remain dominating their uses after the split.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  // Memoized: the split only needs to happen once.
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  // (One from the outer landing pad, one from forwarded resumes.)
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  // Order matters: addIncomingPHIValuesForInto walks these in the same order
  // as UnwindDestPHIValues.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body");
    InnerPHI->insertBefore(InsertPoint);
    // RAUW before adding the incoming value, so the new PHI's operand still
    // refers to the outer PHI rather than to itself.
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI =
    PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
  InnerEHValuesPHI->insertBefore(InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
233
234/// Forward the 'resume' instruction to the caller's landing pad block.
235/// When the landing pad block has only one predecessor, this is a simple
236/// branch. When there is more than one predecessor, we need to split the
237/// landing pad block after the landingpad instruction and jump to there.
238void LandingPadInliningInfo::forwardResume(
239 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
240 BasicBlock *Dest = getInnerResumeDest();
241 BasicBlock *Src = RI->getParent();
242
243 auto *BI = UncondBrInst::Create(Dest, Src);
244 BI->setDebugLoc(RI->getDebugLoc());
245
246 // Update the PHIs in the destination. They were inserted in an order which
247 // makes this work.
248 addIncomingPHIValuesForInto(Src, Dest);
249
250 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
251 RI->eraseFromParent();
252}
253
254/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
255static Value *getParentPad(Value *EHPad) {
256 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257 return FPI->getParentPad();
258 return cast<CatchSwitchInst>(EHPad)->getParentPad();
259}
260
262
263/// Helper for getUnwindDestToken that does the descendant-ward part of
264/// the search.
266 UnwindDestMemoTy &MemoMap) {
267 SmallVector<Instruction *, 8> Worklist(1, EHPad);
268
269 while (!Worklist.empty()) {
270 Instruction *CurrentPad = Worklist.pop_back_val();
271 // We only put pads on the worklist that aren't in the MemoMap. When
272 // we find an unwind dest for a pad we may update its ancestors, but
273 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
274 // so they should never get updated while queued on the worklist.
275 assert(!MemoMap.count(CurrentPad));
276 Value *UnwindDestToken = nullptr;
277 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
278 if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();
280 } else {
281 // Catchswitch doesn't have a 'nounwind' variant, and one might be
282 // annotated as "unwinds to caller" when really it's nounwind (see
283 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
284 // parent's unwind dest from this. We can check its catchpads'
285 // descendants, since they might include a cleanuppad with an
286 // "unwinds to caller" cleanupret, which can be trusted.
287 for (auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
290 BasicBlock *HandlerBlock = *HI;
291 auto *CatchPad =
292 cast<CatchPadInst>(&*HandlerBlock->getFirstNonPHIIt());
293 for (User *Child : CatchPad->users()) {
294 // Intentionally ignore invokes here -- since the catchswitch is
295 // marked "unwind to caller", it would be a verifier error if it
296 // contained an invoke which unwinds out of it, so any invoke we'd
297 // encounter must unwind to some child of the catch.
298 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
299 continue;
300
301 Instruction *ChildPad = cast<Instruction>(Child);
302 auto Memo = MemoMap.find(ChildPad);
303 if (Memo == MemoMap.end()) {
304 // Haven't figured out this child pad yet; queue it.
305 Worklist.push_back(ChildPad);
306 continue;
307 }
308 // We've already checked this child, but might have found that
309 // it offers no proof either way.
310 Value *ChildUnwindDestToken = Memo->second;
311 if (!ChildUnwindDestToken)
312 continue;
313 // We already know the child's unwind dest, which can either
314 // be ConstantTokenNone to indicate unwind to caller, or can
315 // be another child of the catchpad. Only the former indicates
316 // the unwind dest of the catchswitch.
317 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
318 UnwindDestToken = ChildUnwindDestToken;
319 break;
320 }
321 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
322 }
323 }
324 }
325 } else {
326 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
327 for (User *U : CleanupPad->users()) {
328 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
329 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();
331 else
332 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
333 break;
334 }
335 Value *ChildUnwindDestToken;
336 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();
338 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
339 Instruction *ChildPad = cast<Instruction>(U);
340 auto Memo = MemoMap.find(ChildPad);
341 if (Memo == MemoMap.end()) {
342 // Haven't resolved this child yet; queue it and keep searching.
343 Worklist.push_back(ChildPad);
344 continue;
345 }
346 // We've checked this child, but still need to ignore it if it
347 // had no proof either way.
348 ChildUnwindDestToken = Memo->second;
349 if (!ChildUnwindDestToken)
350 continue;
351 } else {
352 // Not a relevant user of the cleanuppad
353 continue;
354 }
355 // In a well-formed program, the child/invoke must either unwind to
356 // an(other) child of the cleanup, or exit the cleanup. In the
357 // first case, continue searching.
358 if (isa<Instruction>(ChildUnwindDestToken) &&
359 getParentPad(ChildUnwindDestToken) == CleanupPad)
360 continue;
361 UnwindDestToken = ChildUnwindDestToken;
362 break;
363 }
364 }
365 // If we haven't found an unwind dest for CurrentPad, we may have queued its
366 // children, so move on to the next in the worklist.
367 if (!UnwindDestToken)
368 continue;
369
370 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
371 // any ancestors of CurrentPad up to but not including UnwindDestToken's
372 // parent pad. Record this in the memo map, and check to see if the
373 // original EHPad being queried is one of the ones exited.
374 Value *UnwindParent;
375 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
376 UnwindParent = getParentPad(UnwindPad);
377 else
378 UnwindParent = nullptr;
379 bool ExitedOriginalPad = false;
380 for (Instruction *ExitedPad = CurrentPad;
381 ExitedPad && ExitedPad != UnwindParent;
382 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
383 // Skip over catchpads since they just follow their catchswitches.
384 if (isa<CatchPadInst>(ExitedPad))
385 continue;
386 MemoMap[ExitedPad] = UnwindDestToken;
387 ExitedOriginalPad |= (ExitedPad == EHPad);
388 }
389
390 if (ExitedOriginalPad)
391 return UnwindDestToken;
392
393 // Continue the search.
394 }
395
396 // No definitive information is contained within this funclet.
397 return nullptr;
398}
399
400/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
401/// return that pad instruction. If it unwinds to caller, return
402/// ConstantTokenNone. If it does not have a definitive unwind destination,
403/// return nullptr.
404///
405/// This routine gets invoked for calls in funclets in inlinees when inlining
406/// an invoke. Since many funclets don't have calls inside them, it's queried
407/// on-demand rather than building a map of pads to unwind dests up front.
408/// Determining a funclet's unwind dest may require recursively searching its
409/// descendants, and also ancestors and cousins if the descendants don't provide
410/// an answer. Since most funclets will have their unwind dest immediately
411/// available as the unwind dest of a catchswitch or cleanupret, this routine
412/// searches top-down from the given pad and then up. To avoid worst-case
413/// quadratic run-time given that approach, it uses a memo map to avoid
414/// re-processing funclet trees. The callers that rewrite the IR as they go
415/// take advantage of this, for correctness, by checking/forcing rewritten
416/// pads' entries to match the original callee view.
418 UnwindDestMemoTy &MemoMap) {
419 // Catchpads unwind to the same place as their catchswitch;
420 // redirct any queries on catchpads so the code below can
421 // deal with just catchswitches and cleanuppads.
422 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
423 EHPad = CPI->getCatchSwitch();
424
425 // Check if we've already determined the unwind dest for this pad.
426 auto Memo = MemoMap.find(EHPad);
427 if (Memo != MemoMap.end())
428 return Memo->second;
429
430 // Search EHPad and, if necessary, its descendants.
431 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
432 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
433 if (UnwindDestToken)
434 return UnwindDestToken;
435
436 // No information is available for this EHPad from itself or any of its
437 // descendants. An unwind all the way out to a pad in the caller would
438 // need also to agree with the unwind dest of the parent funclet, so
439 // search up the chain to try to find a funclet with information. Put
440 // null entries in the memo map to avoid re-processing as we go up.
441 MemoMap[EHPad] = nullptr;
442#ifndef NDEBUG
444 TempMemos.insert(EHPad);
445#endif
446 Instruction *LastUselessPad = EHPad;
447 Value *AncestorToken;
448 for (AncestorToken = getParentPad(EHPad);
449 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
450 AncestorToken = getParentPad(AncestorToken)) {
451 // Skip over catchpads since they just follow their catchswitches.
452 if (isa<CatchPadInst>(AncestorPad))
453 continue;
454 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
455 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
456 // call to getUnwindDestToken, that would mean that AncestorPad had no
457 // information in itself, its descendants, or its ancestors. If that
458 // were the case, then we should also have recorded the lack of information
459 // for the descendant that we're coming from. So assert that we don't
460 // find a null entry in the MemoMap for AncestorPad.
461 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
462 auto AncestorMemo = MemoMap.find(AncestorPad);
463 if (AncestorMemo == MemoMap.end()) {
464 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
465 } else {
466 UnwindDestToken = AncestorMemo->second;
467 }
468 if (UnwindDestToken)
469 break;
470 LastUselessPad = AncestorPad;
471 MemoMap[LastUselessPad] = nullptr;
472#ifndef NDEBUG
473 TempMemos.insert(LastUselessPad);
474#endif
475 }
476
477 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
478 // returned nullptr (and likewise for EHPad and any of its ancestors up to
479 // LastUselessPad), so LastUselessPad has no information from below. Since
480 // getUnwindDestTokenHelper must investigate all downward paths through
481 // no-information nodes to prove that a node has no information like this,
482 // and since any time it finds information it records it in the MemoMap for
483 // not just the immediately-containing funclet but also any ancestors also
484 // exited, it must be the case that, walking downward from LastUselessPad,
485 // visiting just those nodes which have not been mapped to an unwind dest
486 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
487 // they are just used to keep getUnwindDestTokenHelper from repeating work),
488 // any node visited must have been exhaustively searched with no information
489 // for it found.
490 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
491 while (!Worklist.empty()) {
492 Instruction *UselessPad = Worklist.pop_back_val();
493 auto Memo = MemoMap.find(UselessPad);
494 if (Memo != MemoMap.end() && Memo->second) {
495 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
496 // that it is a funclet that does have information about unwinding to
497 // a particular destination; its parent was a useless pad.
498 // Since its parent has no information, the unwind edge must not escape
499 // the parent, and must target a sibling of this pad. This local unwind
500 // gives us no information about EHPad. Leave it and the subtree rooted
501 // at it alone.
502 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
503 continue;
504 }
505 // We know we don't have information for UselesPad. If it has an entry in
506 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
507 // added on this invocation of getUnwindDestToken; if a previous invocation
508 // recorded nullptr, it would have had to prove that the ancestors of
509 // UselessPad, which include LastUselessPad, had no information, and that
510 // in turn would have required proving that the descendants of
511 // LastUselesPad, which include EHPad, have no information about
512 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
513 // the MemoMap on that invocation, which isn't the case if we got here.
514 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
515 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
516 // information that we'd be contradicting by making a map entry for it
517 // (which is something that getUnwindDestTokenHelper must have proved for
518 // us to get here). Just assert on is direct users here; the checks in
519 // this downward walk at its descendants will verify that they don't have
520 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
521 // unwind edges or unwind to a sibling).
522 MemoMap[UselessPad] = UnwindDestToken;
523 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
524 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
525 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
526 auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();
527 for (User *U : CatchPad->users()) {
528 assert((!isa<InvokeInst>(U) ||
530 ->getUnwindDest()
531 ->getFirstNonPHIIt()) == CatchPad)) &&
532 "Expected useless pad");
534 Worklist.push_back(cast<Instruction>(U));
535 }
536 }
537 } else {
538 assert(isa<CleanupPadInst>(UselessPad));
539 for (User *U : UselessPad->users()) {
540 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
541 assert(
542 (!isa<InvokeInst>(U) ||
544 &*cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHIIt()) ==
545 UselessPad)) &&
546 "Expected useless pad");
548 Worklist.push_back(cast<Instruction>(U));
549 }
550 }
551 }
552
553 return UnwindDestToken;
554}
555
556/// When we inline a basic block into an invoke,
557/// we have to turn all of the calls that can throw into invokes.
558/// This function analyze BB to see if there are any calls, and if so,
559/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
560/// nodes in that block with the values specified in InvokeDestPHIValues.
562 BasicBlock *BB, BasicBlock *UnwindEdge,
563 SmallSetVector<const Value *, 4> &OriginallyIndirectCalls,
564 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
566 // We only need to check for function calls: inlined invoke
567 // instructions require no special handling.
569
570 if (!CI || CI->doesNotThrow())
571 continue;
572
573 // We do not need to (and in fact, cannot) convert possibly throwing calls
574 // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
575 // invokes. The caller's "segment" of the deoptimization continuation
576 // attached to the newly inlined @llvm.experimental_deoptimize
577 // (resp. @llvm.experimental.guard) call should contain the exception
578 // handling logic, if any.
579 if (auto *F = CI->getCalledFunction())
580 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
581 F->getIntrinsicID() == Intrinsic::experimental_guard)
582 continue;
583
584 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
585 // This call is nested inside a funclet. If that funclet has an unwind
586 // destination within the inlinee, then unwinding out of this call would
587 // be UB. Rewriting this call to an invoke which targets the inlined
588 // invoke's unwind dest would give the call's parent funclet multiple
589 // unwind destinations, which is something that subsequent EH table
590 // generation can't handle and that the veirifer rejects. So when we
591 // see such a call, leave it as a call.
592 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
593 Value *UnwindDestToken =
594 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
595 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
596 continue;
597#ifndef NDEBUG
598 Instruction *MemoKey;
599 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
600 MemoKey = CatchPad->getCatchSwitch();
601 else
602 MemoKey = FuncletPad;
603 assert(FuncletUnwindMap->count(MemoKey) &&
604 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
605 "must get memoized to avoid confusing later searches");
606#endif // NDEBUG
607 }
608
609 bool WasIndirect = OriginallyIndirectCalls.remove(CI);
610 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
611 if (WasIndirect)
612 OriginallyIndirectCalls.insert(BB->getTerminator());
613 return BB;
614 }
615 return nullptr;
616}
617
618/// If we inlined an invoke site, we need to convert calls
619/// in the body of the inlined function into invokes.
620///
621/// II is the invoke instruction being inlined. FirstNewBlock is the first
622/// block of the inlined code (the last block is the end of the function),
623/// and InlineCodeInfo is information about the code that got inlined.
624static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
625 ClonedCodeInfo &InlinedCodeInfo) {
626 BasicBlock *InvokeDest = II->getUnwindDest();
627
628 Function *Caller = FirstNewBlock->getParent();
629
630 // The inlined code is currently at the end of the function, scan from the
631 // start of the inlined code to its end, checking for stuff we need to
632 // rewrite.
633 LandingPadInliningInfo Invoke(II);
634
635 // Get all of the inlined landing pad instructions.
637 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
638 I != E; ++I)
639 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
640 InlinedLPads.insert(II->getLandingPadInst());
641
642 // Append the clauses from the outer landing pad instruction into the inlined
643 // landing pad instructions.
644 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
645 for (LandingPadInst *InlinedLPad : InlinedLPads) {
646 unsigned OuterNum = OuterLPad->getNumClauses();
647 InlinedLPad->reserveClauses(OuterNum);
648 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
649 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
650 if (OuterLPad->isCleanup())
651 InlinedLPad->setCleanup(true);
652 }
653
654 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
655 BB != E; ++BB) {
656 if (InlinedCodeInfo.ContainsCalls)
658 &*BB, Invoke.getOuterResumeDest(),
659 InlinedCodeInfo.OriginallyIndirectCalls))
660 // Update any PHI nodes in the exceptional block to indicate that there
661 // is now a new entry in them.
662 Invoke.addIncomingPHIValuesFor(NewBB);
663
664 // Forward any resumes that are remaining here.
665 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
666 Invoke.forwardResume(RI, InlinedLPads);
667 }
668
669 // Now that everything is happy, we have one final detail. The PHI nodes in
670 // the exception destination block still have entries due to the original
671 // invoke instruction. Eliminate these entries (which might even delete the
672 // PHI node) now.
673 InvokeDest->removePredecessor(II->getParent());
674}
675
676/// If we inlined an invoke site, we need to convert calls
677/// in the body of the inlined function into invokes.
678///
679/// II is the invoke instruction being inlined. FirstNewBlock is the first
680/// block of the inlined code (the last block is the end of the function),
681/// and InlineCodeInfo is information about the code that got inlined.
682static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
683 ClonedCodeInfo &InlinedCodeInfo) {
684 BasicBlock *UnwindDest = II->getUnwindDest();
685 Function *Caller = FirstNewBlock->getParent();
686
687 assert(UnwindDest->getFirstNonPHIIt()->isEHPad() && "unexpected BasicBlock!");
688
689 // If there are PHI nodes in the unwind destination block, we need to keep
690 // track of which values came into them from the invoke before removing the
691 // edge from this block.
692 SmallVector<Value *, 8> UnwindDestPHIValues;
693 BasicBlock *InvokeBB = II->getParent();
694 for (PHINode &PHI : UnwindDest->phis()) {
695 // Save the value to use for this edge.
696 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
697 }
698
699 // Add incoming-PHI values to the unwind destination block for the given basic
700 // block, using the values for the original invoke's source block.
701 auto UpdatePHINodes = [&](BasicBlock *Src) {
702 BasicBlock::iterator I = UnwindDest->begin();
703 for (Value *V : UnwindDestPHIValues) {
705 PHI->addIncoming(V, Src);
706 ++I;
707 }
708 };
709
710 // This connects all the instructions which 'unwind to caller' to the invoke
711 // destination.
712 UnwindDestMemoTy FuncletUnwindMap;
713 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
714 BB != E; ++BB) {
715 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
716 if (CRI->unwindsToCaller()) {
717 auto *CleanupPad = CRI->getCleanupPad();
718 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI->getIterator());
719 CRI->eraseFromParent();
720 UpdatePHINodes(&*BB);
721 // Finding a cleanupret with an unwind destination would confuse
722 // subsequent calls to getUnwindDestToken, so map the cleanuppad
723 // to short-circuit any such calls and recognize this as an "unwind
724 // to caller" cleanup.
725 assert(!FuncletUnwindMap.count(CleanupPad) ||
726 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
727 FuncletUnwindMap[CleanupPad] =
728 ConstantTokenNone::get(Caller->getContext());
729 }
730 }
731
732 BasicBlock::iterator I = BB->getFirstNonPHIIt();
733 if (!I->isEHPad())
734 continue;
735
736 Instruction *Replacement = nullptr;
737 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
738 if (CatchSwitch->unwindsToCaller()) {
739 Value *UnwindDestToken;
740 if (auto *ParentPad =
741 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
742 // This catchswitch is nested inside another funclet. If that
743 // funclet has an unwind destination within the inlinee, then
744 // unwinding out of this catchswitch would be UB. Rewriting this
745 // catchswitch to unwind to the inlined invoke's unwind dest would
746 // give the parent funclet multiple unwind destinations, which is
747 // something that subsequent EH table generation can't handle and
748 // that the veirifer rejects. So when we see such a call, leave it
749 // as "unwind to caller".
750 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
751 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
752 continue;
753 } else {
754 // This catchswitch has no parent to inherit constraints from, and
755 // none of its descendants can have an unwind edge that exits it and
756 // targets another funclet in the inlinee. It may or may not have a
757 // descendant that definitively has an unwind to caller. In either
758 // case, we'll have to assume that any unwinds out of it may need to
759 // be routed to the caller, so treat it as though it has a definitive
760 // unwind to caller.
761 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
762 }
763 auto *NewCatchSwitch = CatchSwitchInst::Create(
764 CatchSwitch->getParentPad(), UnwindDest,
765 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
766 CatchSwitch->getIterator());
767 for (BasicBlock *PadBB : CatchSwitch->handlers())
768 NewCatchSwitch->addHandler(PadBB);
769 // Propagate info for the old catchswitch over to the new one in
770 // the unwind map. This also serves to short-circuit any subsequent
771 // checks for the unwind dest of this catchswitch, which would get
772 // confused if they found the outer handler in the callee.
773 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
774 Replacement = NewCatchSwitch;
775 }
776 } else if (!isa<FuncletPadInst>(I)) {
777 llvm_unreachable("unexpected EHPad!");
778 }
779
780 if (Replacement) {
781 Replacement->takeName(&*I);
782 I->replaceAllUsesWith(Replacement);
783 I->eraseFromParent();
784 UpdatePHINodes(&*BB);
785 }
786 }
787
788 if (InlinedCodeInfo.ContainsCalls)
789 for (Function::iterator BB = FirstNewBlock->getIterator(),
790 E = Caller->end();
791 BB != E; ++BB)
793 &*BB, UnwindDest, InlinedCodeInfo.OriginallyIndirectCalls,
794 &FuncletUnwindMap))
795 // Update any PHI nodes in the exceptional block to indicate that there
796 // is now a new entry in them.
797 UpdatePHINodes(NewBB);
798
799 // Now that everything is happy, we have one final detail. The PHI nodes in
800 // the exception destination block still have entries due to the original
801 // invoke instruction. Eliminate these entries (which might even delete the
802 // PHI node) now.
803 UnwindDest->removePredecessor(InvokeBB);
804}
805
806static bool haveCommonPrefix(MDNode *MIBStackContext,
807 MDNode *CallsiteStackContext) {
808 assert(MIBStackContext->getNumOperands() > 0 &&
809 CallsiteStackContext->getNumOperands() > 0);
810 // Because of the context trimming performed during matching, the callsite
811 // context could have more stack ids than the MIB. We match up to the end of
812 // the shortest stack context.
813 for (auto MIBStackIter = MIBStackContext->op_begin(),
814 CallsiteStackIter = CallsiteStackContext->op_begin();
815 MIBStackIter != MIBStackContext->op_end() &&
816 CallsiteStackIter != CallsiteStackContext->op_end();
817 MIBStackIter++, CallsiteStackIter++) {
818 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
819 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
820 assert(Val1 && Val2);
821 if (Val1->getZExtValue() != Val2->getZExtValue())
822 return false;
823 }
824 return true;
825}
826
  // Strip any !memprof metadata from this call.
  Call->setMetadata(LLVMContext::MD_memprof, nullptr);
}
830
  // Strip any !callsite metadata from this call.
  Call->setMetadata(LLVMContext::MD_callsite, nullptr);
}
834
                                  const std::vector<Metadata *> &MIBList,
  // Rebuild CI's !memprof metadata from the given MIB list. Note that
  // buildAndAttachMIBMetadata may instead summarize the contexts as a single
  // allocation-type function attribute, in which case no metadata is attached
  // (checked by the assert below).
  assert(!MIBList.empty());
  // Remove existing memprof, which will either be replaced or may not be needed
  // if we are able to use a single allocation type function attribute.
  for (Metadata *MIB : MIBList)
    CallStack.addCallStack(cast<MDNode>(MIB));
  bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
  assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
  if (!MemprofMDAttached)
    // If we used a function attribute remove the callsite metadata as well.
}
851
// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
// the call that was inlined.
static void propagateMemProfHelper(const CallBase *OrigCall,
                                   CallBase *ClonedCall,
                                   MDNode *InlinedCallsiteMD,
  MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
  MDNode *ClonedCallsiteMD = nullptr;
  // Check if the call originally had callsite metadata, and update it for the
  // new call in the inlined body.
  if (OrigCallsiteMD) {
    // The cloned call's context is now the concatenation of the original call's
    // callsite metadata and the callsite metadata on the call where it was
    // inlined.
    ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
    ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
  }

  // Update any memprof metadata on the cloned call.
  MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
  if (!OrigMemProfMD)
    return;
  // We currently expect that allocations with memprof metadata also have
  // callsite metadata for the allocation's part of the context.
  assert(OrigCallsiteMD);

  // New call's MIB list.
  std::vector<Metadata *> NewMIBList;

  // For each MIB metadata, check if its call stack context starts with the
  // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
  // the inlined body. If not, it stays on the out-of-line original call.
  for (auto &MIBOp : OrigMemProfMD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // Stack is first operand of MIB.
    MDNode *StackMD = getMIBStackNode(MIB);
    assert(StackMD);
    // See if the new cloned callsite context matches this profiled context.
    if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
      // Add it to the cloned call's MIB list.
      NewMIBList.push_back(MIB);
  }
  // No profiled context matched the inlined call stack: this clone is not part
  // of any profiled memory context, so drop both metadata kinds from it.
  if (NewMIBList.empty()) {
    removeMemProfMetadata(ClonedCall);
    removeCallsiteMetadata(ClonedCall);
    return;
  }
  // Only rewrite the metadata when some (but not all) MIBs moved to the
  // clone; if every MIB matched, the original !memprof applies unchanged.
  if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
    updateMemprofMetadata(ClonedCall, NewMIBList, ORE);
}
903
// Update memprof related metadata (!memprof and !callsite) based on the
// inlining of Callee into the callsite at CB. The updates include merging the
// inlined callee's callsite metadata with that of the inlined call,
// and moving the subset of any memprof contexts to the inlined callee
// allocations if they match the new inlined call stack.
static void
                         bool ContainsMemProfMetadata,
  // !callsite metadata on CB, if present, identifies the profiled context of
  // the call being inlined.
  MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
  // Only need to update if the inlined callsite had callsite metadata, or if
  // there was any memprof metadata inlined.
  if (!CallsiteMD && !ContainsMemProfMetadata)
    return;

  // Propagate metadata onto the cloned calls in the inlined callee.
  // VMap maps each original callee instruction to its clone (null when the
  // clone was simplified away).
  for (const auto &Entry : VMap) {
    // See if this is a call that has been inlined and remapped, and not
    // simplified away in the process.
    auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
    auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
    if (!OrigCall || !ClonedCall)
      continue;
    // If the inlined callsite did not have any callsite metadata, then it isn't
    // involved in any profiled call contexts, and we can remove any memprof
    // metadata on the cloned call.
    if (!CallsiteMD) {
      removeMemProfMetadata(ClonedCall);
      removeCallsiteMetadata(ClonedCall);
      continue;
    }
    propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD, ORE);
  }
}
939
/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  // Walk the block range [FStart, FEnd) and merge the call site's metadata
  // into every instruction that may read or write memory.
  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParalleLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}
982
/// Track inlining chain via inlined.from metadata for dontcall diagnostics.
static void PropagateInlinedFromMetadata(CallBase &CB, StringRef CalledFuncName,
                                         StringRef CallerFuncName,
                                         Function::iterator FStart,
                                         Function::iterator FEnd) {
  LLVMContext &Ctx = CB.getContext();
  // The inline-site location is taken from the !srcloc of the call being
  // inlined; it defaults to 0 when that metadata is absent.
  uint64_t InlineSiteLoc = 0;
  if (auto *MD = CB.getMetadata("srcloc"))
    if (auto *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0)))
      InlineSiteLoc = CI->getZExtValue();

  auto *I64Ty = Type::getInt64Ty(Ctx);
  // Wrap a uint64_t as i64 constant metadata for the chain entries.
  auto MakeMDInt = [&](uint64_t V) {
    return ConstantAsMetadata::get(ConstantInt::get(I64Ty, V));
  };

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // Only annotate calls that carry !srcloc and target a function marked
      // with a dontcall-error/dontcall-warn attribute.
      auto *CI = dyn_cast<CallInst>(&I);
      if (!CI || !CI->getMetadata("srcloc"))
        continue;
      auto *Callee = CI->getCalledFunction();
      if (!Callee || (!Callee->hasFnAttribute("dontcall-error") &&
                      !Callee->hasFnAttribute("dontcall-warn")))
        continue;

      // The chain is a flat list of (function name, srcloc) pairs. Extend an
      // existing chain, or seed it with the direct callee at location 0.
      if (MDNode *Existing = CI->getMetadata("inlined.from"))
        append_range(Ops, Existing->operands());
      else {
        Ops.push_back(MDString::get(Ctx, CalledFuncName));
        Ops.push_back(MakeMDInt(0));
      }
      Ops.push_back(MDString::get(Ctx, CallerFuncName));
      Ops.push_back(MakeMDInt(InlineSiteLoc));
      CI->setMetadata("inlined.from", MDNode::get(Ctx, Ops));
    }
  }
}
1022
/// Bundle operands of the inlined function must be added to inlined call sites.
                                    Instruction *CallSiteEHPad) {
  for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
    if (!I)
      continue;
    // Skip call sites which already have a "funclet" bundle.
    if (I->getOperandBundle(LLVMContext::OB_funclet))
      continue;
    // Skip call sites which are nounwind intrinsics (as long as they don't
    // lower into regular function calls in the course of IR transformations).
    auto *CalledFn =
        dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
    if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
        !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
      continue;

    I->getOperandBundlesAsDefs(OpBundles);
    OpBundles.emplace_back("funclet", CallSiteEHPad);

    // Operand bundles cannot be added to an existing call, so re-create the
    // call with the augmented bundle set and replace the original.
    Instruction *NewInst = CallBase::Create(I, OpBundles, I->getIterator());
    NewInst->takeName(I);
    I->replaceAllUsesWith(NewInst);
    I->eraseFromParent();
  }
}
1051
namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  // All scoped-alias metadata nodes found in the function, plus every MDNode
  // transitively reachable from them (see addRecursiveMetadataUses()).
  SetVector<const MDNode *> MD;
  // Maps each collected node to its clone once clone() has run.
  MetadataMap MDMap;
  // Extend MD with all MDNodes reachable from the initially collected nodes.
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace
1075
1076ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1077 const Function *F) {
1078 for (const BasicBlock &BB : *F) {
1079 for (const Instruction &I : BB) {
1080 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1081 MD.insert(M);
1082 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1083 MD.insert(M);
1084
1085 // We also need to clone the metadata in noalias intrinsics.
1086 if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1087 MD.insert(Decl->getScopeList());
1088 }
1089 }
1090 addRecursiveMetadataUses();
1091}
1092
1093void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1094 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1095 while (!Queue.empty()) {
1096 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1097 for (const Metadata *Op : M->operands())
1098 if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
1099 if (MD.insert(OpMD))
1100 Queue.push_back(OpMD);
1101 }
1102}
1103
void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  // First give every collected node a temporary placeholder clone so that
  // (possibly cyclic or forward) operand references can be wired up before
  // the final nodes exist.
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), {}));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    // RAUW forwards all references (including those inside MDMap) from the
    // placeholder to the final uniqued node.
    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}
1133
1134void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
1135 Function::iterator FEnd) {
1136 if (MDMap.empty())
1137 return; // Nothing to do.
1138
1139 for (BasicBlock &BB : make_range(FStart, FEnd)) {
1140 for (Instruction &I : BB) {
1141 // TODO: The null checks for the MDMap.lookup() results should no longer
1142 // be necessary.
1143 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1144 if (MDNode *MNew = MDMap.lookup(M))
1145 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1146
1147 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1148 if (MDNode *MNew = MDMap.lookup(M))
1149 I.setMetadata(LLVMContext::MD_noalias, MNew);
1150
1151 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1152 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1153 Decl->setScopeList(MNew);
1154 }
1155 }
1156}
1157
/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
    return;

  const Function *CalledFunc = CB.getCalledFunction();

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    // Build a human-readable scope name: "<func>: %<arg>" or
    // "<func>: argument <i>" for unnamed arguments.
    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      // The mapped value may be null if the clone was discarded; skip those.
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;

      // Collect the pointers this instruction accesses directly.
      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);

          // We'll retain this knowledge without additional metadata.
            continue;

          if (ME.onlyAccessesArgPointees())
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only care about pointer arguments. If a noalias argument is
          // accessed through a non-pointer argument, it must be captured
          // first (e.g. via ptrtoint), and we protect against captures below.
          if (!Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, this we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.

      for (const Value *V : PtrArgs) {
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        ObjSet.insert_range(Objects);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
           UsesUnknownObject = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        if (isEscapeSource(V)) {
          // An escape source can only alias with a noalias argument if it has
          // been captured beforehand.
          RequiresNoCaptureBefore = true;
        } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
          // If this is neither an escape source, nor some identified object
          // (which cannot directly alias a noalias argument), nor some other
          // argument (which, by definition, also cannot alias a noalias
          // argument), conservatively do not make any assumptions.
          UsesUnknownObject = true;
        }
      }

      // Nothing we can do if the used underlying object cannot be reliably
      // determined.
      if (UsesUnknownObject)
        continue;

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        RequiresNoCaptureBefore = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias set, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.contains(A))
          continue; // May be based on a noalias argument.

        // It might be tempting to skip the PointerMayBeCapturedBefore check if
        // A->hasNoCaptureAttr() is true, but this is incorrect because
        // nocapture only guarantees that no copies outlive the function, not
        // that the value cannot be locally captured.
        if (!RequiresNoCaptureBefore ||
            A, /*ReturnCaptures=*/false, I, &DT, /*IncludeI=*/false,
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
1404
                                                   ReturnInst *End) {

  // NOTE(review): appears to scan the instructions strictly between Begin and
  // End for anything that may not transfer execution to its successor (throw
  // or exit), bounded by InlinerAttributeWindow + 1 instructions — confirm
  // against the full definition.
  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  auto BeginIt = Begin->getIterator();
  assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
      ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
}
1415
// Add attributes from CB params and Fn attributes that can always be propagated
// to the corresponding argument / inner callbases.
                                         ValueToValueMapTy &VMap,
                                         ClonedCodeInfo &InlinedFunctionInfo) {
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  // Collect valid attributes for all params.
  SmallVector<AttrBuilder> ValidObjParamAttrs, ValidExactParamAttrs;
  bool HasAttrToPropagate = false;

  // Attributes we can only propagate if the exact parameter is forwarded.
  // We can propagate both poison generating and UB generating attributes
  // without any extra checks. The only attribute that is tricky to propagate
  // is `noundef` (skipped for now) as that can create new UB where previous
  // behavior was just using a poison value.
  static const Attribute::AttrKind ExactAttrsToPropagate[] = {
      Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
      Attribute::NonNull, Attribute::NoFPClass,
      Attribute::Alignment, Attribute::Range};

  // Per call-site argument: one builder of "object" attributes (valid for any
  // use with the same underlying object) and one of "exact" attributes (valid
  // only when the argument itself is forwarded).
  for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {
    ValidObjParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
    ValidExactParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
    // Access attributes can be propagated to any param with the same underlying
    // object as the argument.
    if (CB.paramHasAttr(I, Attribute::ReadNone))
      ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);
    if (CB.paramHasAttr(I, Attribute::ReadOnly))
      ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);

    for (Attribute::AttrKind AK : ExactAttrsToPropagate) {
      Attribute Attr = CB.getParamAttr(I, AK);
      if (Attr.isValid())
        ValidExactParamAttrs.back().addAttribute(Attr);
    }

    HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();
    HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();
  }

  // Won't be able to propagate anything.
  if (!HasAttrToPropagate)
    return;

  // Visit every call site in the callee body and, where its clone survived
  // inlining unchanged, merge the collected attributes into the clone.
  for (BasicBlock &BB : *CalledFunction) {
    for (Instruction &Ins : BB) {
      const auto *InnerCB = dyn_cast<CallBase>(&Ins);
      if (!InnerCB)
        continue;
      auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));
      if (!NewInnerCB)
        continue;
      // The InnerCB might have be simplified during the inlining
      // process which can make propagation incorrect.
      if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))
        continue;

      AttributeList AL = NewInnerCB->getAttributes();
      for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {
        // It's unsound or requires special handling to propagate
        // attributes to byval arguments. Even if CalledFunction
        // doesn't e.g. write to the argument (readonly), the call to
        // NewInnerCB may write to its by-value copy.
        if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))
          continue;

        // Don't bother propagating attrs to constants.
        if (match(NewInnerCB->getArgOperand(I),
          continue;

        // Check if the underlying value for the parameter is an argument.
        const Argument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(I));
        unsigned ArgNo;
        if (Arg) {
          ArgNo = Arg->getArgNo();
          // For dereferenceable, dereferenceable_or_null, align, etc...
          // we don't want to propagate if the existing param has the same
          // attribute with "better" constraints. So remove from the
          // new AL if the region of the existing param is larger than
          // what we can propagate.
          AttrBuilder NewAB{
              Context, AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};
          if (AL.getParamDereferenceableBytes(I) >
              NewAB.getDereferenceableBytes())
            NewAB.removeAttribute(Attribute::Dereferenceable);
          if (AL.getParamDereferenceableOrNullBytes(I) >
              NewAB.getDereferenceableOrNullBytes())
            NewAB.removeAttribute(Attribute::DereferenceableOrNull);
          if (AL.getParamAlignment(I).valueOrOne() >
              NewAB.getAlignment().valueOrOne())
            NewAB.removeAttribute(Attribute::Alignment);
          // Ranges compose by intersection: the value satisfies both the
          // existing and the propagated range.
          if (auto ExistingRange = AL.getParamRange(I)) {
            if (auto NewRange = NewAB.getRange()) {
              ConstantRange CombinedRange =
                  ExistingRange->intersectWith(*NewRange);
              NewAB.removeAttribute(Attribute::Range);
              NewAB.addRangeAttr(CombinedRange);
            }
          }

          if (FPClassTest ExistingNoFP = AL.getParamNoFPClass(I))
            NewAB.addNoFPClassAttr(ExistingNoFP | NewAB.getNoFPClass());

          AL = AL.addParamAttributes(Context, I, NewAB);
        } else if (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {
          // Check if the underlying value for the parameter is an argument.
          const Value *UnderlyingV =
              getUnderlyingObject(InnerCB->getArgOperand(I));
          Arg = dyn_cast<Argument>(UnderlyingV);
          if (!Arg)
            continue;
          ArgNo = Arg->getArgNo();
        } else {
          continue;
        }

        // If so, propagate its access attributes.
        AL = AL.addParamAttributes(Context, I, ValidObjParamAttrs[ArgNo]);

        // We can have conflicting attributes from the inner callsite and
        // to-be-inlined callsite. In that case, choose the most
        // restrictive.

        // readonly + writeonly means we can never deref so make readnone.
        if (AL.hasParamAttr(I, Attribute::ReadOnly) &&
            AL.hasParamAttr(I, Attribute::WriteOnly))
          AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);

        // If have readnone, need to clear readonly/writeonly
        if (AL.hasParamAttr(I, Attribute::ReadNone)) {
          AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);
          AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);
        }

        // Writable cannot exist in conjunction w/ readonly/readnone
        if (AL.hasParamAttr(I, Attribute::ReadOnly) ||
            AL.hasParamAttr(I, Attribute::ReadNone))
          AL = AL.removeParamAttribute(Context, I, Attribute::Writable);
      }
      NewInnerCB->setAttributes(AL);
    }
  }
}
1562
// Only allow these white listed attributes to be propagated back to the
// callee. This is because other attributes may only be valid on the call
// itself, i.e. attributes such as signext and zeroext.

// Attributes that are always okay to propagate as if they are violated its
// immediate UB.
  // Collect the return attributes on CB whose violation is immediate UB;
  // these can be copied onto the returned call without extra checks.
  AttrBuilder Valid(CB.getContext());
  if (auto DerefBytes = CB.getRetDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (CB.hasRetAttr(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (CB.hasRetAttr(Attribute::NoUndef))
    Valid.addAttribute(Attribute::NoUndef);
  return Valid;
}
1581
// Attributes that need additional checks as propagating them may change
// behavior or cause new UB.
  // Collect the return attributes on CB that only make the result more
  // poisonous when violated; callers must perform extra validity checks
  // before applying these to the returned call.
  AttrBuilder Valid(CB.getContext());
  if (CB.hasRetAttr(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  if (CB.hasRetAttr(Attribute::Alignment))
    Valid.addAlignmentAttr(CB.getRetAlign());
  if (std::optional<ConstantRange> Range = CB.getRange())
    Valid.addRangeAttr(*Range);
  if (CB.hasRetAttr(Attribute::NoFPClass))
    Valid.addNoFPClassAttr(CB.getRetNoFPClass());
  return Valid;
}
1596
// Propagate return attributes from the call site CB onto the cloned call
// instruction that produces the callee's returned value (looked up through
// VMap). UB-generating attributes (e.g. noundef, dereferenceable) and
// poison-generating attributes (nonnull, align, range, nofpclass) are
// collected separately because the latter need the extra legality checks
// documented inline below.
// NOTE(review): the first line of the signature is elided in this extract.
                                ClonedCodeInfo &InlinedFunctionInfo) {
  AttrBuilder CallSiteValidUB = IdentifyValidUBGeneratingAttributes(CB);
  AttrBuilder CallSiteValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
  // Nothing worth propagating: avoid scanning the callee at all.
  if (!CallSiteValidUB.hasAttributes() && !CallSiteValidPG.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  // Visit every block of the original callee that returns a call's result.
  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal. Simplification during inlining
    // could have transformed the cloned instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;

    // The RetVal might have be simplified during the inlining
    // process which can make propagation incorrect.
    if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))
      continue;
    // Backward propagation of attributes to the returned value may be incorrect
    // if it is control flow dependent.
    // Consider:
    // @callee {
    //  %rv = call @foo()
    //  %rv2 = call @bar()
    //  if (%rv2 != null)
    //    return %rv2
    //  if (%rv == null)
    //    exit()
    //  return %rv
    // }
    // caller() {
    //   %val = call nonnull @callee()
    // }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI are in the same basic block and
    // there are no throwing/exiting instructions between these instructions.
    // NOTE(review): the second operand of this disjunction (the
    // throwing/exiting-instructions check described above) is elided in this
    // extract — confirm against the full source.
    if (RI->getParent() != RetVal->getParent() ||
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttrBuilder ValidUB = IdentifyValidUBGeneratingAttributes(CB);
    AttrBuilder ValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
    AttributeList AL = NewRetVal->getAttributes();
    // Keep whichever dereferenceable byte counts are larger — the ones already
    // on the cloned call win over weaker call-site values.
    if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
      ValidUB.removeAttribute(Attribute::Dereferenceable);
    if (ValidUB.getDereferenceableOrNullBytes() <
        AL.getRetDereferenceableOrNullBytes())
      ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
    AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
    // Attributes that may generate poison returns are a bit tricky. If we
    // propagate them, other uses of the callsite might have their behavior
    // change or cause UB (if they have noundef) b.c of the new potential
    // poison.
    // Take the following three cases:
    //
    // 1)
    // define nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 2)
    // define noundef nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 3)
    // define nonnull ptr @foo() {
    //   %p = call noundef ptr @bar()
    //   ret ptr %p
    // }
    //
    // In case 1, we can't propagate nonnull because poison value in @use may
    // change behavior or trigger UB.
    // In case 2, we don't need to be concerned about propagating nonnull, as
    // any new poison at @use will trigger UB anyways.
    // In case 3, we can never propagate nonnull because it may create UB due to
    // the noundef on @bar.
    // Keep the stricter (larger) alignment already present on the cloned call.
    if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
      ValidPG.removeAttribute(Attribute::Alignment);
    if (ValidPG.hasAttributes()) {
      // Intersect the call site's range with any range already on the cloned
      // call so the combined attribute is no weaker than either.
      Attribute CBRange = ValidPG.getAttribute(Attribute::Range);
      if (CBRange.isValid()) {
        Attribute NewRange = AL.getRetAttr(Attribute::Range);
        if (NewRange.isValid()) {
          ValidPG.addRangeAttr(
              CBRange.getRange().intersectWith(NewRange.getRange()));
        }
      }

      // Union the excluded FP classes from both sources.
      Attribute CBNoFPClass = ValidPG.getAttribute(Attribute::NoFPClass);
      if (CBNoFPClass.isValid() && AL.hasRetAttr(Attribute::NoFPClass)) {
        ValidPG.addNoFPClassAttr(
            CBNoFPClass.getNoFPClass() |
            AL.getRetAttr(Attribute::NoFPClass).getNoFPClass());
      }

      // Three checks.
      // If the callsite has `noundef`, then a poison due to violating the
      // return attribute will create UB anyways so we can always propagate.
      // Otherwise, if the return value (callee to be inlined) has `noundef`, we
      // can't propagate as a new poison return will cause UB.
      // Finally, check if the return value has no uses whose behavior may
      // change/may cause UB if we potentially return poison. At the moment this
      // is implemented overly conservatively with a single-use check.
      // TODO: Update the single-use check to iterate through uses and only bail
      // if we have a potentially dangerous use.

      if (CB.hasRetAttr(Attribute::NoUndef) ||
          (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
        NewAL = NewAL.addRetAttributes(Context, ValidPG);
    }
    NewRetVal->setAttributes(NewAL);
  }
}
1727
/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
// NOTE(review): the function signature and the early-exit condition guarding
// this return are elided in this extract.
    return;

  auto &DL = CB.getDataLayout();
  // NOTE(review): the declaration of AC (used by getKnownAlignment below) is
  // elided in this extract — confirm against the full source.

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  // The tree is built lazily: only the first argument that actually needs an
  // assumption pays for the recalculation.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    // Only used pointer arguments with an explicit param alignment carry
    // information worth preserving; pass-pointee-by-value-copy arguments are
    // handled by the byval machinery instead.
    if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
        Arg.use_empty())
      continue;
    MaybeAlign Alignment = Arg.getParamAlign();
    if (!Alignment)
      continue;

    if (!DTCalculated) {
      DT.recalculate(*CB.getCaller());
      DTCalculated = true;
    }
    // If we can already prove the asserted alignment in the context of the
    // caller, then don't bother inserting the assumption.
    Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
    if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
      continue;

    CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
        DL, ArgVal, Alignment->value());
    // NOTE(review): the statement registering NewAsmp with the assumption
    // cache is elided in this extract — confirm against the full source.
  }
}
1766
1767static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1768 MaybeAlign SrcAlign, Module *M,
1769 BasicBlock *InsertBlock,
1770 InlineFunctionInfo &IFI,
1771 Function *CalledFunc) {
1772 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1773
1774 Value *Size =
1775 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1776
1777 Align DstAlign = Dst->getPointerAlignment(M->getDataLayout());
1778
1779 // Generate a memcpy with the correct alignments.
1780 CallInst *CI = Builder.CreateMemCpy(Dst, DstAlign, Src, SrcAlign, Size);
1781
1782 // The verifier requires that all calls of debug-info-bearing functions
1783 // from debug-info-bearing functions have a debug location (for inlining
1784 // purposes). Assign a dummy location to satisfy the constraint.
1785 if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
1786 if (DISubprogram *SP = CalledFunc->getSubprogram())
1787 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1788}
1789
/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
///
/// Returns either the original \p Arg (when the copy can be elided) or a new
/// entry-block alloca that the inlined body should use instead; the actual
/// copy into that alloca is emitted separately by HandleByValArgumentInit.
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  MaybeAlign ByValAlignment) {
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment.valueOrOne() == 1)
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
        *ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  Align Alignment = DL.getPrefTypeAlign(ByValType);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  if (ByValAlignment)
    Alignment = std::max(Alignment, *ByValAlignment);

  // Place the temporary at the very start of the caller's entry block so it
  // remains a static alloca.
  AllocaInst *NewAlloca =
      new AllocaInst(ByValType, Arg->getType()->getPointerAddressSpace(),
                     nullptr, Alignment, Arg->getName());
  NewAlloca->insertBefore(Caller->begin()->begin());
  IFI.StaticAllocas.push_back(NewAlloca);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
1843
// Check whether this Value is used by a lifetime intrinsic.
// NOTE(review): the function signature line is elided in this extract.
  for (User *U : V->users())
    // NOTE(review): the guard selecting only lifetime-intrinsic users is
    // elided in this extract — confirm against the full source.
      return true;
  return false;
}
1851
// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
// NOTE(review): the function signature line is elided in this extract.
  Type *Ty = AI->getType();
  Type *Int8PtrTy =
      PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
  // If the alloca already has the pointer type in its address space, inspect
  // its users directly.
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    // NOTE(review): the check on the cast user (presumably calling
    // isUsedByLifetimeMarker) is elided in this extract — confirm against the
    // full source.
      return true;
  }
  return false;
}
1870
/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
// NOTE(review): the function signature line is elided in this extract.
  // A constant array size plus no inalloca use are exactly the
  // placement-independent parts of the isStaticAlloca() predicate.
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}
1877
/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
// NOTE(review): the parameter line declaring the IANodes cache is elided in
// this extract.
  // Extend OrigDL's inlined-at chain with InlinedAt, reusing cached chain
  // nodes so chains stay shared across instructions.
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  // Rebuild the location with identical line/column/scope and atom
  // group/rank, but the new inlined-at chain.
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA, OrigDL.isImplicitCode(),
                         OrigDL->getAtomGroup(), OrigDL->getAtomRank());
}
1888
/// Update inlined instructions' line numbers to
/// to encode location where these instructions are inlined.
// NOTE(review): the first line of the signature is elided in this extract.
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  // Without a debug location on the call there is nothing to inline-at.
  if (!TheCall->getDebugLoc())
    return;

  // Don't propagate the source location atom from the call to inlined nodebug
  // instructions, and avoid putting it in the InlinedAt field of inlined
  // not-nodebug instructions. FIXME: Possibly worth transferring/generating
  // an atom for the returned value, otherwise we miss stepping on inlined
  // nodebug functions (which is different to existing behaviour).
  DebugLoc TheCallDL = TheCall->getDebugLoc()->getWithoutAtom();

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  // NOTE(review): the declaration of the IANodes cache map is elided in this
  // extract — confirm against the full source.

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  // Helper-util for updating the metadata attached to an instruction.
  auto UpdateInst = [&](Instruction &I) {
    // Loop metadata needs to be updated so that the start and end locs
    // reference inlined-at locations.
    auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                              &IANodes](Metadata *MD) -> Metadata * {
      if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
        return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
      return MD;
    };
    updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);

    // Normal case: the instruction has a location, so append the inlined-at
    // chain and we're done with it.
    if (!NoInlineLineTables)
      if (DebugLoc DL = I.getDebugLoc()) {
        DebugLoc IDL =
            inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
        I.setDebugLoc(IDL);
        return;
      }

    if (CalleeHasDebugInfo && !NoInlineLineTables)
      return;

    // If the inlined instruction has no line number, or if inline info
    // is not being generated, make it look as if it originates from the call
    // location. This is important for ((__always_inline, __nodebug__))
    // functions which must use caller location for all instructions in their
    // function body.

    // Don't update static allocas, as they may get moved later.
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      // NOTE(review): the static-in-entry guard on the alloca is elided in
      // this extract — confirm against the full source.
        return;

    // Do not force a debug loc for pseudo probes, since they do not need to
    // be debuggable, and also they are expected to have a zero/null dwarf
    // discriminator at this point which could be violated otherwise.
    // NOTE(review): the pseudo-probe guard itself is elided in this extract.
      return;

    I.setDebugLoc(TheCallDL);
  };

  // Helper-util for updating debug-info records attached to instructions.
  auto UpdateDVR = [&](DbgRecord *DVR) {
    assert(DVR->getDebugLoc() && "Debug Value must have debug loc");
    if (NoInlineLineTables) {
      // Use the call-site location wholesale when inline tables are disabled.
      DVR->setDebugLoc(TheCallDL);
      return;
    }
    DebugLoc DL = DVR->getDebugLoc();
    DebugLoc IDL =
        inlineDebugLoc(DL, InlinedAtNode,
                       DVR->getMarker()->getParent()->getContext(), IANodes);
    DVR->setDebugLoc(IDL);
  };

  // Iterate over all instructions, updating metadata and debug-info records.
  for (; FI != Fn->end(); ++FI) {
    for (Instruction &I : *FI) {
      UpdateInst(I);
      for (DbgRecord &DVR : I.getDbgRecordRange()) {
        UpdateDVR(&DVR);
      }
    }

    // Remove debug info records if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        BI->dropDbgRecords();
        ++BI;
      }
    }
  }
}
1997
1998#undef DEBUG_TYPE
1999#define DEBUG_TYPE "assignment-tracking"
/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
// NOTE(review): the first line of the signature is elided in this extract.
                                const CallBase &CB) {
  at::StorageToVarsMap EscapedLocals;
  // NOTE(review): the declaration of the SeenBases set used below is elided
  // in this extract — confirm against the full source.

  LLVM_DEBUG(
      errs() << "# Finding caller local variables escaped by callee\n");
  for (const Value *Arg : CB.args()) {
    LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
    // Only pointer arguments can expose caller-owned storage to the callee.
    if (!Arg->getType()->isPointerTy()) {
      LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
      continue;
    }

    const Instruction *I = dyn_cast<Instruction>(Arg);
    if (!I) {
      LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
      continue;
    }

    // Walk back to the base storage.
    assert(Arg->getType()->isPtrOrPtrVectorTy());
    APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
    // NOTE(review): the declaration of `Base` (built from the stripped
    // pointer below) is elided in this extract — confirm against the full
    // source.
        Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
    if (!Base) {
      LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
      continue;
    }

    assert(Base);
    LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
    // We only need to process each base address once - skip any duplicates.
    if (!SeenBases.insert(Base).second)
      continue;

    // Find all local variables associated with the backing storage.
    auto CollectAssignsForStorage = [&](DbgVariableRecord *DbgAssign) {
      // Skip variables from inlined functions - they are not local variables.
      if (DbgAssign->getDebugLoc().getInlinedAt())
        return;
      LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
      EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
    };
    for_each(at::getDVRAssignmentMarkers(Base), CollectAssignsForStorage);
  }
  return EscapedLocals;
}
2049
// Track assignment-tracking (DIAssignID) information for stores introduced by
// inlining the callee of CB.
// NOTE(review): the first line of the signature is elided in this extract.
                               const CallBase &CB) {
  LLVM_DEBUG(errs() << "trackInlinedStores into "
                    << Start->getParent()->getName() << " from "
                    << CB.getCalledFunction()->getName() << "\n");
  const DataLayout &DL = CB.getDataLayout();
  // NOTE(review): the statement performing the actual store tracking is
  // elided in this extract — confirm against the full source.
}
2058
/// Update inlined instructions' DIAssignID metadata. We need to do this
/// otherwise a function inlined more than once into the same function
/// will cause DIAssignID to be shared by many instructions.
// NOTE(review): the function signature lines are elided in this extract.
  // Loop over all the inlined instructions. If we find a DIAssignID
  // attachment or use, replace it with a new version.
  for (auto BBI = Start; BBI != End; ++BBI) {
    // remapAssignID rewrites any DIAssignID attachment or use on I via the
    // shared Map, so repeated inlines get distinct IDs.
    for (Instruction &I : *BBI)
      at::remapAssignID(Map, I);
  }
}
2071#undef DEBUG_TYPE
2072#define DEBUG_TYPE "inline-function"
2073
/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  // NOTE(review): the declaration of the ClonedBBs set is elided in this
  // extract — confirm against the full source.
  for (auto Entry : VMap) {
    // Only basic-block mappings that survived cloning are interesting here.
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that happens,
      // we want to use the maximum among the original blocks' frequencies.
      BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  // Pin the cloned entry block to the call site's frequency and rescale the
  // other clones relative to it.
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
}
2106
2107/// Update the branch metadata for cloned call instructions.
2108static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
2109 const ProfileCount &CalleeEntryCount,
2110 const CallBase &TheCall, ProfileSummaryInfo *PSI,
2111 BlockFrequencyInfo *CallerBFI) {
2112 if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
2113 return;
2114 auto CallSiteCount =
2115 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
2116 int64_t CallCount =
2117 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
2118 updateProfileCallee(Callee, -CallCount, &VMap);
2119}
2120
// Adjust the callee's entry count by EntryDelta after inlining, and rescale
// profile weights on calls both in the cloned (inlined) code and in the
// remaining out-of-line callee body.
// NOTE(review): the first signature line and the VMap parameter line are
// elided in this extract.
                                Function *Callee, int64_t EntryDelta,
  // Bail out when the callee carries no entry count to adjust.
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount)
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count and has to be set to 0 so guard against underflow.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  // Rescale profile data reachable via VPtr for the given call, if present.
  auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,
                                   const uint64_t PriorEntryCount) {
    // NOTE(review): the declaration of VPtr is elided in this extract —
    // confirm against the full source.
    if (VPtr)
      scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);
  };

  // During inlining ?
  if (VMap) {
    // The inlined clones absorb the portion of the count removed from the
    // callee.
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap) {
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
          updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
        }

      if (isa<InvokeInst>(Entry.first))
        if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
          II->updateProfWeight(CloneEntryCount, PriorEntryCount);
          updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);
        }
    }
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB) {
          if (CallInst *CI = dyn_cast<CallInst>(&I)) {
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
            updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
          }
          // NOTE(review): the guard matching InvokeInst (symmetric to the
          // CallInst case above) is elided in this extract.
            II->updateProfWeight(NewEntryCount, PriorEntryCount);
            updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);
          }
        }
  }
}
2180
/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call. This function inlines the retainRV/claimRV calls.
///
/// There are three cases to consider:
///
/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
///    object in the callee return block, the autoreleaseRV call and the
///    retainRV/claimRV call in the caller cancel out. If the call in the caller
///    is a claimRV call, a call to objc_release is emitted.
///
/// 2. If there is a call in the callee return block that doesn't have operand
///    bundle "clang.arc.attachedcall", the operand bundle on the original call
///    is transferred to the call in the callee.
///
/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
///    a retainRV call.
// NOTE(review): the line carrying the function name and first parameters is
// elided in this extract.
static void
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        // Stop scanning at any intrinsic that is not an unused autoreleaseRV
        // of the same RC-identity root.
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->use_empty() ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // If we've found a matching authoreleaseRV call:
        // - If claimRV is attached to the call, insert a call to objc_release
        //   and erase the autoreleaseRV call.
        // - If retainRV is attached to the call, just erase the autoreleaseRV
        //   call.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Builder.CreateIntrinsic(Intrinsic::objc_release, RetOpnd);
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);

      if (!CI)
        break;

      // NOTE(review): the second operand of this condition (presumably
      // checking whether CI already carries an attachedcall bundle) is elided
      // in this extract — confirm against the full source.
      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
        break;

      // If we've found an unannotated call that defines RetOpnd, add a
      // "clang.arc.attachedcall" operand bundle.
      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI->getIterator());
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee. Emit a call
      // to objc_retain.
      Builder.SetInsertPoint(RI);
      Builder.CreateIntrinsic(Intrinsic::objc_retain, RetOpnd);
    }
  }
}
2270
// In contextual profiling, when an inline succeeds, we want to remap the
// indices of the callee into the index space of the caller. We can't just leave
// them as-is because the same callee may appear in other places in this caller
// (other callsites), and its (callee's) counters and sub-contextual profile
// tree would be potentially different.
// Not all BBs of the callee may survive the opportunistic DCE InlineFunction
// does (same goes for callsites in the callee).
// We will return a pair of vectors, one for basic block IDs and one for
// callsites. For such a vector V, V[Idx] will be -1 if the callee
// instrumentation with index Idx did not survive inlining, and a new value
// otherwise.
// This function will update the caller's instrumentation intrinsics
// accordingly, mapping indices as described above. We also replace the "name"
// operand because we use it to distinguish between "own" instrumentation and
// "from callee" instrumentation when performing the traversal of the CFG of the
// caller. We traverse depth-first from the callsite's BB and up to the point we
// hit BBs owned by the caller.
// The return values will be then used to update the contextual
// profile. Note: we only update the "name" and "index" operands in the
// instrumentation intrinsics, we leave the hash and total nr of indices as-is,
// it's not worth updating those.
// NOTE(review): the line carrying the function name and first parameters is
// elided in this extract.
static std::pair<std::vector<int64_t>, std::vector<int64_t>>
             PGOContextualProfile &CtxProf, uint32_t CalleeCounters,
             uint32_t CalleeCallsites) {
  // We'll allocate a new ID to imported callsite counters and callsites. We're
  // using -1 to indicate a counter we delete. Most likely the entry ID, for
  // example, will be deleted - we don't want 2 IDs in the same BB, and the
  // entry would have been cloned in the callsite's old BB.
  std::vector<int64_t> CalleeCounterMap;
  std::vector<int64_t> CalleeCallsiteMap;
  CalleeCounterMap.resize(CalleeCounters, -1);
  CalleeCallsiteMap.resize(CalleeCallsites, -1);

  // Retarget a counter-increment intrinsic still named after the callee:
  // allocate (once) a fresh caller-space index for it and point it at the
  // caller. Returns true if the intrinsic was modified.
  auto RewriteInstrIfNeeded = [&](InstrProfIncrementInst &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCounterMap[OldID] == -1)
      CalleeCounterMap[OldID] = CtxProf.allocateNextCounterIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);

    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  // Same idea as above, but for callsite instrumentation intrinsics.
  auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCallsiteMap[OldID] == -1)
      CalleeCallsiteMap[OldID] = CtxProf.allocateNextCallsiteIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);

    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  std::deque<BasicBlock *> Worklist;
  // NOTE(review): the declaration of the `Seen` visited-set used below is
  // elided in this extract — confirm against the full source.
  // We will traverse the BBs starting from the callsite BB. The callsite BB
  // will have at least a BB ID - maybe its own, and in any case the one coming
  // from the cloned function's entry BB. The other BBs we'll start seeing from
  // there on may or may not have BB IDs. BBs with IDs belonging to our caller
  // are definitely not coming from the imported function and form a boundary
  // past which we don't need to traverse anymore. BBs may have no
  // instrumentation (because we originally inserted instrumentation as per
  // MST), in which case we'll traverse past them. An invariant we'll keep is
  // that a BB will have at most 1 BB ID. For example, in the callsite BB, we
  // will delete the callee BB's instrumentation. This doesn't result in
  // information loss: the entry BB of the callee will have the same count as
  // the callsite's BB. At the end of this traversal, all the callee's
  // instrumentation would be mapped into the caller's instrumentation index
  // space. Some of the callee's counters may be deleted (as mentioned, this
  // should result in no loss of information).
  Worklist.push_back(StartBB);
  while (!Worklist.empty()) {
    auto *BB = Worklist.front();
    Worklist.pop_front();
    bool Changed = false;
    auto *BBID = CtxProfAnalysis::getBBInstrumentation(*BB);
    if (BBID) {
      Changed |= RewriteInstrIfNeeded(*BBID);
      // this may be the entryblock from the inlined callee, coming into a BB
      // that didn't have instrumentation because of MST decisions. Let's make
      // sure it's placed accordingly. This is a noop elsewhere.
      BBID->moveBefore(BB->getFirstInsertionPt());
    }
    for (auto &I : llvm::make_early_inc_range(*BB)) {
      if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
        // NOTE(review): the guard distinguishing step instrumentation from
        // plain counters is elided in this extract — confirm against the full
        // source.
          // Step instrumentation is used for select instructions. Inlining may
          // have propagated a constant resulting in the condition of the select
          // being resolved, case in which function cloning resolves the value
          // of the select, and elides the select instruction. If that is the
          // case, the step parameter of the instrumentation will reflect that.
          // We can delete the instrumentation in that case.
          if (isa<Constant>(Inc->getStep())) {
            assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
            Inc->eraseFromParent();
          } else {
            assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
            RewriteInstrIfNeeded(*Inc);
          }
        } else if (Inc != BBID) {
          // If we're here it means that the BB had more than 1 IDs, presumably
          // some coming from the callee. We "made up our mind" to keep the
          // first one (which may or may not have been originally the caller's).
          // All the others are superfluous and we delete them.
          Inc->eraseFromParent();
          Changed = true;
        }
      } else if (auto *CS = dyn_cast<InstrProfCallsite>(&I)) {
        Changed |= RewriteCallsiteInsIfNeeded(*CS);
      }
    }
    // Caller-owned, untouched BBs form the traversal boundary (see the long
    // comment above); only continue past BBs without an ID or ones we changed.
    if (!BBID || Changed)
      for (auto *Succ : successors(BB))
        if (Seen.insert(Succ).second)
          Worklist.push_back(Succ);
  }

  assert(!llvm::is_contained(CalleeCounterMap, 0) &&
         "Counter index mapping should be either to -1 or to non-zero index, "
         "because the 0 "
         "index corresponds to the entry BB of the caller");
  assert(!llvm::is_contained(CalleeCallsiteMap, 0) &&
         "Callsite index mapping should be either to -1 or to non-zero index, "
         "because there should have been at least a callsite - the inlined one "
         "- which would have had a 0 index.");

  return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
}
2406
2407// Inline. If successful, update the contextual profile (if a valid one is
2408// given).
2409// The contextual profile data is organized in trees, as follows:
2410// - each node corresponds to a function
2411// - the root of each tree corresponds to an "entrypoint" - e.g.
2412// RPC handler for server side
2413// - the path from the root to a node is a particular call path
2414// - the counters stored in a node are counter values observed in that
2415// particular call path ("context")
2416// - the edges between nodes are annotated with callsite IDs.
2417//
2418// Updating the contextual profile after an inlining means, at a high level,
2419// copying over the data of the callee, **intentionally without any value
2420// scaling**, and copying over the callees of the inlined callee.
2421 llvm::InlineResult
2423                      PGOContextualProfile &CtxProf, bool MergeAttributes,
2424                      AAResults *CalleeAAR, bool InsertLifetime,
2425                      bool TrackInlineHistory, Function *ForwardVarArgsTo,
     // No specialized module means there is no contextual profile to maintain;
     // delegate to the plain InlineFunction overload.
2427   if (!CtxProf.isInSpecializedModule())
2428     return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2429                           TrackInlineHistory, ForwardVarArgsTo, ORE);
2430
2431   auto &Caller = *CB.getCaller();
2432   auto &Callee = *CB.getCalledFunction();
2433   auto *StartBB = CB.getParent();
2434
2435   // Get some preliminary data about the callsite before it might get inlined.
2436   // Inlining shouldn't delete the callee, but it's cleaner (and low-cost) to
2437   // get this data upfront and rely less on InlineFunction's behavior.
2438   const auto CalleeGUID = AssignGUIDPass::getGUID(Callee);
2439   auto *CallsiteIDIns = CtxProfAnalysis::getCallsiteInstrumentation(CB);
2440   const auto CallsiteID =
2441       static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2442
2443   const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);
2444   const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);
2445
2446   auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2447                             TrackInlineHistory, ForwardVarArgsTo, ORE);
2448   if (!Ret.isSuccess())
2449     return Ret;
2450
2451   // Inlining succeeded, we don't need the instrumentation of the inlined
2452   // callsite.
2453   CallsiteIDIns->eraseFromParent();
2454
2455   // Assigning Maps and then capturing references into it in the lambda because
2456   // captured structured bindings are a C++20 extension. We do also need a
2457   // capture here, though.
2458   const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,
2459                                         NumCalleeCounters, NumCalleeCallsites);
2460   const uint32_t NewCountersSize = CtxProf.getNumCounters(Caller);
2461
     // Applied (preorder) to every profile context of the caller; grafts the
     // inlined callee's counters and sub-contexts into the caller's context.
2462   auto Updater = [&](PGOCtxProfContext &Ctx) {
2463     assert(Ctx.guid() == AssignGUIDPass::getGUID(Caller));
2464     const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2465     assert(
2466         (Ctx.counters().size() +
2467              llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==
2468          NewCountersSize) &&
2469         "The caller's counters size should have grown by the number of new "
2470         "distinct counters inherited from the inlined callee.");
2471     Ctx.resizeCounters(NewCountersSize);
2472     // If the callsite wasn't exercised in this context, the value of the
2473     // counters coming from it is 0 - which it is right now, after resizing them
2474     // - and so we're done.
2475     auto CSIt = Ctx.callsites().find(CallsiteID);
2476     if (CSIt == Ctx.callsites().end())
2477       return;
2478     auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2479     // The callsite was exercised, but not with this callee (so presumably this
2480     // is an indirect callsite). Again, we're done here.
2481     if (CalleeCtxIt == CSIt->second.end())
2482       return;
2483
2484     // Let's pull in the counter values and the subcontexts coming from the
2485     // inlined callee.
2486     auto &CalleeCtx = CalleeCtxIt->second;
2487     assert(CalleeCtx.guid() == CalleeGUID);
2488
     // Copy each callee counter into its remapped slot in the caller. Indices
     // mapped to -1 correspond to callee BBs merged with existing caller BBs
     // and are intentionally skipped.
2489     for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {
2490       const int64_t NewIndex = CalleeCounterMap[I];
2491       if (NewIndex >= 0) {
2492         assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "
2493                                 "index, that's the caller's entry BB");
2494         Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];
2495       }
2496     }
     // Graft the callee's sub-context trees under the caller, keyed by the
     // remapped callsite indices; -1 again marks entries to drop.
2497     for (auto &[I, OtherSet] : CalleeCtx.callsites()) {
2498       const int64_t NewCSIdx = CalleeCallsiteMap[I];
2499       if (NewCSIdx >= 0) {
2500         assert(NewCSIdx != 0 &&
2501                "callsite index mapping shouldn't happen to a 0 index, the "
2502                "caller must've had at least one callsite (with such an index)");
2503         Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2504       }
2505     }
2506     // We know the traversal is preorder, so it wouldn't have yet looked at the
2507     // sub-contexts of this context that it's currently visiting. Meaning, the
2508     // erase below invalidates no iterators.
2509     auto Deleted = Ctx.callsites().erase(CallsiteID);
2510     assert(Deleted);
2511     (void)Deleted;
2512   };
2513   CtxProf.update(Updater, Caller);
2514   return Ret;
2515 }
2516
2518                                      InlineFunctionInfo &IFI) {
2519   assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
2520
2521   // FIXME: we don't inline callbr yet.
2522   if (isa<CallBrInst>(CB))
2523     return InlineResult::failure("We don't inline callbr yet.");
2524
2525   // If IFI has any state in it, zap it before we fill it in.
2526   IFI.reset();
2527
2528   Function *CalledFunc = CB.getCalledFunction();
2529   if (!CalledFunc ||               // Can't inline external function or indirect
2530       CalledFunc->isDeclaration()) // call!
2531     return InlineResult::failure("external or indirect");
2532
2533   // Don't inline if we've already inlined this callee through this call site
2534   // before to prevent infinite inlining through mutually recursive functions.
2535   if (MDNode *InlineHistory = CB.getMetadata(LLVMContext::MD_inline_history)) {
2536     for (const auto &Op : InlineHistory->operands()) {
2537       if (auto *MD = dyn_cast_or_null<ValueAsMetadata>(Op)) {
2538         if (MD->getValue() == CalledFunc) {
2539           return InlineResult::failure("inline history");
2540         }
2541       }
2542     }
2543   }
2544
2545   // The inliner does not know how to inline through calls with operand bundles
2546   // in general ...
2547   if (CB.hasOperandBundles()) {
2548     for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2549       auto OBUse = CB.getOperandBundleAt(i);
2550       uint32_t Tag = OBUse.getTagID();
2551       // ... but it knows how to inline through "deopt" operand bundles ...
2553         continue;
2554       // ... and "funclet" operand bundles.
2556         continue;
2558         continue;
2560         continue;
       // NOTE(review): the Tag comparisons guarding the continues above (and the
       // one selecting this branch) are elided in this rendering; this branch
       // stashes the convergence token for the inliner to hook up later.
2562         IFI.ConvergenceControlToken = OBUse.Inputs[0].get();
2563         continue;
2564       }
2565
       // Any bundle tag not recognized above blocks inlining.
2566       return InlineResult::failure("unsupported operand bundle");
2567     }
2568   }
2569
2570   // FIXME: The check below is redundant and incomplete. According to spec, if a
2571   // convergent call is missing a token, then the caller is using uncontrolled
2572   // convergence. If the callee has an entry intrinsic, then the callee is using
2573   // controlled convergence, and the call cannot be inlined. A proper
2574   // implementation of this check requires a whole new analysis that identifies
2575   // convergence in every function. For now, we skip that and just do this one
2576   // cursory check. The underlying assumption is that in a compiler flow that
2577   // fully implements convergence control tokens, there is no mixing of
2578   // controlled and uncontrolled convergent operations in the whole program.
2579   if (CB.isConvergent()) {
2580     if (!IFI.ConvergenceControlToken &&
2581         getConvergenceEntry(CalledFunc->getEntryBlock())) {
2582       return InlineResult::failure(
2583           "convergent call needs convergencectrl operand");
2584     }
2585   }
2586
2587   const BasicBlock *OrigBB = CB.getParent();
2588   const Function *Caller = OrigBB->getParent();
2589
2590   // GC poses two hazards to inlining, which only occur when the callee has GC:
2591   //  1. If the caller has no GC, then the callee's GC must be propagated to the
2592   //     caller.
2593   //  2. If the caller has a differing GC, it is invalid to inline.
2594   if (CalledFunc->hasGC()) {
2595     if (Caller->hasGC() && CalledFunc->getGC() != Caller->getGC())
2596       return InlineResult::failure("incompatible GC");
2597   }
2598
2599   // Get the personality function from the callee if it contains a landing pad.
2600   Constant *CalledPersonality =
2601       CalledFunc->hasPersonalityFn()
2602           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2603           : nullptr;
2604
2605   // Find the personality function used by the landing pads of the caller. If it
2606   // exists, then check to see that it matches the personality function used in
2607   // the callee.
2608   Constant *CallerPersonality =
2609       Caller->hasPersonalityFn()
2610           ? Caller->getPersonalityFn()->stripPointerCasts()
2611           : nullptr;
2612   if (CalledPersonality) {
2613     // If the personality functions match, then we can perform the
2614     // inlining. Otherwise, we can't inline.
2615     // TODO: This isn't 100% true. Some personality functions are proper
2616     //       supersets of others and can be used in place of the other.
2617     if (CallerPersonality && CalledPersonality != CallerPersonality)
2618       return InlineResult::failure("incompatible personality");
2619   }
2620
2621   // We need to figure out which funclet the callsite was in so that we may
2622   // properly nest the callee.
2623   if (CallerPersonality) {
2624     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
2625     if (isScopedEHPersonality(Personality)) {
       // Initializer elided in this rendering — presumably the callsite's
       // "funclet" operand bundle; verify against the upstream source.
2626       std::optional<OperandBundleUse> ParentFunclet =
2628       if (ParentFunclet)
2629         IFI.CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2630
2631       // OK, the inlining site is legal. What about the target function?
2632
2633       if (IFI.CallSiteEHPad) {
2634         if (Personality == EHPersonality::MSVC_CXX) {
2635           // The MSVC personality cannot tolerate catches getting inlined into
2636           // cleanup funclets.
2638             // Ok, the call site is within a cleanuppad.  Let's check the callee
2639             // for catchpads.
2640             for (const BasicBlock &CalledBB : *CalledFunc) {
2641               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHIIt()))
2642                 return InlineResult::failure("catch in cleanup funclet");
2643             }
2644           }
2645         } else if (isAsynchronousEHPersonality(Personality)) {
2646           // SEH is even less tolerant, there may not be any sort of exceptional
2647           // funclet in the callee.
2648           for (const BasicBlock &CalledBB : *CalledFunc) {
2649             if (CalledBB.isEHPad())
2650               return InlineResult::failure("SEH in cleanup funclet");
2651           }
2652         }
2653       }
2654     }
2655   }
2656
   // All structural hazards checked; the callsite is inlinable.
2657   return InlineResult::success();
2658 }
2659
2660/// This function inlines the called function into the basic block of the
2661/// caller. This returns false if it is not possible to inline this call.
2662/// The program is still in a well defined state if this occurs though.
2663///
2664/// Note that this only does one level of inlining. For example, if the
2665/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2666/// exists in the instruction stream. Similarly this will inline a recursive
2667/// function by one level.
2669 bool MergeAttributes, AAResults *CalleeAAR,
2670 bool InsertLifetime, bool TrackInlineHistory,
2671 Function *ForwardVarArgsTo,
2673 BasicBlock *OrigBB = CB.getParent();
2674 Function *Caller = OrigBB->getParent();
2675 Function *CalledFunc = CB.getCalledFunction();
2676 assert(CalledFunc && !CalledFunc->isDeclaration() &&
2677 "CanInlineCallSite should have verified direct call to definition");
2678
2679 // Determine if we are dealing with a call in an EHPad which does not unwind
2680 // to caller.
2681 bool EHPadForCallUnwindsLocally = false;
2682 if (IFI.CallSiteEHPad && isa<CallInst>(CB)) {
2683 UnwindDestMemoTy FuncletUnwindMap;
2684 Value *CallSiteUnwindDestToken =
2685 getUnwindDestToken(IFI.CallSiteEHPad, FuncletUnwindMap);
2686
2687 EHPadForCallUnwindsLocally =
2688 CallSiteUnwindDestToken &&
2689 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2690 }
2691
2692 // Get an iterator to the last basic block in the function, which will have
2693 // the new function inlined after it.
2694 Function::iterator LastBlock = --Caller->end();
2695
2696 // Make sure to capture all of the return instructions from the cloned
2697 // function.
2699 ClonedCodeInfo InlinedFunctionInfo;
2700 Function::iterator FirstNewBlock;
2701
2702 // GC poses two hazards to inlining, which only occur when the callee has GC:
2703 // 1. If the caller has no GC, then the callee's GC must be propagated to the
2704 // caller.
2705 // 2. If the caller has a differing GC, it is invalid to inline.
2706 if (CalledFunc->hasGC()) {
2707 if (!Caller->hasGC())
2708 Caller->setGC(CalledFunc->getGC());
2709 else {
2710 assert(CalledFunc->getGC() == Caller->getGC() &&
2711 "CanInlineCallSite should have verified compatible GCs");
2712 }
2713 }
2714
2715 if (CalledFunc->hasPersonalityFn()) {
2716 Constant *CalledPersonality =
2717 CalledFunc->getPersonalityFn()->stripPointerCasts();
2718 if (!Caller->hasPersonalityFn()) {
2719 Caller->setPersonalityFn(CalledPersonality);
2720 } else
2721 assert(Caller->getPersonalityFn()->stripPointerCasts() ==
2722 CalledPersonality &&
2723 "CanInlineCallSite should have verified compatible personality");
2724 }
2725
2726 { // Scope to destroy VMap after cloning.
2727 ValueToValueMapTy VMap;
2728 struct ByValInit {
2729 Value *Dst;
2730 Value *Src;
2731 MaybeAlign SrcAlign;
2732 Type *Ty;
2733 };
2734 // Keep a list of tuples (dst, src, src_align) to emit byval
2735 // initializations. Src Alignment is only available though the callbase,
2736 // therefore has to be saved.
2737 SmallVector<ByValInit, 4> ByValInits;
2738
2739 // When inlining a function that contains noalias scope metadata,
2740 // this metadata needs to be cloned so that the inlined blocks
2741 // have different "unique scopes" at every call site.
2742 // Track the metadata that must be cloned. Do this before other changes to
2743 // the function, so that we do not get in trouble when inlining caller ==
2744 // callee.
2745 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2746
2747 auto &DL = Caller->getDataLayout();
2748
2749 // Calculate the vector of arguments to pass into the function cloner, which
2750 // matches up the formal to the actual argument values.
2751 auto AI = CB.arg_begin();
2752 unsigned ArgNo = 0;
2753 for (Function::arg_iterator I = CalledFunc->arg_begin(),
2754 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
2755 Value *ActualArg = *AI;
2756
2757 // When byval arguments actually inlined, we need to make the copy implied
2758 // by them explicit. However, we don't do this if the callee is readonly
2759 // or readnone, because the copy would be unneeded: the callee doesn't
2760 // modify the struct.
2761 if (CB.isByValArgument(ArgNo)) {
2762 ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2763 &CB, CalledFunc, IFI,
2764 CalledFunc->getParamAlign(ArgNo));
2765 if (ActualArg != *AI)
2766 ByValInits.push_back({ActualArg, (Value *)*AI,
2767 CB.getParamAlign(ArgNo),
2768 CB.getParamByValType(ArgNo)});
2769 }
2770
2771 VMap[&*I] = ActualArg;
2772 }
2773
2774 // TODO: Remove this when users have been updated to the assume bundles.
2775 // Add alignment assumptions if necessary. We do this before the inlined
2776 // instructions are actually cloned into the caller so that we can easily
2777 // check what will be known at the start of the inlined code.
2778 AddAlignmentAssumptions(CB, IFI);
2779
2780 AssumptionCache *AC =
2781 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2782
2783 /// Preserve all attributes on of the call and its parameters.
2784 salvageKnowledge(&CB, AC);
2785
2786 // We want the inliner to prune the code as it copies. We would LOVE to
2787 // have no dead or constant instructions leftover after inlining occurs
2788 // (which can happen, e.g., because an argument was constant), but we'll be
2789 // happy with whatever the cloner can do.
2790 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2791 /*ModuleLevelChanges=*/false, Returns, ".i",
2792 InlinedFunctionInfo);
2793 // Remember the first block that is newly cloned over.
2794 FirstNewBlock = LastBlock; ++FirstNewBlock;
2795
2796 // Insert retainRV/clainRV runtime calls.
2798 if (RVCallKind != objcarc::ARCInstKind::None)
2799 inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2800
2801 // Updated caller/callee profiles only when requested. For sample loader
2802 // inlining, the context-sensitive inlinee profile doesn't need to be
2803 // subtracted from callee profile, and the inlined clone also doesn't need
2804 // to be scaled based on call site count.
2805 if (IFI.UpdateProfile) {
2806 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2807 // Update the BFI of blocks cloned into the caller.
2808 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2809 CalledFunc->front());
2810
2811 if (auto Profile = CalledFunc->getEntryCount())
2812 updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2813 IFI.CallerBFI);
2814 }
2815
2816 // Inject byval arguments initialization.
2817 for (ByValInit &Init : ByValInits)
2818 HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Init.SrcAlign,
2819 Caller->getParent(), &*FirstNewBlock, IFI,
2820 CalledFunc);
2821
2822 std::optional<OperandBundleUse> ParentDeopt =
2824 if (ParentDeopt) {
2826
2827 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2829 if (!ICS)
2830 continue; // instruction was DCE'd or RAUW'ed to undef
2831
2832 OpDefs.clear();
2833
2834 OpDefs.reserve(ICS->getNumOperandBundles());
2835
2836 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2837 ++COBi) {
2838 auto ChildOB = ICS->getOperandBundleAt(COBi);
2839 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2840 // If the inlined call has other operand bundles, let them be
2841 OpDefs.emplace_back(ChildOB);
2842 continue;
2843 }
2844
2845 // It may be useful to separate this logic (of handling operand
2846 // bundles) out to a separate "policy" component if this gets crowded.
2847 // Prepend the parent's deoptimization continuation to the newly
2848 // inlined call's deoptimization continuation.
2849 std::vector<Value *> MergedDeoptArgs;
2850 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2851 ChildOB.Inputs.size());
2852
2853 llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2854 llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2855
2856 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2857 }
2858
2859 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS->getIterator());
2860
2861 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2862 // this even if the call returns void.
2863 ICS->replaceAllUsesWith(NewI);
2864
2865 VH = nullptr;
2866 ICS->eraseFromParent();
2867 }
2868 }
2869
2870 // For 'nodebug' functions, the associated DISubprogram is always null.
2871 // Conservatively avoid propagating the callsite debug location to
2872 // instructions inlined from a function whose DISubprogram is not null.
2873 fixupLineNumbers(Caller, FirstNewBlock, &CB,
2874 CalledFunc->getSubprogram() != nullptr);
2875
2876 if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2877 // Interpret inlined stores to caller-local variables as assignments.
2878 trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2879
2880 // Update DIAssignID metadata attachments and uses so that they are
2881 // unique to this inlined instance.
2882 fixupAssignments(FirstNewBlock, Caller->end());
2883 }
2884
2885 // Now clone the inlined noalias scope metadata.
2886 SAMetadataCloner.clone();
2887 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2888
2889 // Add noalias metadata if necessary.
2890 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2891
2892 // Clone return attributes on the callsite into the calls within the inlined
2893 // function which feed into its return value.
2894 AddReturnAttributes(CB, VMap, InlinedFunctionInfo);
2895
2896 // Clone attributes on the params of the callsite to calls within the
2897 // inlined function which use the same param.
2898 AddParamAndFnBasicAttributes(CB, VMap, InlinedFunctionInfo);
2899
2901 CalledFunc, CB, InlinedFunctionInfo.ContainsMemProfMetadata, VMap, ORE);
2902
2903 // Propagate metadata on the callsite if necessary.
2904 PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2905
2906 // Propagate implicit ref metadata.
2907 if (CalledFunc->hasMetadata(LLVMContext::MD_implicit_ref)) {
2909 CalledFunc->getMetadata(LLVMContext::MD_implicit_ref, MDs);
2910 for (MDNode *MD : MDs) {
2911 Caller->addMetadata(LLVMContext::MD_implicit_ref, *MD);
2912 }
2913 }
2914
2915 // Propagate inlined.from metadata for dontcall diagnostics.
2916 PropagateInlinedFromMetadata(CB, CalledFunc->getName(), Caller->getName(),
2917 FirstNewBlock, Caller->end());
2918
2919 // Register any cloned assumptions.
2920 if (IFI.GetAssumptionCache)
2921 for (BasicBlock &NewBlock :
2922 make_range(FirstNewBlock->getIterator(), Caller->end()))
2923 for (Instruction &I : NewBlock)
2924 if (auto *II = dyn_cast<AssumeInst>(&I))
2925 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2926 }
2927
2928 if (IFI.ConvergenceControlToken) {
2929 IntrinsicInst *IntrinsicCall = getConvergenceEntry(*FirstNewBlock);
2930 if (IntrinsicCall) {
2931 IntrinsicCall->replaceAllUsesWith(IFI.ConvergenceControlToken);
2932 IntrinsicCall->eraseFromParent();
2933 }
2934 }
2935
2936 // If there are any alloca instructions in the block that used to be the entry
2937 // block for the callee, move them to the entry block of the caller. First
2938 // calculate which instruction they should be inserted before. We insert the
2939 // instructions at the end of the current alloca list.
2940 {
2941 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2942 for (BasicBlock::iterator I = FirstNewBlock->begin(),
2943 E = FirstNewBlock->end(); I != E; ) {
2945 if (!AI) continue;
2946
2947 // If the alloca is now dead, remove it. This often occurs due to code
2948 // specialization.
2949 if (AI->use_empty()) {
2950 AI->eraseFromParent();
2951 continue;
2952 }
2953
2955 continue;
2956
2957 // Keep track of the static allocas that we inline into the caller.
2958 IFI.StaticAllocas.push_back(AI);
2959
2960 // Scan for the block of allocas that we can move over, and move them
2961 // all at once.
2962 while (isa<AllocaInst>(I) &&
2963 !cast<AllocaInst>(I)->use_empty() &&
2965 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2966 ++I;
2967 }
2968
2969 // Transfer all of the allocas over in a block. Using splice means
2970 // that the instructions aren't removed from the symbol table, then
2971 // reinserted.
2972 I.setTailBit(true);
2973 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2974 AI->getIterator(), I);
2975 }
2976 }
2977
2978 // If the call to the callee cannot throw, set the 'nounwind' flag on any
2979 // calls that we inline.
2980 bool MarkNoUnwind = CB.doesNotThrow();
2981
2982 SmallVector<Value*,4> VarArgsToForward;
2983 SmallVector<AttributeSet, 4> VarArgsAttrs;
2984 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2985 i < CB.arg_size(); i++) {
2986 VarArgsToForward.push_back(CB.getArgOperand(i));
2987 VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2988 }
2989
2990 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2991 if (InlinedFunctionInfo.ContainsCalls) {
2992 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2993 if (CallInst *CI = dyn_cast<CallInst>(&CB))
2994 CallSiteTailKind = CI->getTailCallKind();
2995
2996 // For inlining purposes, the "notail" marker is the same as no marker.
2997 if (CallSiteTailKind == CallInst::TCK_NoTail)
2998 CallSiteTailKind = CallInst::TCK_None;
2999
3000 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
3001 ++BB) {
3004 if (!CI)
3005 continue;
3006
3007 // Forward varargs from inlined call site to calls to the
3008 // ForwardVarArgsTo function, if requested, and to musttail calls.
3009 if (!VarArgsToForward.empty() &&
3010 ((ForwardVarArgsTo &&
3011 CI->getCalledFunction() == ForwardVarArgsTo) ||
3012 CI->isMustTailCall())) {
3013 // Collect attributes for non-vararg parameters.
3014 AttributeList Attrs = CI->getAttributes();
3016 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
3017 for (unsigned ArgNo = 0;
3018 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
3019 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
3020 }
3021
3022 // Add VarArg attributes.
3023 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
3024 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
3025 Attrs.getRetAttrs(), ArgAttrs);
3026 // Add VarArgs to existing parameters.
3027 SmallVector<Value *, 6> Params(CI->args());
3028 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
3029 CallInst *NewCI = CallInst::Create(
3030 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI->getIterator());
3031 NewCI->setDebugLoc(CI->getDebugLoc());
3032 NewCI->setAttributes(Attrs);
3033 NewCI->setCallingConv(CI->getCallingConv());
3034 CI->replaceAllUsesWith(NewCI);
3035 CI->eraseFromParent();
3036 CI = NewCI;
3037 }
3038
3039 if (Function *F = CI->getCalledFunction())
3040 InlinedDeoptimizeCalls |=
3041 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
3042
3043 // We need to reduce the strength of any inlined tail calls. For
3044 // musttail, we have to avoid introducing potential unbounded stack
3045 // growth. For example, if functions 'f' and 'g' are mutually recursive
3046 // with musttail, we can inline 'g' into 'f' so long as we preserve
3047 // musttail on the cloned call to 'f'. If either the inlined call site
3048 // or the cloned call site is *not* musttail, the program already has
3049 // one frame of stack growth, so it's safe to remove musttail. Here is
3050 // a table of example transformations:
3051 //
3052 // f -> musttail g -> musttail f ==> f -> musttail f
3053 // f -> musttail g -> tail f ==> f -> tail f
3054 // f -> g -> musttail f ==> f -> f
3055 // f -> g -> tail f ==> f -> f
3056 //
3057 // Inlined notail calls should remain notail calls.
3058 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
3059 if (ChildTCK != CallInst::TCK_NoTail)
3060 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
3061 CI->setTailCallKind(ChildTCK);
3062 InlinedMustTailCalls |= CI->isMustTailCall();
3063
3064 // Call sites inlined through a 'nounwind' call site should be
3065 // 'nounwind' as well. However, avoid marking call sites explicitly
3066 // where possible. This helps expose more opportunities for CSE after
3067 // inlining, commonly when the callee is an intrinsic.
3068 if (MarkNoUnwind && !CI->doesNotThrow())
3069 CI->setDoesNotThrow();
3070 }
3071 }
3072 }
3073
3074 // Leave lifetime markers for the static alloca's, scoping them to the
3075 // function we just inlined.
3076 // We need to insert lifetime intrinsics even at O0 to avoid invalid
3077 // access caused by multithreaded coroutines. The check
3078 // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
3079 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
3080 !IFI.StaticAllocas.empty()) {
3081 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
3082 for (AllocaInst *AI : IFI.StaticAllocas) {
3083 // Don't mark swifterror allocas. They can't have bitcast uses.
3084 if (AI->isSwiftError())
3085 continue;
3086
3087 // If the alloca is already scoped to something smaller than the whole
3088 // function then there's no need to add redundant, less accurate markers.
3089 if (hasLifetimeMarkers(AI))
3090 continue;
3091
3092 std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
3093 if (Size && Size->isZero())
3094 continue;
3095
3096 builder.CreateLifetimeStart(AI);
3097 for (ReturnInst *RI : Returns) {
3098 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
3099 // call and a return. The return kills all local allocas.
3100 if (InlinedMustTailCalls &&
3101 RI->getParent()->getTerminatingMustTailCall())
3102 continue;
3103 if (InlinedDeoptimizeCalls &&
3104 RI->getParent()->getTerminatingDeoptimizeCall())
3105 continue;
3106 IRBuilder<>(RI).CreateLifetimeEnd(AI);
3107 }
3108 }
3109 }
3110
3111 // If the inlined code contained dynamic alloca instructions, wrap the inlined
3112 // code with llvm.stacksave/llvm.stackrestore intrinsics.
3113 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
3114 // Insert the llvm.stacksave.
3115 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
3116 .CreateStackSave("savedstack");
3117
3118 // Insert a call to llvm.stackrestore before any return instructions in the
3119 // inlined function.
3120 for (ReturnInst *RI : Returns) {
3121 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
3122 // call and a return. The return will restore the stack pointer.
3123 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
3124 continue;
3125 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
3126 continue;
3127 IRBuilder<>(RI).CreateStackRestore(SavedPtr);
3128 }
3129 }
3130
3131 // If we are inlining for an invoke instruction, we must make sure to rewrite
3132 // any call instructions into invoke instructions. This is sensitive to which
3133 // funclet pads were top-level in the inlinee, so must be done before
3134 // rewriting the "parent pad" links.
3135 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
3136 BasicBlock *UnwindDest = II->getUnwindDest();
3137 BasicBlock::iterator FirstNonPHI = UnwindDest->getFirstNonPHIIt();
3138 if (isa<LandingPadInst>(FirstNonPHI)) {
3139 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3140 } else {
3141 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3142 }
3143 }
3144
3145 // Update the lexical scopes of the new funclets and callsites.
3146 // Anything that had 'none' as its parent is now nested inside the callsite's
3147 // EHPad.
3148 if (IFI.CallSiteEHPad) {
3149 for (Function::iterator BB = FirstNewBlock->getIterator(),
3150 E = Caller->end();
3151 BB != E; ++BB) {
3152 // Add bundle operands to inlined call sites.
3154
3155 // It is problematic if the inlinee has a cleanupret which unwinds to
3156 // caller and we inline it into a call site which doesn't unwind but into
3157 // an EH pad that does. Such an edge must be dynamically unreachable.
3158 // As such, we replace the cleanupret with unreachable.
3159 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
3160 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3161 changeToUnreachable(CleanupRet);
3162
3163 BasicBlock::iterator I = BB->getFirstNonPHIIt();
3164 if (!I->isEHPad())
3165 continue;
3166
3167 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
3168 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3169 CatchSwitch->setParentPad(IFI.CallSiteEHPad);
3170 } else {
3171 auto *FPI = cast<FuncletPadInst>(I);
3172 if (isa<ConstantTokenNone>(FPI->getParentPad()))
3173 FPI->setParentPad(IFI.CallSiteEHPad);
3174 }
3175 }
3176 }
3177
3178 if (InlinedDeoptimizeCalls) {
3179 // We need to at least remove the deoptimizing returns from the Return set,
3180 // so that the control flow from those returns does not get merged into the
3181 // caller (but terminate it instead). If the caller's return type does not
3182 // match the callee's return type, we also need to change the return type of
3183 // the intrinsic.
3184 if (Caller->getReturnType() == CB.getType()) {
3185 llvm::erase_if(Returns, [](ReturnInst *RI) {
3186 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
3187 });
3188 } else {
3189 SmallVector<ReturnInst *, 8> NormalReturns;
3190 Function *NewDeoptIntrinsic = Intrinsic::getOrInsertDeclaration(
3191 Caller->getParent(), Intrinsic::experimental_deoptimize,
3192 {Caller->getReturnType()});
3193
3194 for (ReturnInst *RI : Returns) {
3195 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
3196 if (!DeoptCall) {
3197 NormalReturns.push_back(RI);
3198 continue;
3199 }
3200
3201 // The calling convention on the deoptimize call itself may be bogus,
3202 // since the code we're inlining may have undefined behavior (and may
3203 // never actually execute at runtime); but all
3204 // @llvm.experimental.deoptimize declarations have to have the same
3205 // calling convention in a well-formed module.
3206 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
3207 NewDeoptIntrinsic->setCallingConv(CallingConv);
3208 auto *CurBB = RI->getParent();
3209 RI->eraseFromParent();
3210
3211 SmallVector<Value *, 4> CallArgs(DeoptCall->args());
3212
3214 DeoptCall->getOperandBundlesAsDefs(OpBundles);
3215 auto DeoptAttributes = DeoptCall->getAttributes();
3216 DeoptCall->eraseFromParent();
3217 assert(!OpBundles.empty() &&
3218 "Expected at least the deopt operand bundle");
3219
3220 IRBuilder<> Builder(CurBB);
3221 CallInst *NewDeoptCall =
3222 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3223 NewDeoptCall->setCallingConv(CallingConv);
3224 NewDeoptCall->setAttributes(DeoptAttributes);
3225 if (NewDeoptCall->getType()->isVoidTy())
3226 Builder.CreateRetVoid();
3227 else
3228 Builder.CreateRet(NewDeoptCall);
3229 // Since the ret type is changed, remove the incompatible attributes.
3230 NewDeoptCall->removeRetAttrs(AttributeFuncs::typeIncompatible(
3231 NewDeoptCall->getType(), NewDeoptCall->getRetAttributes()));
3232 }
3233
3234 // Leave behind the normal returns so we can merge control flow.
3235 std::swap(Returns, NormalReturns);
3236 }
3237 }
3238
3239 // Handle any inlined musttail call sites. In order for a new call site to be
3240 // musttail, the source of the clone and the inlined call site must have been
3241 // musttail. Therefore it's safe to return without merging control into the
3242 // phi below.
3243 if (InlinedMustTailCalls) {
3244 // Check if we need to bitcast the result of any musttail calls.
3245 Type *NewRetTy = Caller->getReturnType();
3246 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
3247
3248 // Handle the returns preceded by musttail calls separately.
3249 SmallVector<ReturnInst *, 8> NormalReturns;
3250 for (ReturnInst *RI : Returns) {
3251 CallInst *ReturnedMustTail =
3252 RI->getParent()->getTerminatingMustTailCall();
3253 if (!ReturnedMustTail) {
3254 NormalReturns.push_back(RI);
3255 continue;
3256 }
3257 if (!NeedBitCast)
3258 continue;
3259
3260 // Delete the old return and any preceding bitcast.
3261 BasicBlock *CurBB = RI->getParent();
3262 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
3263 RI->eraseFromParent();
3264 if (OldCast)
3265 OldCast->eraseFromParent();
3266
3267 // Insert a new bitcast and return with the right type.
3268 IRBuilder<> Builder(CurBB);
3269 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
3270 }
3271
3272 // Leave behind the normal returns so we can merge control flow.
3273 std::swap(Returns, NormalReturns);
3274 }
3275
3276 // Now that all of the transforms on the inlined code have taken place but
3277 // before we splice the inlined code into the CFG and lose track of which
3278 // blocks were actually inlined, collect the call sites. We only do this if
3279 // call graph updates weren't requested, as those provide value handle based
3280 // tracking of inlined call sites instead. Calls to intrinsics are not
3281 // collected because they are not inlineable.
3282 if (InlinedFunctionInfo.ContainsCalls) {
3283 // Otherwise just collect the raw call sites that were inlined.
3284 for (BasicBlock &NewBB :
3285 make_range(FirstNewBlock->getIterator(), Caller->end()))
3286 for (Instruction &I : NewBB)
3287 if (auto *CB = dyn_cast<CallBase>(&I))
3288 if (!(CB->getCalledFunction() &&
3290 IFI.InlinedCallSites.push_back(CB);
3291 }
3292
3293 for (CallBase *ICB : IFI.InlinedCallSites) {
3294 // We only track inline history if requested, or if the inlined call site
3295 // was originally an indirect call (it may have become a direct call
3296 // during inlining).
3297 if (TrackInlineHistory ||
3298 InlinedFunctionInfo.OriginallyIndirectCalls.contains(ICB)) {
3299 // !inline_history is {Callee, CB.inline_history, ICB.inline_history}.
3300 // Metadata nodes may be null if the referenced function was erased from
3301 // the module.
3303 History.push_back(ValueAsMetadata::get(CalledFunc));
3304 if (MDNode *CBHistory = CB.getMetadata(LLVMContext::MD_inline_history)) {
3305 for (const auto &Op : CBHistory->operands()) {
3306 if (Op)
3307 History.push_back(Op.get());
3308 }
3309 }
3310 if (MDNode *CBHistory =
3311 ICB->getMetadata(LLVMContext::MD_inline_history)) {
3312 for (const auto &Op : CBHistory->operands()) {
3313 if (Op)
3314 History.push_back(Op.get());
3315 }
3316 }
3317 MDNode *NewHistory = MDNode::get(Caller->getContext(), History);
3318 ICB->setMetadata(LLVMContext::MD_inline_history, NewHistory);
3319 }
3320 }
3321
3322 // If we cloned in _exactly one_ basic block, and if that block ends in a
3323 // return instruction, we splice the body of the inlined callee directly into
3324 // the calling basic block.
3325 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3326 // Move all of the instructions right before the call.
3327 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
3328 FirstNewBlock->end());
3329 // Remove the cloned basic block.
3330 Caller->back().eraseFromParent();
3331
3332 // If the call site was an invoke instruction, add a branch to the normal
3333 // destination.
3334 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3335 UncondBrInst *NewBr =
3336 UncondBrInst::Create(II->getNormalDest(), CB.getIterator());
3337 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
3338 }
3339
3340 // If the return instruction returned a value, replace uses of the call with
3341 // uses of the returned value.
3342 if (!CB.use_empty()) {
3343 ReturnInst *R = Returns[0];
3344 if (&CB == R->getReturnValue())
3346 else
3347 CB.replaceAllUsesWith(R->getReturnValue());
3348 }
3349 // Since we are now done with the Call/Invoke, we can delete it.
3350 CB.eraseFromParent();
3351
3352 // Since we are now done with the return instruction, delete it also.
3353 Returns[0]->eraseFromParent();
3354
3355 if (MergeAttributes)
3356 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3357
3358 // We are now done with the inlining.
3359 return;
3360 }
3361
3362 // Otherwise, we have the normal case, of more than one block to inline or
3363 // multiple return sites.
3364
3365 // We want to clone the entire callee function into the hole between the
3366 // "starter" and "ender" blocks. How we accomplish this depends on whether
3367 // this is an invoke instruction or a call instruction.
3368 BasicBlock *AfterCallBB;
3369 UncondBrInst *CreatedBranchToNormalDest = nullptr;
3370 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3371
3372 // Add an unconditional branch to make this look like the CallInst case...
3373 CreatedBranchToNormalDest =
3374 UncondBrInst::Create(II->getNormalDest(), CB.getIterator());
3375 // We intend to replace this DebugLoc with another later.
3376 CreatedBranchToNormalDest->setDebugLoc(DebugLoc::getTemporary());
3377
3378 // Split the basic block. This guarantees that no PHI nodes will have to be
3379 // updated due to new incoming edges, and make the invoke case more
3380 // symmetric to the call case.
3381 AfterCallBB =
3382 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
3383 CalledFunc->getName() + ".exit");
3384
3385 } else { // It's a call
3386 // If this is a call instruction, we need to split the basic block that
3387 // the call lives in.
3388 //
3389 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
3390 CalledFunc->getName() + ".exit");
3391 }
3392
3393 if (IFI.CallerBFI) {
3394 // Copy original BB's block frequency to AfterCallBB
3395 IFI.CallerBFI->setBlockFreq(AfterCallBB,
3396 IFI.CallerBFI->getBlockFreq(OrigBB));
3397 }
3398
3399 // Change the branch that used to go to AfterCallBB to branch to the first
3400 // basic block of the inlined function.
3401 //
3403 Br->setSuccessor(&*FirstNewBlock);
3404
3405 // Now that the function is correct, make it a little bit nicer. In
3406 // particular, move the basic blocks inserted from the end of the function
3407 // into the space made by splitting the source basic block.
3408 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
3409 Caller->end());
3410
3411 // Handle all of the return instructions that we just cloned in, and eliminate
3412 // any users of the original call/invoke instruction.
3413 Type *RTy = CalledFunc->getReturnType();
3414
3415 PHINode *PHI = nullptr;
3416 if (Returns.size() > 1) {
3417 // The PHI node should go at the front of the new basic block to merge all
3418 // possible incoming values.
3419 if (!CB.use_empty()) {
3420 PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
3421 PHI->insertBefore(AfterCallBB->begin());
3422 // Anything that used the result of the function call should now use the
3423 // PHI node as their operand.
3425 }
3426
3427 // Loop over all of the return instructions adding entries to the PHI node
3428 // as appropriate.
3429 if (PHI) {
3430 for (ReturnInst *RI : Returns) {
3431 assert(RI->getReturnValue()->getType() == PHI->getType() &&
3432 "Ret value not consistent in function!");
3433 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
3434 }
3435 }
3436
3437 // Add a branch to the merge points and remove return instructions.
3438 DebugLoc Loc;
3439 for (ReturnInst *RI : Returns) {
3440 UncondBrInst *BI = UncondBrInst::Create(AfterCallBB, RI->getIterator());
3441 Loc = RI->getDebugLoc();
3442 BI->setDebugLoc(Loc);
3443 RI->eraseFromParent();
3444 }
3445 // We need to set the debug location to *somewhere* inside the
3446 // inlined function. The line number may be nonsensical, but the
3447 // instruction will at least be associated with the right
3448 // function.
3449 if (CreatedBranchToNormalDest)
3450 CreatedBranchToNormalDest->setDebugLoc(Loc);
3451 } else if (!Returns.empty()) {
3452 // Otherwise, if there is exactly one return value, just replace anything
3453 // using the return value of the call with the computed value.
3454 if (!CB.use_empty()) {
3455 if (&CB == Returns[0]->getReturnValue())
3457 else
3458 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
3459 }
3460
3461 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
3462 BasicBlock *ReturnBB = Returns[0]->getParent();
3463 ReturnBB->replaceAllUsesWith(AfterCallBB);
3464
3465 // Splice the code from the return block into the block that it will return
3466 // to, which contains the code that was after the call.
3467 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
3468
3469 if (CreatedBranchToNormalDest)
3470 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
3471
3472 // Delete the return instruction now and empty ReturnBB now.
3473 Returns[0]->eraseFromParent();
3474 ReturnBB->eraseFromParent();
3475 } else if (!CB.use_empty()) {
3476 // In this case there are no returns to use, so there is no clear source
3477 // location for the "return".
3478 // FIXME: It may be correct to use the scope end line of the function here,
3479 // since this likely means we are falling out of the function.
3480 if (CreatedBranchToNormalDest)
3481 CreatedBranchToNormalDest->setDebugLoc(DebugLoc::getUnknown());
3482 // No returns, but something is using the return value of the call. Just
3483 // nuke the result.
3485 }
3486
3487 // Since we are now done with the Call/Invoke, we can delete it.
3488 CB.eraseFromParent();
3489
3490 // If we inlined any musttail calls and the original return is now
3491 // unreachable, delete it. It can only contain a bitcast and ret.
3492 if (InlinedMustTailCalls && pred_empty(AfterCallBB))
3493 AfterCallBB->eraseFromParent();
3494
3495 // We should always be able to fold the entry block of the function into the
3496 // single predecessor of the block...
3497 BasicBlock *CalleeEntry = Br->getSuccessor();
3498
3499 // Splice the code entry block into calling block, right before the
3500 // unconditional branch.
3501 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
3502 OrigBB->splice(Br->getIterator(), CalleeEntry);
3503
3504 // Remove the unconditional branch.
3505 Br->eraseFromParent();
3506
3507 // Now we can remove the CalleeEntry block, which is now empty.
3508 CalleeEntry->eraseFromParent();
3509
3510 // If we inserted a phi node, check to see if it has a single value (e.g. all
3511 // the entries are the same or undef). If so, remove the PHI so it doesn't
3512 // block other optimizations.
3513 if (PHI) {
3514 AssumptionCache *AC =
3515 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
3516 auto &DL = Caller->getDataLayout();
3517 if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
3518 PHI->replaceAllUsesWith(V);
3519 PHI->eraseFromParent();
3520 }
3521 }
3522
3523 if (MergeAttributes)
3524 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3525}
3526
// llvm::InlineFunction: the public entry point for inlining a call site.
// It first runs the legality/feasibility check (CanInlineCallSite) and only
// if that succeeds does it mutate the IR via InlineFunctionImpl. The
// InlineResult produced by the legality check is returned to the caller
// either way, so a failed check leaves the module untouched.
// NOTE(review): the function's signature line (original source line 3527,
// presumably `llvm::InlineResult llvm::InlineFunction(`) is missing from
// this extraction — confirm against the upstream InlineFunction.cpp.
3528 CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes,
3529 AAResults *CalleeAAR, bool InsertLifetime, bool TrackInlineHistory,
3530 Function *ForwardVarArgsTo, OptimizationRemarkEmitter *ORE) {
// Legality check: does not modify IR.
3531 llvm::InlineResult Result = CanInlineCallSite(CB, IFI);
3532 if (Result.isSuccess()) {
// Actual transformation; forwards every knob the caller supplied.
3533 InlineFunctionImpl(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
3534 TrackInlineHistory, ForwardVarArgsTo, ORE);
3535 }
3536
3537 return Result;
3538}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, Instruction *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
DenseMap< Instruction *, Value * > UnwindDestMemoTy
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, SmallSetVector< const Value *, 4 > &OriginallyIndirectCalls, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void PropagateInlinedFromMetadata(CallBase &CB, StringRef CalledFuncName, StringRef CallerFuncName, Function::iterator FStart, Function::iterator FEnd)
Track inlining chain via inlined.from metadata for dontcall diagnostics.
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList, OptimizationRemarkEmitter *ORE)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, MaybeAlign SrcAlign, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD, OptimizationRemarkEmitter *ORE)
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Load MIR Sample Profile
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static Value * getParentPad(Value *EHPad)
LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
Definition APInt.h:78
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition Argument.h:50
static LLVM_ABI uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
LLVM_ABI FPClassTest getNoFPClass() const
Return the FPClassTest for nofpclass.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:474
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition BasicBlock.h:659
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
LLVM_ABI void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
LLVM_ABI BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
void setDoesNotThrow()
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:537
This class represents a range of values.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
Definition Constant.h:43
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static LLVM_ABI InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
const DILocation * getWithoutAtom() const
uint64_t getAtomGroup() const
uint8_t getAtomRank() const
Subprogram description. Uses SubclassData1.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getCompilerGenerated()
Definition DebugLoc.h:162
LLVM_ABI unsigned getLine() const
Definition DebugLoc.cpp:52
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
LLVM_ABI MDNode * getScope() const
Definition DebugLoc.cpp:62
static LLVM_ABI DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition DebugLoc.cpp:136
static DebugLoc getTemporary()
Definition DebugLoc.h:160
LLVM_ABI unsigned getCol() const
Definition DebugLoc.cpp:57
LLVM_ABI bool isImplicitCode() const
Check if the DebugLoc corresponds to an implicit code.
Definition DebugLoc.cpp:85
static DebugLoc getUnknown()
Definition DebugLoc.h:161
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
Definition Function.h:299
uint64_t getCount() const
Definition Function.h:307
const BasicBlock & getEntryBlock() const
Definition Function.h:809
BasicBlockListType::iterator iterator
Definition Function.h:70
Argument * arg_iterator
Definition Function.h:73
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
const BasicBlock & front() const
Definition Function.h:860
iterator_range< arg_iterator > args()
Definition Function.h:892
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition Function.h:346
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
Constant * getPersonalityFn() const
Get the personality function associated with this function.
arg_iterator arg_end()
Definition Function.h:877
arg_iterator arg_begin()
Definition Function.h:868
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:251
MaybeAlign getParamAlign(unsigned ArgNo) const
Definition Function.h:489
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
const std::string & getGC() const
Definition Function.cpp:818
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
iterator end()
Definition Function.h:855
void setCallingConv(CallingConv::ID CC)
Definition Function.h:276
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition Function.cpp:875
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
bool hasMetadata() const
Return true if this GlobalObject has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)
Create a lifetime.start intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition Cloning.h:259
Value * ConvergenceControlToken
Definition Cloning.h:284
ProfileSummaryInfo * PSI
Definition Cloning.h:272
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition Cloning.h:289
Instruction * CallSiteEHPad
Definition Cloning.h:285
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition Cloning.h:271
BlockFrequencyInfo * CalleeBFI
Definition Cloning.h:273
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition Cloning.h:277
BlockFrequencyInfo * CallerBFI
Definition Cloning.h:273
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition Cloning.h:282
InlineResult is basically true or false.
Definition InlineCost.h:181
static InlineResult success()
Definition InlineCost.h:186
static InlineResult failure(const char *Reason)
Definition InlineCost.h:187
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition MDBuilder.h:195
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition MDBuilder.h:188
Metadata node.
Definition Metadata.h:1080
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1580
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition Metadata.h:1284
static LLVM_ABI MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
op_iterator op_end() const
Definition Metadata.h:1438
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
op_iterator op_begin() const
Definition Metadata.h:1434
LLVMContext & getContext() const
Definition Metadata.h:1244
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition Metadata.h:1549
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:265
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:255
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
The optimization diagnostic interface.
The instrumented contextual profile, produced by the CtxProfAnalysis.
LLVM_ABI bool isInSpecializedModule() const
LLVM_ABI void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
LLVM_ABI std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:181
bool contains(const_arg_type key) const
Check if the SetVector contains the given key.
Definition SetVector.h:252
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Unconditional Branch instruction.
void setSuccessor(BasicBlock *NewSucc)
static UncondBrInst * Create(BasicBlock *Target, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i=0) const
Value * getOperand(unsigned i) const
Definition User.h:207
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
See the file comment.
Definition ValueMap.h:84
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition ValueMap.h:167
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition ValueMap.h:156
iterator begin()
Definition ValueMap.h:138
iterator end()
Definition ValueMap.h:139
ValueMapIteratorImpl< MapT, const Value *, false > iterator
Definition ValueMap.h:135
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
CallInst * Call
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conve...
Definition CallingConv.h:21
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
bool match(Val *V, const Pattern &P)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
LLVM_ABI void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
LLVM_ABI void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
Definition DebugInfo.h:203
DenseMap< const AllocaInst *, SmallSetVector< VarRecord, 2 > > StorageToVarsMap
Map of backing storage to a set of variables that are stored to it.
Definition DebugInfo.h:286
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
LLVM_ABI MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
constexpr double phi
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
Definition ObjCARCUtil.h:75
ARCInstKind
Equivalence classes of instructions in the ARC Model.
@ None
anything that is inert from an ARC perspective.
@ RetainRV
objc_retainAutoreleasedReturnValue
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
Definition ObjCARCUtil.h:43
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
Definition ObjCARCUtil.h:67
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
Definition ObjCARCUtil.h:29
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition Local.cpp:2618
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
LLVM_ABI void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix, ClonedCodeInfo &CodeInfo)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
std::string utostr(uint64_t X, bool isNeg=false)
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, bool TrackInlineHistory=false, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI InlineResult CanInlineCallSite(const CallBase &CB, InlineFunctionInfo &IFI)
Check if it is legal to perform inlining of the function called by CB into the caller at this particu...
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition Local.h:252
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition Local.cpp:1581
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
Function::ProfileCount ProfileCount
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2528
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI void InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, bool TrackInlineHistory=false, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This should generally not be used, use InlineFunction instead.
LLVM_ABI MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
DWARFExpression::Operation Op
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNotCapturedBefore.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2019
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2192
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:379
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition Cloning.h:69
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition Cloning.h:80
bool isSimplified(const Value *From, const Value *To) const
Definition Cloning.h:98
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition Cloning.h:71
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
Definition Cloning.h:75
SmallSetVector< const Value *, 4 > OriginallyIndirectCalls
Definition Cloning.h:94
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition Cloning.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.
Definition DebugInfo.h:244