InlineFunction.cpp
1//===- InlineFunction.cpp - Code to perform function inlining -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameters and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/SetVector.h"
33#include "llvm/IR/Argument.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/CFG.h"
37#include "llvm/IR/Constant.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DebugInfo.h"
43#include "llvm/IR/DebugLoc.h"
45#include "llvm/IR/Dominators.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/IRBuilder.h"
49#include "llvm/IR/InlineAsm.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Intrinsics.h"
55#include "llvm/IR/LLVMContext.h"
56#include "llvm/IR/MDBuilder.h"
57#include "llvm/IR/Metadata.h"
58#include "llvm/IR/Module.h"
59#include "llvm/IR/Type.h"
60#include "llvm/IR/User.h"
61#include "llvm/IR/Value.h"
69#include <algorithm>
70#include <cassert>
71#include <cstdint>
72#include <iterator>
73#include <limits>
74#include <optional>
75#include <string>
76#include <utility>
77#include <vector>
78
79#define DEBUG_TYPE "inline-function"
80
81using namespace llvm;
82using namespace llvm::memprof;
84
85static cl::opt<bool>
86EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
88 cl::desc("Convert noalias attributes to metadata during inlining."));
89
90static cl::opt<bool>
91 UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
92 cl::init(true),
93 cl::desc("Use the llvm.experimental.noalias.scope.decl "
94 "intrinsic during inlining."));
95
96// Disabled by default, because the added alignment assumptions may increase
97// compile-time and block optimizations. This option is not suitable for use
98// with frontends that emit comprehensive parameter alignment annotations.
99static cl::opt<bool>
100PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
101 cl::init(false), cl::Hidden,
102 cl::desc("Convert align attributes to assumptions during inlining."));
103
105 "max-inst-checked-for-throw-during-inlining", cl::Hidden,
106 cl::desc("the maximum number of instructions analyzed for may throw during "
107 "attribute inference in inlined body"),
108 cl::init(4));
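// These are ordinary cl::opt flags, so they can be toggled on an opt
// invocation when experimenting with the inliner, e.g. (hypothetical run):
//   opt -passes=inline -enable-noalias-to-md-conversion=false -S in.ll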
109
110namespace {
111
112 /// A class for recording information about inlining a landing pad.
113 class LandingPadInliningInfo {
114 /// Destination of the invoke's unwind.
115 BasicBlock *OuterResumeDest;
116
117 /// Destination for the callee's resume.
118 BasicBlock *InnerResumeDest = nullptr;
119
120 /// LandingPadInst associated with the invoke.
121 LandingPadInst *CallerLPad = nullptr;
122
123 /// PHI for EH values from landingpad insts.
124 PHINode *InnerEHValuesPHI = nullptr;
125
126 SmallVector<Value*, 8> UnwindDestPHIValues;
127
128 public:
129 LandingPadInliningInfo(InvokeInst *II)
130 : OuterResumeDest(II->getUnwindDest()) {
131 // If there are PHI nodes in the unwind destination block, we need to keep
132 // track of which values came into them from the invoke before removing
133 // the edge from this block.
134 BasicBlock *InvokeBB = II->getParent();
135 BasicBlock::iterator I = OuterResumeDest->begin();
136 for (; isa<PHINode>(I); ++I) {
137 // Save the value to use for this edge.
138 PHINode *PHI = cast<PHINode>(I);
139 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
140 }
141
142 CallerLPad = cast<LandingPadInst>(I);
143 }
144
145 /// The outer unwind destination is the target of
146 /// unwind edges introduced for calls within the inlined function.
147 BasicBlock *getOuterResumeDest() const {
148 return OuterResumeDest;
149 }
150
151 BasicBlock *getInnerResumeDest();
152
153 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
154
155 /// Forward the 'resume' instruction to the caller's landing pad block.
156 /// When the landing pad block has only one predecessor, this is
157 /// a simple branch. When there is more than one predecessor, we need to
158 /// split the landing pad block after the landingpad instruction and jump
159 /// to there.
160 void forwardResume(ResumeInst *RI,
161 SmallPtrSetImpl<LandingPadInst *> &InlinedLPads);
162
163 /// Add incoming-PHI values to the unwind destination block for the given
164 /// basic block, using the values for the original invoke's source block.
165 void addIncomingPHIValuesFor(BasicBlock *BB) const {
166 addIncomingPHIValuesForInto(BB, OuterResumeDest);
167 }
168
169 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
170 BasicBlock::iterator I = dest->begin();
171 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
172 PHINode *phi = cast<PHINode>(I);
173 phi->addIncoming(UnwindDestPHIValues[i], src);
174 }
175 }
176 };
177
178} // end anonymous namespace
179
180/// Get or create a target for the branch from ResumeInsts.
181BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
182 if (InnerResumeDest) return InnerResumeDest;
183
184 // Split the landing pad.
185 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
186 InnerResumeDest =
187 OuterResumeDest->splitBasicBlock(SplitPoint,
188 OuterResumeDest->getName() + ".body");
189
190 // The number of incoming edges we expect to the inner landing pad.
191 const unsigned PHICapacity = 2;
192
193 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
194 BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
195 BasicBlock::iterator I = OuterResumeDest->begin();
196 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
197 PHINode *OuterPHI = cast<PHINode>(I);
198 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
199 OuterPHI->getName() + ".lpad-body");
200 InnerPHI->insertBefore(InsertPoint);
201 OuterPHI->replaceAllUsesWith(InnerPHI);
202 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
203 }
204
205 // Create a PHI for the exception values.
206 InnerEHValuesPHI =
207 PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
208 InnerEHValuesPHI->insertBefore(InsertPoint);
209 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
210 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
211
212 // All done.
213 return InnerResumeDest;
214}
215
216/// Forward the 'resume' instruction to the caller's landing pad block.
217/// When the landing pad block has only one predecessor, this is a simple
218/// branch. When there is more than one predecessor, we need to split the
219/// landing pad block after the landingpad instruction and jump to there.
220void LandingPadInliningInfo::forwardResume(
221 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
222 BasicBlock *Dest = getInnerResumeDest();
223 BasicBlock *Src = RI->getParent();
224
225 BranchInst::Create(Dest, Src);
226
227 // Update the PHIs in the destination. They were inserted in an order which
228 // makes this work.
229 addIncomingPHIValuesForInto(Src, Dest);
230
231 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
232 RI->eraseFromParent();
233}
234
235/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
236static Value *getParentPad(Value *EHPad) {
237 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
238 return FPI->getParentPad();
239 return cast<CatchSwitchInst>(EHPad)->getParentPad();
240}
241
242using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
243
244/// Helper for getUnwindDestToken that does the descendant-ward part of
245/// the search.
246static Value *getUnwindDestTokenHelper(Instruction *EHPad,
247 UnwindDestMemoTy &MemoMap) {
248 SmallVector<Instruction *, 8> Worklist(1, EHPad);
249
250 while (!Worklist.empty()) {
251 Instruction *CurrentPad = Worklist.pop_back_val();
252 // We only put pads on the worklist that aren't in the MemoMap. When
253 // we find an unwind dest for a pad we may update its ancestors, but
254 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
255 // so they should never get updated while queued on the worklist.
256 assert(!MemoMap.count(CurrentPad));
257 Value *UnwindDestToken = nullptr;
258 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
259 if (CatchSwitch->hasUnwindDest()) {
260 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
261 } else {
262 // Catchswitch doesn't have a 'nounwind' variant, and one might be
263 // annotated as "unwinds to caller" when really it's nounwind (see
264 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
265 // parent's unwind dest from this. We can check its catchpads'
266 // descendants, since they might include a cleanuppad with an
267 // "unwinds to caller" cleanupret, which can be trusted.
268 for (auto HI = CatchSwitch->handler_begin(),
269 HE = CatchSwitch->handler_end();
270 HI != HE && !UnwindDestToken; ++HI) {
271 BasicBlock *HandlerBlock = *HI;
272 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
273 for (User *Child : CatchPad->users()) {
274 // Intentionally ignore invokes here -- since the catchswitch is
275 // marked "unwind to caller", it would be a verifier error if it
276 // contained an invoke which unwinds out of it, so any invoke we'd
277 // encounter must unwind to some child of the catch.
278 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
279 continue;
280
281 Instruction *ChildPad = cast<Instruction>(Child);
282 auto Memo = MemoMap.find(ChildPad);
283 if (Memo == MemoMap.end()) {
284 // Haven't figured out this child pad yet; queue it.
285 Worklist.push_back(ChildPad);
286 continue;
287 }
288 // We've already checked this child, but might have found that
289 // it offers no proof either way.
290 Value *ChildUnwindDestToken = Memo->second;
291 if (!ChildUnwindDestToken)
292 continue;
293 // We already know the child's unwind dest, which can either
294 // be ConstantTokenNone to indicate unwind to caller, or can
295 // be another child of the catchpad. Only the former indicates
296 // the unwind dest of the catchswitch.
297 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
298 UnwindDestToken = ChildUnwindDestToken;
299 break;
300 }
301 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
302 }
303 }
304 }
305 } else {
306 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
307 for (User *U : CleanupPad->users()) {
308 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
309 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
310 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
311 else
312 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
313 break;
314 }
315 Value *ChildUnwindDestToken;
316 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
317 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
318 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
319 Instruction *ChildPad = cast<Instruction>(U);
320 auto Memo = MemoMap.find(ChildPad);
321 if (Memo == MemoMap.end()) {
322 // Haven't resolved this child yet; queue it and keep searching.
323 Worklist.push_back(ChildPad);
324 continue;
325 }
326 // We've checked this child, but still need to ignore it if it
327 // had no proof either way.
328 ChildUnwindDestToken = Memo->second;
329 if (!ChildUnwindDestToken)
330 continue;
331 } else {
332 // Not a relevant user of the cleanuppad
333 continue;
334 }
335 // In a well-formed program, the child/invoke must either unwind to
336 // an(other) child of the cleanup, or exit the cleanup. In the
337 // first case, continue searching.
338 if (isa<Instruction>(ChildUnwindDestToken) &&
339 getParentPad(ChildUnwindDestToken) == CleanupPad)
340 continue;
341 UnwindDestToken = ChildUnwindDestToken;
342 break;
343 }
344 }
345 // If we haven't found an unwind dest for CurrentPad, we may have queued its
346 // children, so move on to the next in the worklist.
347 if (!UnwindDestToken)
348 continue;
349
350 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
351 // any ancestors of CurrentPad up to but not including UnwindDestToken's
352 // parent pad. Record this in the memo map, and check to see if the
353 // original EHPad being queried is one of the ones exited.
354 Value *UnwindParent;
355 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
356 UnwindParent = getParentPad(UnwindPad);
357 else
358 UnwindParent = nullptr;
359 bool ExitedOriginalPad = false;
360 for (Instruction *ExitedPad = CurrentPad;
361 ExitedPad && ExitedPad != UnwindParent;
362 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
363 // Skip over catchpads since they just follow their catchswitches.
364 if (isa<CatchPadInst>(ExitedPad))
365 continue;
366 MemoMap[ExitedPad] = UnwindDestToken;
367 ExitedOriginalPad |= (ExitedPad == EHPad);
368 }
369
370 if (ExitedOriginalPad)
371 return UnwindDestToken;
372
373 // Continue the search.
374 }
375
376 // No definitive information is contained within this funclet.
377 return nullptr;
378}
379
380/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
381/// return that pad instruction. If it unwinds to caller, return
382/// ConstantTokenNone. If it does not have a definitive unwind destination,
383/// return nullptr.
384///
385/// This routine gets invoked for calls in funclets in inlinees when inlining
386/// an invoke. Since many funclets don't have calls inside them, it's queried
387/// on-demand rather than building a map of pads to unwind dests up front.
388/// Determining a funclet's unwind dest may require recursively searching its
389/// descendants, and also ancestors and cousins if the descendants don't provide
390/// an answer. Since most funclets will have their unwind dest immediately
391/// available as the unwind dest of a catchswitch or cleanupret, this routine
392/// searches top-down from the given pad and then up. To avoid worst-case
393/// quadratic run-time given that approach, it uses a memo map to avoid
394/// re-processing funclet trees. The callers that rewrite the IR as they go
395/// take advantage of this, for correctness, by checking/forcing rewritten
396/// pads' entries to match the original callee view.
397static Value *getUnwindDestToken(Instruction *EHPad,
398 UnwindDestMemoTy &MemoMap) {
399 // Catchpads unwind to the same place as their catchswitch;
400 // redirect any queries on catchpads so the code below can
401 // deal with just catchswitches and cleanuppads.
402 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
403 EHPad = CPI->getCatchSwitch();
404
405 // Check if we've already determined the unwind dest for this pad.
406 auto Memo = MemoMap.find(EHPad);
407 if (Memo != MemoMap.end())
408 return Memo->second;
409
410 // Search EHPad and, if necessary, its descendants.
411 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
412 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
413 if (UnwindDestToken)
414 return UnwindDestToken;
415
416 // No information is available for this EHPad from itself or any of its
417 // descendants. An unwind all the way out to a pad in the caller would
418 // need also to agree with the unwind dest of the parent funclet, so
419 // search up the chain to try to find a funclet with information. Put
420 // null entries in the memo map to avoid re-processing as we go up.
421 MemoMap[EHPad] = nullptr;
422#ifndef NDEBUG
423 SmallPtrSet<Instruction *, 4> TempMemos;
424 TempMemos.insert(EHPad);
425#endif
426 Instruction *LastUselessPad = EHPad;
427 Value *AncestorToken;
428 for (AncestorToken = getParentPad(EHPad);
429 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
430 AncestorToken = getParentPad(AncestorToken)) {
431 // Skip over catchpads since they just follow their catchswitches.
432 if (isa<CatchPadInst>(AncestorPad))
433 continue;
434 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
435 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
436 // call to getUnwindDestToken, that would mean that AncestorPad had no
437 // information in itself, its descendants, or its ancestors. If that
438 // were the case, then we should also have recorded the lack of information
439 // for the descendant that we're coming from. So assert that we don't
440 // find a null entry in the MemoMap for AncestorPad.
441 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
442 auto AncestorMemo = MemoMap.find(AncestorPad);
443 if (AncestorMemo == MemoMap.end()) {
444 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
445 } else {
446 UnwindDestToken = AncestorMemo->second;
447 }
448 if (UnwindDestToken)
449 break;
450 LastUselessPad = AncestorPad;
451 MemoMap[LastUselessPad] = nullptr;
452#ifndef NDEBUG
453 TempMemos.insert(LastUselessPad);
454#endif
455 }
456
457 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
458 // returned nullptr (and likewise for EHPad and any of its ancestors up to
459 // LastUselessPad), so LastUselessPad has no information from below. Since
460 // getUnwindDestTokenHelper must investigate all downward paths through
461 // no-information nodes to prove that a node has no information like this,
462 // and since any time it finds information it records it in the MemoMap for
463 // not just the immediately-containing funclet but also any ancestors also
464 // exited, it must be the case that, walking downward from LastUselessPad,
465 // visiting just those nodes which have not been mapped to an unwind dest
466 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
467 // they are just used to keep getUnwindDestTokenHelper from repeating work),
468 // any node visited must have been exhaustively searched with no information
469 // for it found.
470 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
471 while (!Worklist.empty()) {
472 Instruction *UselessPad = Worklist.pop_back_val();
473 auto Memo = MemoMap.find(UselessPad);
474 if (Memo != MemoMap.end() && Memo->second) {
475 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
476 // that it is a funclet that does have information about unwinding to
477 // a particular destination; its parent was a useless pad.
478 // Since its parent has no information, the unwind edge must not escape
479 // the parent, and must target a sibling of this pad. This local unwind
480 // gives us no information about EHPad. Leave it and the subtree rooted
481 // at it alone.
482 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
483 continue;
484 }
485 // We know we don't have information for UselessPad. If it has an entry in
486 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
487 // added on this invocation of getUnwindDestToken; if a previous invocation
488 // recorded nullptr, it would have had to prove that the ancestors of
489 // UselessPad, which include LastUselessPad, had no information, and that
490 // in turn would have required proving that the descendants of
491 // LastUselessPad, which include EHPad, have no information about
492 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
493 // the MemoMap on that invocation, which isn't the case if we got here.
494 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
495 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
496 // information that we'd be contradicting by making a map entry for it
497 // (which is something that getUnwindDestTokenHelper must have proved for
498 // us to get here). Just assert on its direct users here; the checks in
499 // this downward walk at its descendants will verify that they don't have
500 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
501 // unwind edges or unwind to a sibling).
502 MemoMap[UselessPad] = UnwindDestToken;
503 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
504 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
505 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
506 auto *CatchPad = HandlerBlock->getFirstNonPHI();
507 for (User *U : CatchPad->users()) {
508 assert(
509 (!isa<InvokeInst>(U) ||
510 (getParentPad(
511 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
512 CatchPad)) &&
513 "Expected useless pad");
514 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
515 Worklist.push_back(cast<Instruction>(U));
516 }
517 }
518 } else {
519 assert(isa<CleanupPadInst>(UselessPad));
520 for (User *U : UselessPad->users()) {
521 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
522 assert((!isa<InvokeInst>(U) ||
523 (getParentPad(
524 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
525 UselessPad)) &&
526 "Expected useless pad");
527 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
528 Worklist.push_back(cast<Instruction>(U));
529 }
530 }
531 }
532
533 return UnwindDestToken;
534}
535
536/// When we inline a basic block into an invoke,
537/// we have to turn all of the calls that can throw into invokes.
538/// This function analyzes BB to see if there are any calls, and if so,
539/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
540/// nodes in that block with the values specified in InvokeDestPHIValues.
541static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
542 BasicBlock *BB, BasicBlock *UnwindEdge,
543 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
544 for (Instruction &I : llvm::make_early_inc_range(*BB)) {
545 // We only need to check for function calls: inlined invoke
546 // instructions require no special handling.
547 CallInst *CI = dyn_cast<CallInst>(&I);
548
549 if (!CI || CI->doesNotThrow())
550 continue;
551
552 // We do not need to (and in fact, cannot) convert possibly throwing calls
553 // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
554 // invokes. The caller's "segment" of the deoptimization continuation
555 // attached to the newly inlined @llvm.experimental.deoptimize
556 // (resp. @llvm.experimental.guard) call should contain the exception
557 // handling logic, if any.
558 if (auto *F = CI->getCalledFunction())
559 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
560 F->getIntrinsicID() == Intrinsic::experimental_guard)
561 continue;
562
563 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
564 // This call is nested inside a funclet. If that funclet has an unwind
565 // destination within the inlinee, then unwinding out of this call would
566 // be UB. Rewriting this call to an invoke which targets the inlined
567 // invoke's unwind dest would give the call's parent funclet multiple
568 // unwind destinations, which is something that subsequent EH table
569 // generation can't handle and that the verifier rejects. So when we
570 // see such a call, leave it as a call.
571 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
572 Value *UnwindDestToken =
573 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
574 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
575 continue;
576#ifndef NDEBUG
577 Instruction *MemoKey;
578 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
579 MemoKey = CatchPad->getCatchSwitch();
580 else
581 MemoKey = FuncletPad;
582 assert(FuncletUnwindMap->count(MemoKey) &&
583 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
584 "must get memoized to avoid confusing later searches");
585#endif // NDEBUG
586 }
587
588 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
589 return BB;
590 }
591 return nullptr;
592}
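// Illustrative sketch (hand-written IR, not from a test): when the inlined
// body contains
//   call void @may_throw()
// and the original call site was `invoke ... unwind label %lpad`, the call is
// rewritten by changeToInvokeAndSplitBasicBlock into
//   invoke void @may_throw() to label %split.cont unwind label %lpad
// with the containing block split at the call so the normal path continues in
// %split.cont (label names here are made up).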
593
594/// If we inlined an invoke site, we need to convert calls
595/// in the body of the inlined function into invokes.
596///
597/// II is the invoke instruction being inlined. FirstNewBlock is the first
598/// block of the inlined code (the last block is the end of the function),
599/// and InlineCodeInfo is information about the code that got inlined.
600static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
601 ClonedCodeInfo &InlinedCodeInfo) {
602 BasicBlock *InvokeDest = II->getUnwindDest();
603
604 Function *Caller = FirstNewBlock->getParent();
605
606 // The inlined code is currently at the end of the function, scan from the
607 // start of the inlined code to its end, checking for stuff we need to
608 // rewrite.
609 LandingPadInliningInfo Invoke(II);
610
611 // Get all of the inlined landing pad instructions.
612 SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
613 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
614 I != E; ++I)
615 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
616 InlinedLPads.insert(II->getLandingPadInst());
617
618 // Append the clauses from the outer landing pad instruction into the inlined
619 // landing pad instructions.
620 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
621 for (LandingPadInst *InlinedLPad : InlinedLPads) {
622 unsigned OuterNum = OuterLPad->getNumClauses();
623 InlinedLPad->reserveClauses(OuterNum);
624 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
625 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
626 if (OuterLPad->isCleanup())
627 InlinedLPad->setCleanup(true);
628 }
629
630 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
631 BB != E; ++BB) {
632 if (InlinedCodeInfo.ContainsCalls)
633 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
634 &*BB, Invoke.getOuterResumeDest()))
635 // Update any PHI nodes in the exceptional block to indicate that there
636 // is now a new entry in them.
637 Invoke.addIncomingPHIValuesFor(NewBB);
638
639 // Forward any resumes that are remaining here.
640 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
641 Invoke.forwardResume(RI, InlinedLPads);
642 }
643
644 // Now that everything is happy, we have one final detail. The PHI nodes in
645 // the exception destination block still have entries due to the original
646 // invoke instruction. Eliminate these entries (which might even delete the
647 // PHI node) now.
648 InvokeDest->removePredecessor(II->getParent());
649}
650
651/// If we inlined an invoke site, we need to convert calls
652/// in the body of the inlined function into invokes.
653///
654/// II is the invoke instruction being inlined. FirstNewBlock is the first
655/// block of the inlined code (the last block is the end of the function),
656/// and InlineCodeInfo is information about the code that got inlined.
657static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
658 ClonedCodeInfo &InlinedCodeInfo) {
659 BasicBlock *UnwindDest = II->getUnwindDest();
660 Function *Caller = FirstNewBlock->getParent();
661
662 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
663
664 // If there are PHI nodes in the unwind destination block, we need to keep
665 // track of which values came into them from the invoke before removing the
666 // edge from this block.
667 SmallVector<Value *, 8> UnwindDestPHIValues;
668 BasicBlock *InvokeBB = II->getParent();
669 for (PHINode &PHI : UnwindDest->phis()) {
670 // Save the value to use for this edge.
671 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
672 }
673
674 // Add incoming-PHI values to the unwind destination block for the given basic
675 // block, using the values for the original invoke's source block.
676 auto UpdatePHINodes = [&](BasicBlock *Src) {
677 BasicBlock::iterator I = UnwindDest->begin();
678 for (Value *V : UnwindDestPHIValues) {
679 PHINode *PHI = cast<PHINode>(I);
680 PHI->addIncoming(V, Src);
681 ++I;
682 }
683 };
684
685 // This connects all the instructions which 'unwind to caller' to the invoke
686 // destination.
687 UnwindDestMemoTy FuncletUnwindMap;
688 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
689 BB != E; ++BB) {
690 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
691 if (CRI->unwindsToCaller()) {
692 auto *CleanupPad = CRI->getCleanupPad();
693 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI->getIterator());
694 CRI->eraseFromParent();
695 UpdatePHINodes(&*BB);
696 // Finding a cleanupret with an unwind destination would confuse
697 // subsequent calls to getUnwindDestToken, so map the cleanuppad
698 // to short-circuit any such calls and recognize this as an "unwind
699 // to caller" cleanup.
700 assert(!FuncletUnwindMap.count(CleanupPad) ||
701 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
702 FuncletUnwindMap[CleanupPad] =
703 ConstantTokenNone::get(Caller->getContext());
704 }
705 }
706
707 Instruction *I = BB->getFirstNonPHI();
708 if (!I->isEHPad())
709 continue;
710
711 Instruction *Replacement = nullptr;
712 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
713 if (CatchSwitch->unwindsToCaller()) {
714 Value *UnwindDestToken;
715 if (auto *ParentPad =
716 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
717 // This catchswitch is nested inside another funclet. If that
718 // funclet has an unwind destination within the inlinee, then
719 // unwinding out of this catchswitch would be UB. Rewriting this
720 // catchswitch to unwind to the inlined invoke's unwind dest would
721 // give the parent funclet multiple unwind destinations, which is
722 // something that subsequent EH table generation can't handle and
723 // that the verifier rejects. So when we see such a call, leave it
724 // as "unwind to caller".
725 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
726 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
727 continue;
728 } else {
729 // This catchswitch has no parent to inherit constraints from, and
730 // none of its descendants can have an unwind edge that exits it and
731 // targets another funclet in the inlinee. It may or may not have a
732 // descendant that definitively has an unwind to caller. In either
733 // case, we'll have to assume that any unwinds out of it may need to
734 // be routed to the caller, so treat it as though it has a definitive
735 // unwind to caller.
736 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
737 }
738 auto *NewCatchSwitch = CatchSwitchInst::Create(
739 CatchSwitch->getParentPad(), UnwindDest,
740 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
741 CatchSwitch->getIterator());
742 for (BasicBlock *PadBB : CatchSwitch->handlers())
743 NewCatchSwitch->addHandler(PadBB);
744 // Propagate info for the old catchswitch over to the new one in
745 // the unwind map. This also serves to short-circuit any subsequent
746 // checks for the unwind dest of this catchswitch, which would get
747 // confused if they found the outer handler in the callee.
748 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
749 Replacement = NewCatchSwitch;
750 }
751 } else if (!isa<FuncletPadInst>(I)) {
752 llvm_unreachable("unexpected EHPad!");
753 }
754
755 if (Replacement) {
756 Replacement->takeName(I);
757 I->replaceAllUsesWith(Replacement);
758 I->eraseFromParent();
759 UpdatePHINodes(&*BB);
760 }
761 }
762
763 if (InlinedCodeInfo.ContainsCalls)
764 for (Function::iterator BB = FirstNewBlock->getIterator(),
765 E = Caller->end();
766 BB != E; ++BB)
767 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
768 &*BB, UnwindDest, &FuncletUnwindMap))
769 // Update any PHI nodes in the exceptional block to indicate that there
770 // is now a new entry in them.
771 UpdatePHINodes(NewBB);
772
773 // Now that everything is happy, we have one final detail. The PHI nodes in
774 // the exception destination block still have entries due to the original
775 // invoke instruction. Eliminate these entries (which might even delete the
776 // PHI node) now.
777 UnwindDest->removePredecessor(InvokeBB);
778}
779
780static bool haveCommonPrefix(MDNode *MIBStackContext,
781 MDNode *CallsiteStackContext) {
782 assert(MIBStackContext->getNumOperands() > 0 &&
783 CallsiteStackContext->getNumOperands() > 0);
784 // Because of the context trimming performed during matching, the callsite
785 // context could have more stack ids than the MIB. We match up to the end of
786 // the shortest stack context.
787 for (auto MIBStackIter = MIBStackContext->op_begin(),
788 CallsiteStackIter = CallsiteStackContext->op_begin();
789 MIBStackIter != MIBStackContext->op_end() &&
790 CallsiteStackIter != CallsiteStackContext->op_end();
791 MIBStackIter++, CallsiteStackIter++) {
792 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
793 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
794 assert(Val1 && Val2);
795 if (Val1->getZExtValue() != Val2->getZExtValue())
796 return false;
797 }
798 return true;
799}
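// Illustrative example (made-up stack ids): an MIB stack context of
//   !{i64 1, i64 2, i64 3}
// and a (possibly trimmed) callsite context of
//   !{i64 1, i64 2}
// share a common prefix, so haveCommonPrefix returns true; a callsite context
// starting with i64 9 would not.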
800
801static void removeMemProfMetadata(CallBase *Call) {
802 Call->setMetadata(LLVMContext::MD_memprof, nullptr);
803}
804
805static void removeCallsiteMetadata(CallBase *Call) {
806 Call->setMetadata(LLVMContext::MD_callsite, nullptr);
807}
808
809static void updateMemprofMetadata(CallBase *CI,
810 const std::vector<Metadata *> &MIBList) {
811 assert(!MIBList.empty());
812 // Remove existing memprof, which will either be replaced or may not be needed
813 // if we are able to use a single allocation type function attribute.
814 removeMemProfMetadata(CI);
815 CallStackTrie CallStack;
816 for (Metadata *MIB : MIBList)
817 CallStack.addCallStack(cast<MDNode>(MIB));
818 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
819 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
820 if (!MemprofMDAttached)
821 // If we used a function attribute remove the callsite metadata as well.
822 removeCallsiteMetadata(CI);
823}
824
825// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
826// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
827// the call that was inlined.
828static void propagateMemProfHelper(const CallBase *OrigCall,
829 CallBase *ClonedCall,
830 MDNode *InlinedCallsiteMD) {
831 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
832 MDNode *ClonedCallsiteMD = nullptr;
833 // Check if the call originally had callsite metadata, and update it for the
834 // new call in the inlined body.
835 if (OrigCallsiteMD) {
836 // The cloned call's context is now the concatenation of the original call's
837 // callsite metadata and the callsite metadata on the call where it was
838 // inlined.
839 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
840 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
841 }
842
843 // Update any memprof metadata on the cloned call.
844 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
845 if (!OrigMemProfMD)
846 return;
847 // We currently expect that allocations with memprof metadata also have
848 // callsite metadata for the allocation's part of the context.
849 assert(OrigCallsiteMD);
850
851 // New call's MIB list.
852 std::vector<Metadata *> NewMIBList;
853
854 // For each MIB metadata, check if its call stack context starts with the
855 // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
856 // the inlined body. If not, it stays on the out-of-line original call.
857 for (auto &MIBOp : OrigMemProfMD->operands()) {
858 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
859 // Stack is first operand of MIB.
860 MDNode *StackMD = getMIBStackNode(MIB);
861 assert(StackMD);
862 // See if the new cloned callsite context matches this profiled context.
863 if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
864 // Add it to the cloned call's MIB list.
865 NewMIBList.push_back(MIB);
866 }
867 if (NewMIBList.empty()) {
868 removeMemProfMetadata(ClonedCall);
869 removeCallsiteMetadata(ClonedCall);
870 return;
871 }
872 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
873 updateMemprofMetadata(ClonedCall, NewMIBList);
874}
875
876// Update memprof related metadata (!memprof and !callsite) based on the
877// inlining of Callee into the callsite at CB. The updates include merging the
878// inlined callee's callsite metadata with that of the inlined call,
879// and moving the subset of any memprof contexts to the inlined callee
880// allocations if they match the new inlined call stack.
881static void
882propagateMemProfMetadata(Function *Callee, CallBase &CB,
883 bool ContainsMemProfMetadata,
884 const ValueToValueMapTy &VMap) {
885 MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
886 // Only need to update if the inlined callsite had callsite metadata, or if
887 // there was any memprof metadata inlined.
888 if (!CallsiteMD && !ContainsMemProfMetadata)
889 return;
890
891 // Propagate metadata onto the cloned calls in the inlined callee.
892 for (const auto &Entry : VMap) {
893 // See if this is a call that has been inlined and remapped, and not
894 // simplified away in the process.
895 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
896 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
897 if (!OrigCall || !ClonedCall)
898 continue;
899 // If the inlined callsite did not have any callsite metadata, then it isn't
900 // involved in any profiled call contexts, and we can remove any memprof
901 // metadata on the cloned call.
902 if (!CallsiteMD) {
903 removeMemProfMetadata(ClonedCall);
904 removeCallsiteMetadata(ClonedCall);
905 continue;
906 }
907 propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
908 }
909}
910
911/// When inlining a call site that has !llvm.mem.parallel_loop_access,
912/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
913/// be propagated to all memory-accessing cloned instructions.
914static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
915 Function::iterator FEnd) {
916 MDNode *MemParallelLoopAccess =
917 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
918 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
919 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
920 MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
921 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
922 return;
923
924 for (BasicBlock &BB : make_range(FStart, FEnd)) {
925 for (Instruction &I : BB) {
926 // This metadata is only relevant for instructions that access memory.
927 if (!I.mayReadOrWriteMemory())
928 continue;
929
930 if (MemParallelLoopAccess) {
931 // TODO: This probably should not overwrite MemParallelLoopAccess.
932 MemParallelLoopAccess = MDNode::concatenate(
933 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
934 MemParallelLoopAccess);
935 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
936 MemParallelLoopAccess);
937 }
938
939 if (AccessGroup)
940 I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
941 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
942
943 if (AliasScope)
944 I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
945 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
946
947 if (NoAlias)
948 I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
949 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
950 }
951 }
952}
953
954/// Bundle operands of the inlined function must be added to inlined call sites.
955static void PropagateOperandBundles(Function::iterator InlinedBB,
956 Instruction *CallSiteEHPad) {
957 for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
958 CallBase *I = dyn_cast<CallBase>(&II);
959 if (!I)
960 continue;
961 // Skip call sites which already have a "funclet" bundle.
962 if (I->getOperandBundle(LLVMContext::OB_funclet))
963 continue;
964 // Skip call sites which are nounwind intrinsics (as long as they don't
965 // lower into regular function calls in the course of IR transformations).
966 auto *CalledFn =
967 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
968 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
969 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
970 continue;
971
972 SmallVector<OperandBundleDef, 1> OpBundles;
973 I->getOperandBundlesAsDefs(OpBundles);
974 OpBundles.emplace_back("funclet", CallSiteEHPad);
975
976 Instruction *NewInst = CallBase::Create(I, OpBundles, I->getIterator());
977 NewInst->takeName(I);
978 I->replaceAllUsesWith(NewInst);
979 I->eraseFromParent();
980 }
981}
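// Sketch of the effect (hypothetical IR): a cloned call such as
//   call void @g()
// that now lives inside the caller's cleanup funclet is rewritten to
//   call void @g() [ "funclet"(token %cleanuppad) ]
// where %cleanuppad stands for the CallSiteEHPad passed in above.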
982
983namespace {
984/// Utility for cloning !noalias and !alias.scope metadata. When a code region
985/// using scoped alias metadata is inlined, the aliasing relationships may not
986/// hold between the two versions. It is necessary to create a deep clone of the
987/// metadata, putting the two versions in separate scope domains.
988class ScopedAliasMetadataDeepCloner {
989 using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
990 SetVector<const MDNode *> MD;
991 MetadataMap MDMap;
992 void addRecursiveMetadataUses();
993
994public:
995 ScopedAliasMetadataDeepCloner(const Function *F);
996
997 /// Create a new clone of the scoped alias metadata, which will be used by
998 /// subsequent remap() calls.
999 void clone();
1000
1001 /// Remap instructions in the given range from the original to the cloned
1002 /// metadata.
1003 void remap(Function::iterator FStart, Function::iterator FEnd);
1004};
1005} // namespace
1006
1007ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1008 const Function *F) {
1009 for (const BasicBlock &BB : *F) {
1010 for (const Instruction &I : BB) {
1011 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1012 MD.insert(M);
1013 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1014 MD.insert(M);
1015
1016 // We also need to clone the metadata in noalias intrinsics.
1017 if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1018 MD.insert(Decl->getScopeList());
1019 }
1020 }
1021 addRecursiveMetadataUses();
1022}
1023
1024void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1025 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1026 while (!Queue.empty()) {
1027 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1028 for (const Metadata *Op : M->operands())
1029 if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
1030 if (MD.insert(OpMD))
1031 Queue.push_back(OpMD);
1032 }
1033}
1034
1035void ScopedAliasMetadataDeepCloner::clone() {
1036 assert(MDMap.empty() && "clone() already called ?");
1037
1038 SmallVector<TempMDTuple, 16> DummyNodes;
1039 for (const MDNode *I : MD) {
1040 DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
1041 MDMap[I].reset(DummyNodes.back().get());
1042 }
1043
1044 // Create new metadata nodes to replace the dummy nodes, replacing old
1045 // metadata references with either a dummy node or an already-created new
1046 // node.
1047 SmallVector<Metadata *, 4> NewOps;
1048 for (const MDNode *I : MD) {
1049 for (const Metadata *Op : I->operands()) {
1050 if (const MDNode *M = dyn_cast<MDNode>(Op))
1051 NewOps.push_back(MDMap[M]);
1052 else
1053 NewOps.push_back(const_cast<Metadata *>(Op));
1054 }
1055
1056 MDNode *NewM = MDNode::get(I->getContext(), NewOps);
1057 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
1058 assert(TempM->isTemporary() && "Expected temporary node");
1059
1060 TempM->replaceAllUsesWith(NewM);
1061 NewOps.clear();
1062 }
1063}
1064
1065void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
1066 Function::iterator FEnd) {
1067 if (MDMap.empty())
1068 return; // Nothing to do.
1069
1070 for (BasicBlock &BB : make_range(FStart, FEnd)) {
1071 for (Instruction &I : BB) {
1072 // TODO: The null checks for the MDMap.lookup() results should no longer
1073 // be necessary.
1074 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1075 if (MDNode *MNew = MDMap.lookup(M))
1076 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1077
1078 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1079 if (MDNode *MNew = MDMap.lookup(M))
1080 I.setMetadata(LLVMContext::MD_noalias, MNew);
1081
1082 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1083 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1084 Decl->setScopeList(MNew);
1085 }
1086 }
1087}
1088
1089/// If the inlined function has noalias arguments,
1090/// then add new alias scopes for each noalias argument, tag the mapped noalias
1091/// parameters with noalias metadata specifying the new scope, and tag all
1092/// non-derived loads, stores and memory intrinsics with the new alias scopes.
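/// For illustration only (hand-written IR): given
///   define void @callee(ptr noalias %a, ptr %b) {
///     %v = load i32, ptr %b
///     store i32 %v, ptr %a
///     ret void
///   }
/// the inlined store (based on %a) is tagged with !alias.scope for a fresh
/// scope created for %a, while the inlined load (not derived from %a) is
/// tagged with !noalias on that same scope.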
1093static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
1094 const DataLayout &DL, AAResults *CalleeAAR,
1095 ClonedCodeInfo &InlinedFunctionInfo) {
1096 if (!EnableNoAliasConversion)
1097 return;
1098
1099 const Function *CalledFunc = CB.getCalledFunction();
1100 SmallVector<const Argument *, 4> NoAliasArgs;
1101
1102 for (const Argument &Arg : CalledFunc->args())
1103 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1104 NoAliasArgs.push_back(&Arg);
1105
1106 if (NoAliasArgs.empty())
1107 return;
1108
1109 // To do a good job, if a noalias variable is captured, we need to know if
1110 // the capture point dominates the particular use we're considering.
1111 DominatorTree DT;
1112 DT.recalculate(const_cast<Function&>(*CalledFunc));
1113
1114 // noalias indicates that pointer values based on the argument do not alias
1115 // pointer values which are not based on it. So we add a new "scope" for each
1116 // noalias function argument. Accesses using pointers based on that argument
1117 // become part of that alias scope, accesses using pointers not based on that
1118 // argument are tagged as noalias with that scope.
1119
1120 DenseMap<const Argument *, MDNode *> NewScopes;
1121 MDBuilder MDB(CalledFunc->getContext());
1122
1123 // Create a new scope domain for this function.
1124 MDNode *NewDomain =
1125 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
1126 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
1127 const Argument *A = NoAliasArgs[i];
1128
1129 std::string Name = std::string(CalledFunc->getName());
1130 if (A->hasName()) {
1131 Name += ": %";
1132 Name += A->getName();
1133 } else {
1134 Name += ": argument ";
1135 Name += utostr(i);
1136 }
1137
1138 // Note: We always create a new anonymous root here. This is true regardless
1139 // of the linkage of the callee because the aliasing "scope" is not just a
1140 // property of the callee, but also all control dependencies in the caller.
1141 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
1142 NewScopes.insert(std::make_pair(A, NewScope));
1143
1144 if (UseNoAliasIntrinsic) {
1145 // Introduce a llvm.experimental.noalias.scope.decl for the noalias
1146 // argument.
1147 MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
1148 auto *NoAliasDecl =
1149 IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
1150 // Ignore the result for now. The result will be used when the
1151 // llvm.noalias intrinsic is introduced.
1152 (void)NoAliasDecl;
1153 }
1154 }
1155
1156 // Iterate over all new instructions in the map; for all memory-access
1157 // instructions, add the alias scope metadata.
1158 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
1159 VMI != VMIE; ++VMI) {
1160 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
1161 if (!VMI->second)
1162 continue;
1163
1164 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1165 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
1166 continue;
1167
1168 bool IsArgMemOnlyCall = false, IsFuncCall = false;
1169 SmallVector<const Value *, 2> PtrArgs;
1170
1171 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1172 PtrArgs.push_back(LI->getPointerOperand());
1173 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1174 PtrArgs.push_back(SI->getPointerOperand());
1175 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1176 PtrArgs.push_back(VAAI->getPointerOperand());
1177 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1178 PtrArgs.push_back(CXI->getPointerOperand());
1179 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1180 PtrArgs.push_back(RMWI->getPointerOperand());
1181 else if (const auto *Call = dyn_cast<CallBase>(I)) {
1182 // If we know that the call does not access memory, then we'll still
1183 // know that about the inlined clone of this call site, and we don't
1184 // need to add metadata.
1185 if (Call->doesNotAccessMemory())
1186 continue;
1187
1188 IsFuncCall = true;
1189 if (CalleeAAR) {
1190 MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);
1191
1192 // We'll retain this knowledge without additional metadata.
1193 if (ME.onlyAccessesInaccessibleMem())
1194 continue;
1195
1196 if (ME.onlyAccessesArgPointees())
1197 IsArgMemOnlyCall = true;
1198 }
1199
1200 for (Value *Arg : Call->args()) {
1201 // Only care about pointer arguments. If a noalias argument is
1202 // accessed through a non-pointer argument, it must be captured
1203 // first (e.g. via ptrtoint), and we protect against captures below.
1204 if (!Arg->getType()->isPointerTy())
1205 continue;
1206
1207 PtrArgs.push_back(Arg);
1208 }
1209 }
1210
1211 // If we found no pointers, then this instruction is not suitable for
1212 // pairing with an instruction to receive aliasing metadata.
1213 // However, if this is a call, we might just alias with none of the
1214 // noalias arguments.
1215 if (PtrArgs.empty() && !IsFuncCall)
1216 continue;
1217
1218 // It is possible that there is only one underlying object, but you
1219 // need to go through several PHIs to see it, and thus it could be
1220 // repeated in the Objects list.
1221 SmallPtrSet<const Value *, 4> ObjSet;
1222 SmallVector<Metadata *, 4> Scopes, NoAliases;
1223
1224 for (const Value *V : PtrArgs) {
1225 SmallVector<const Value *, 4> Objects;
1226 getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1227
1228 for (const Value *O : Objects)
1229 ObjSet.insert(O);
1230 }
1231
1232 // Figure out if we're derived from anything that is not a noalias
1233 // argument.
1234 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
1235 UsesUnknownObject = false;
1236 for (const Value *V : ObjSet) {
1237 // Is this value a constant that cannot be derived from any pointer
1238 // value (we need to exclude constant expressions, for example, that
1239 // are formed from arithmetic on global symbols).
1240 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1241 isa<ConstantPointerNull>(V) ||
1242 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1243 if (IsNonPtrConst)
1244 continue;
1245
1246 // If this is anything other than a noalias argument, then we cannot
1247 // completely describe the aliasing properties using alias.scope
1248 // metadata (and, thus, won't add any).
1249 if (const Argument *A = dyn_cast<Argument>(V)) {
1250 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1251 UsesAliasingPtr = true;
1252 } else {
1253 UsesAliasingPtr = true;
1254 }
1255
1256 if (isEscapeSource(V)) {
1257 // An escape source can only alias with a noalias argument if it has
1258 // been captured beforehand.
1259 RequiresNoCaptureBefore = true;
1260 } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
1261 // If this is neither an escape source, nor some identified object
1262 // (which cannot directly alias a noalias argument), nor some other
1263 // argument (which, by definition, also cannot alias a noalias
1264 // argument), conservatively do not make any assumptions.
1265 UsesUnknownObject = true;
1266 }
1267 }
1268
1269 // Nothing we can do if the used underlying object cannot be reliably
1270 // determined.
1271 if (UsesUnknownObject)
1272 continue;
1273
1274 // A function call can always get captured noalias pointers (via other
1275 // parameters, globals, etc.).
1276 if (IsFuncCall && !IsArgMemOnlyCall)
1277 RequiresNoCaptureBefore = true;
1278
1279 // First, we want to figure out all of the sets with which we definitely
1280 // don't alias. Iterate over all noalias sets, and add those for which:
1281 // 1. The noalias argument is not in the set of objects from which we
1282 // definitely derive.
1283 // 2. The noalias argument has not yet been captured.
1284 // An arbitrary function that might load pointers could see captured
1285 // noalias arguments via other noalias arguments or globals, and so we
1286 // must always check for prior capture.
1287 for (const Argument *A : NoAliasArgs) {
1288 if (ObjSet.contains(A))
1289 continue; // May be based on a noalias argument.
1290
1291 // It might be tempting to skip the PointerMayBeCapturedBefore check if
1292 // A->hasNoCaptureAttr() is true, but this is incorrect because
1293 // nocapture only guarantees that no copies outlive the function, not
1294 // that the value cannot be locally captured.
1295 if (!RequiresNoCaptureBefore ||
1296 !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
1297 /* StoreCaptures */ false, I, &DT))
1298 NoAliases.push_back(NewScopes[A]);
1299 }
1300
1301 if (!NoAliases.empty())
1302 NI->setMetadata(LLVMContext::MD_noalias,
1303 MDNode::concatenate(
1304 NI->getMetadata(LLVMContext::MD_noalias),
1305 MDNode::get(CalledFunc->getContext(), NoAliases)));
1306
1307 // Next, we want to figure out all of the sets to which we might belong.
1308 // We might belong to a set if the noalias argument is in the set of
1309 // underlying objects. If there is some non-noalias argument in our list
1310 // of underlying objects, then we cannot add a scope because the fact
1311 // that some access does not alias with any set of our noalias arguments
1312 // cannot itself guarantee that it does not alias with this access
1313 // (because there is some pointer of unknown origin involved and the
1314 // other access might also depend on this pointer). We also cannot add
1315 // scopes to arbitrary functions unless we know they don't access any
1316 // non-parameter pointer-values.
1317 bool CanAddScopes = !UsesAliasingPtr;
1318 if (CanAddScopes && IsFuncCall)
1319 CanAddScopes = IsArgMemOnlyCall;
1320
1321 if (CanAddScopes)
1322 for (const Argument *A : NoAliasArgs) {
1323 if (ObjSet.count(A))
1324 Scopes.push_back(NewScopes[A]);
1325 }
1326
1327 if (!Scopes.empty())
1328 NI->setMetadata(
1329 LLVMContext::MD_alias_scope,
1330 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1331 MDNode::get(CalledFunc->getContext(), Scopes)));
1332 }
1333 }
1334}
1335
1336static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
1337 ReturnInst *End) {
1338
1339 assert(Begin->getParent() == End->getParent() &&
1340 "Expected to be in same basic block!");
1341 auto BeginIt = Begin->getIterator();
1342 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
1343 return !llvm::isGuaranteedToTransferExecutionToSuccessor(
1344 ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
1345}
1346
1347// Add attributes from CB params and Fn attributes that can always be propagated
1348// to the corresponding argument / inner callbases.
1349static void AddParamAndFnBasicAttributes(const CallBase &CB,
1350 ValueToValueMapTy &VMap) {
1351 auto *CalledFunction = CB.getCalledFunction();
1352 auto &Context = CalledFunction->getContext();
1353
1354 // Collect valid attributes for all params.
1355 SmallVector<AttrBuilder> ValidParamAttrs;
1356 bool HasAttrToPropagate = false;
1357
1358 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {
1359 ValidParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1360 // Access attributes can be propagated to any param with the same underlying
1361 // object as the argument.
1362 if (CB.paramHasAttr(I, Attribute::ReadNone))
1363 ValidParamAttrs.back().addAttribute(Attribute::ReadNone);
1364 if (CB.paramHasAttr(I, Attribute::ReadOnly))
1365 ValidParamAttrs.back().addAttribute(Attribute::ReadOnly);
1366 HasAttrToPropagate |= ValidParamAttrs.back().hasAttributes();
1367 }
1368
1369 // Won't be able to propagate anything.
1370 if (!HasAttrToPropagate)
1371 return;
1372
1373 for (BasicBlock &BB : *CalledFunction) {
1374 for (Instruction &Ins : BB) {
1375 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1376 if (!InnerCB)
1377 continue;
1378 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));
1379 if (!NewInnerCB)
1380 continue;
1381 AttributeList AL = NewInnerCB->getAttributes();
1382 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {
1383 // Check if the underlying value for the parameter is an argument.
1384 const Value *UnderlyingV =
1385 getUnderlyingObject(InnerCB->getArgOperand(I));
1386 const Argument *Arg = dyn_cast<Argument>(UnderlyingV);
1387 if (!Arg)
1388 continue;
1389
1390 if (AL.hasParamAttr(I, Attribute::ByVal))
1391 // It's unsound to propagate memory attributes to byval arguments.
1392 // Even if CalledFunction doesn't e.g. write to the argument,
1393 // the call to NewInnerCB may write to its by-value copy.
1394 continue;
1395
1396 unsigned ArgNo = Arg->getArgNo();
1397 // If so, propagate its access attributes.
1398 AL = AL.addParamAttributes(Context, I, ValidParamAttrs[ArgNo]);
1399 // We can have conflicting attributes from the inner callsite and
1400 // to-be-inlined callsite. In that case, choose the most
1401 // restrictive.
1402
1403 // readonly + writeonly means we can never deref so make readnone.
1404 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&
1405 AL.hasParamAttr(I, Attribute::WriteOnly))
1406 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);
1407
1408 // If we have readnone, we need to clear readonly/writeonly.
1409 if (AL.hasParamAttr(I, Attribute::ReadNone)) {
1410 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);
1411 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);
1412 }
1413
1414 // Writable cannot exist in conjunction w/ readonly/readnone
1415 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||
1416 AL.hasParamAttr(I, Attribute::ReadNone))
1417 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);
1418 }
1419 NewInnerCB->setAttributes(AL);
1420 }
1421 }
1422}
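// Sketch of the effect (hypothetical example): for a call site
//   call void @callee(ptr readonly %p)
// where @callee forwards %p (or a pointer based on it) to some inner call, the
// cloned inner call gets readonly added on the corresponding parameter, with
// conflicting access attributes resolved toward the more restrictive one as
// described above.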
1423
1424// Only allow these white listed attributes to be propagated back to the
1425// callee. This is because other attributes may only be valid on the call
1426// itself, i.e. attributes such as signext and zeroext.
1427
1428// Attributes that are always okay to propagate, as violating them is
1429// immediate UB.
1430static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB) {
1431 AttrBuilder Valid(CB.getContext());
1432 if (auto DerefBytes = CB.getRetDereferenceableBytes())
1433 Valid.addDereferenceableAttr(DerefBytes);
1434 if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
1435 Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1436 if (CB.hasRetAttr(Attribute::NoAlias))
1437 Valid.addAttribute(Attribute::NoAlias);
1438 if (CB.hasRetAttr(Attribute::NoUndef))
1439 Valid.addAttribute(Attribute::NoUndef);
1440 return Valid;
1441}
1442
1443// Attributes that need additional checks as propagating them may change
1444// behavior or cause new UB.
1445static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB) {
1446 AttrBuilder Valid(CB.getContext());
1447 if (CB.hasRetAttr(Attribute::NonNull))
1448 Valid.addAttribute(Attribute::NonNull);
1449 if (CB.hasRetAttr(Attribute::Alignment))
1450 Valid.addAlignmentAttr(CB.getRetAlign());
1451 if (std::optional<ConstantRange> Range = CB.getRange())
1452 Valid.addRangeAttr(*Range);
1453 return Valid;
1454}
1455
1456static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1457 AttrBuilder ValidUB = IdentifyValidUBGeneratingAttributes(CB);
1458 AttrBuilder ValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
1459 if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
1460 return;
1461 auto *CalledFunction = CB.getCalledFunction();
1462 auto &Context = CalledFunction->getContext();
1463
1464 for (auto &BB : *CalledFunction) {
1465 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1466 if (!RI || !isa<CallBase>(RI->getOperand(0)))
1467 continue;
1468 auto *RetVal = cast<CallBase>(RI->getOperand(0));
1469 // Check that the cloned RetVal exists and is a call, otherwise we cannot
1470 // add the attributes on the cloned RetVal. Simplification during inlining
1471 // could have transformed the cloned instruction.
1472 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1473 if (!NewRetVal)
1474 continue;
1475 // Backward propagation of attributes to the returned value may be incorrect
1476 // if it is control flow dependent.
1477 // Consider:
1478 // @callee {
1479 // %rv = call @foo()
1480 // %rv2 = call @bar()
1481 // if (%rv2 != null)
1482 // return %rv2
1483 // if (%rv == null)
1484 // exit()
1485 // return %rv
1486 // }
1487 // caller() {
1488 // %val = call nonnull @callee()
1489 // }
1490 // Here we cannot add the nonnull attribute on either foo or bar. So, we
1491 // limit the check to the case where RetVal and RI are in the same basic
1492 // block and there are no throwing/exiting instructions between them.
1493 if (RI->getParent() != RetVal->getParent() ||
1494 MayContainThrowingOrExitingCallAfterCB(RetVal, RI))
1495 continue;
1496 // Add to the existing attributes of NewRetVal, i.e. the cloned call
1497 // instruction.
1498 // NB! When we have the same attribute already existing on NewRetVal, but
1499 // with a differing value, the AttributeList's merge API honours the already
1500 // existing attribute value (i.e. attributes such as dereferenceable,
1501 // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1502 AttributeList AL = NewRetVal->getAttributes();
1503 if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
1504 ValidUB.removeAttribute(Attribute::Dereferenceable);
1505 if (ValidUB.getDereferenceableOrNullBytes() <
1506 AL.getRetDereferenceableOrNullBytes())
1507 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
1508 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1509 // Attributes that may generate poison returns are a bit tricky. If we
1510 // propagate them, other uses of the callsite might have their behavior
1511 // change or cause UB (if they have noundef) because of the new potential
1512 // poison.
1513 // Take the following three cases:
1514 //
1515 // 1)
1516 // define nonnull ptr @foo() {
1517 // %p = call ptr @bar()
1518 // call void @use(ptr %p) willreturn nounwind
1519 // ret ptr %p
1520 // }
1521 //
1522 // 2)
1523 // define noundef nonnull ptr @foo() {
1524 // %p = call ptr @bar()
1525 // call void @use(ptr %p) willreturn nounwind
1526 // ret ptr %p
1527 // }
1528 //
1529 // 3)
1530 // define nonnull ptr @foo() {
1531 // %p = call noundef ptr @bar()
1532 // ret ptr %p
1533 // }
1534 //
1535 // In case 1, we can't propagate nonnull because a poison value reaching
1536 // @use may change behavior or trigger UB.
1537 // In case 2, we don't need to be concerned about propagating nonnull, as
1538 // any new poison at @use will trigger UB anyways.
1539 // In case 3, we can never propagate nonnull because it may create UB due to
1540 // the noundef on @bar.
1541 if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
1542 ValidPG.removeAttribute(Attribute::Alignment);
1543 if (ValidPG.hasAttributes()) {
1544 Attribute CBRange = ValidPG.getAttribute(Attribute::Range);
1545 if (CBRange.isValid()) {
1546 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1547 if (NewRange.isValid()) {
1548 ValidPG.addRangeAttr(
1549 CBRange.getRange().intersectWith(NewRange.getRange()));
1550 }
1551 }
1552 // Three checks.
1553 // If the callsite has `noundef`, then a poison due to violating the
1554 // return attribute will create UB anyways so we can always propagate.
1555 // Otherwise, if the return value (callee to be inlined) has `noundef`, we
1556 // can't propagate as a new poison return will cause UB.
1557 // Finally, check if the return value has no uses whose behavior may
1558 // change/may cause UB if we potentially return poison. At the moment this
1559 // is implemented overly conservatively with a single-use check.
1560 // TODO: Update the single-use check to iterate through uses and only bail
1561 // if we have a potentially dangerous use.
1562
1563 if (CB.hasRetAttr(Attribute::NoUndef) ||
1564 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1565 NewAL = NewAL.addRetAttributes(Context, ValidPG);
1566 }
1567 NewRetVal->setAttributes(NewAL);
1568 }
1569}
1570
1571/// If the inlined function has non-byval align arguments, then
1572/// add @llvm.assume-based alignment assumptions to preserve this information.
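/// A rough sketch of the effect (assuming the assumptions are emitted as
/// operand bundles on @llvm.assume): for a callee parameter `ptr align 16 %p`
/// bound to caller value %v, this inserts something like
///   call void @llvm.assume(i1 true) [ "align"(ptr %v, i64 16) ]
/// in the caller, unless that alignment is already provable at the call site.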
1573static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1574 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1575 return;
1576
1577 AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1578 auto &DL = CB.getDataLayout();
1579
1580 // To avoid inserting redundant assumptions, we should check for assumptions
1581 // already in the caller. To do this, we might need a DT of the caller.
1582 DominatorTree DT;
1583 bool DTCalculated = false;
1584
1585 Function *CalledFunc = CB.getCalledFunction();
1586 for (Argument &Arg : CalledFunc->args()) {
1587 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1588 Arg.hasNUses(0))
1589 continue;
1590 MaybeAlign Alignment = Arg.getParamAlign();
1591 if (!Alignment)
1592 continue;
1593
1594 if (!DTCalculated) {
1595 DT.recalculate(*CB.getCaller());
1596 DTCalculated = true;
1597 }
1598 // If we can already prove the asserted alignment in the context of the
1599 // caller, then don't bother inserting the assumption.
1600 Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1601 if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
1602 continue;
1603
1604 CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
1605 DL, ArgVal, Alignment->value());
1606 AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1607 }
1608}
1609
1610static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1611 Module *M, BasicBlock *InsertBlock,
1612 InlineFunctionInfo &IFI,
1613 Function *CalledFunc) {
1614 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1615
1616 Value *Size =
1617 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1618
1619 // Always generate a memcpy of alignment 1 here because we don't know
1620 // the alignment of the src pointer. Other optimizations can infer
1621 // better alignment.
1622 CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1623 /*SrcAlign*/ Align(1), Size);
1624
1625 // The verifier requires that all calls of debug-info-bearing functions
1626 // from debug-info-bearing functions have a debug location (for inlining
1627 // purposes). Assign a dummy location to satisfy the constraint.
1628 if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
1629 if (DISubprogram *SP = CalledFunc->getSubprogram())
1630 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1631}
1632
1633/// When inlining a call site that has a byval argument,
1634/// we have to make the implicit memcpy explicit by adding it.
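/// An illustrative sketch (hypothetical IR): when inlining
///   call void @f(ptr byval(%struct.S) %p)
/// a fresh `%p.copy = alloca %struct.S` is created in the caller's entry block
/// and a memcpy from %p into %p.copy is emitted at the start of the inlined
/// body, which then uses %p.copy; the copy is elided when the callee only
/// reads memory and the alignment requirements can be met.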
1635static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1636 Instruction *TheCall,
1637 const Function *CalledFunc,
1638 InlineFunctionInfo &IFI,
1639 MaybeAlign ByValAlignment) {
1640 Function *Caller = TheCall->getFunction();
1641 const DataLayout &DL = Caller->getDataLayout();
1642
1643 // If the called function is readonly, then it could not mutate the caller's
1644 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1645 // temporary.
1646 if (CalledFunc->onlyReadsMemory()) {
1647 // If the byval argument has a specified alignment that is greater than the
1648 // passed in pointer, then we either have to round up the input pointer or
1649 // give up on this transformation.
1650 if (ByValAlignment.valueOrOne() == 1)
1651 return Arg;
1652
1653 AssumptionCache *AC =
1654 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1655
1656 // If the pointer is already known to be sufficiently aligned, or if we can
1657 // round it up to a larger alignment, then we don't need a temporary.
1658 if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
1659 *ByValAlignment)
1660 return Arg;
1661
1662 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1663 // for code quality, but rarely happens and is required for correctness.
1664 }
1665
1666 // Create the alloca. If we have DataLayout, use nice alignment.
1667 Align Alignment = DL.getPrefTypeAlign(ByValType);
1668
1669 // If the byval had an alignment specified, we *must* use at least that
1670 // alignment, as it is required by the byval argument (and uses of the
1671 // pointer inside the callee).
1672 if (ByValAlignment)
1673 Alignment = std::max(Alignment, *ByValAlignment);
1674
1675 AllocaInst *NewAlloca = new AllocaInst(ByValType, DL.getAllocaAddrSpace(),
1676 nullptr, Alignment, Arg->getName());
1677 NewAlloca->insertBefore(Caller->begin()->begin());
1678 IFI.StaticAllocas.push_back(NewAlloca);
1679
1680 // Uses of the argument in the function should use our new alloca
1681 // instead.
1682 return NewAlloca;
1683}
1684
1685// Check whether this Value is used by a lifetime intrinsic.
1686static bool isUsedByLifetimeMarker(Value *V) {
1687 for (User *U : V->users())
1688 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1689 if (II->isLifetimeStartOrEnd())
1690 return true;
1691 return false;
1692}
1693
1694// Check whether the given alloca already has
1695// lifetime.start or lifetime.end intrinsics.
1696static bool hasLifetimeMarkers(AllocaInst *AI) {
1697 Type *Ty = AI->getType();
1698 Type *Int8PtrTy =
1699 PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
1700 if (Ty == Int8PtrTy)
1701 return isUsedByLifetimeMarker(AI);
1702
1703 // Do a scan to find all the casts to i8*.
1704 for (User *U : AI->users()) {
1705 if (U->getType() != Int8PtrTy) continue;
1706 if (U->stripPointerCasts() != AI) continue;
1707 if (isUsedByLifetimeMarker(U))
1708 return true;
1709 }
1710 return false;
1711}
1712
1713/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1714/// block. Allocas used in inalloca calls and allocas of dynamic array size
1715/// cannot be static.
1716static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1717 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1718}
1719
1720/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1721/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1722static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1723 LLVMContext &Ctx,
1724 DenseMap<const MDNode *, MDNode *> &IANodes) {
1725 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1726 return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1727 OrigDL.getScope(), IA);
1728}
1729
1730/// Update inlined instructions' line numbers to
1731/// encode the location where these instructions are inlined.
1732static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1733 Instruction *TheCall, bool CalleeHasDebugInfo) {
1734 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1735 if (!TheCallDL)
1736 return;
1737
1738 auto &Ctx = Fn->getContext();
1739 DILocation *InlinedAtNode = TheCallDL;
1740
1741 // Create a unique call site, not to be confused with any other call from the
1742 // same location.
1743 InlinedAtNode = DILocation::getDistinct(
1744 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1745 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1746
1747 // Cache the inlined-at nodes as they're built so they are reused; without
1748 // this, every instruction's inlined-at chain would become distinct from
1749 // each other.
1750 DenseMap<const MDNode *, MDNode *> IANodes;
1751
1752 // Check if we are not generating inline line tables and want to use
1753 // the call site location instead.
1754 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1755
1756 // Helper-util for updating the metadata attached to an instruction.
1757 auto UpdateInst = [&](Instruction &I) {
1758 // Loop metadata needs to be updated so that the start and end locs
1759 // reference inlined-at locations.
1760 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1761 &IANodes](Metadata *MD) -> Metadata * {
1762 if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1763 return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1764 return MD;
1765 };
1766 updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
1767
1768 if (!NoInlineLineTables)
1769 if (DebugLoc DL = I.getDebugLoc()) {
1770 DebugLoc IDL =
1771 inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
1772 I.setDebugLoc(IDL);
1773 return;
1774 }
1775
1776 if (CalleeHasDebugInfo && !NoInlineLineTables)
1777 return;
1778
1779 // If the inlined instruction has no line number, or if inline info
1780 // is not being generated, make it look as if it originates from the call
1781 // location. This is important for ((__always_inline, __nodebug__))
1782 // functions which must use caller location for all instructions in their
1783 // function body.
1784
1785 // Don't update static allocas, as they may get moved later.
1786 if (auto *AI = dyn_cast<AllocaInst>(&I))
1787 if (allocaWouldBeStaticInEntry(AI))
1788 return;
1789
1790 // Do not force a debug loc for pseudo probes, since they do not need to
1791 // be debuggable, and also they are expected to have a zero/null dwarf
1792 // discriminator at this point which could be violated otherwise.
1793 if (isa<PseudoProbeInst>(I))
1794 return;
1795
1796 I.setDebugLoc(TheCallDL);
1797 };
1798
1799 // Helper-util for updating debug-info records attached to instructions.
1800 auto UpdateDVR = [&](DbgRecord *DVR) {
1801 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");
1802 if (NoInlineLineTables) {
1803 DVR->setDebugLoc(TheCallDL);
1804 return;
1805 }
1806 DebugLoc DL = DVR->getDebugLoc();
1807 DebugLoc IDL =
1808 inlineDebugLoc(DL, InlinedAtNode,
1809 DVR->getMarker()->getParent()->getContext(), IANodes);
1810 DVR->setDebugLoc(IDL);
1811 };
1812
1813 // Iterate over all instructions, updating metadata and debug-info records.
1814 for (; FI != Fn->end(); ++FI) {
1815 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
1816 ++BI) {
1817 UpdateInst(*BI);
1818 for (DbgRecord &DVR : BI->getDbgRecordRange()) {
1819 UpdateDVR(&DVR);
1820 }
1821 }
1822
1823 // Remove debug info intrinsics if we're not keeping inline info.
1824 if (NoInlineLineTables) {
1825 BasicBlock::iterator BI = FI->begin();
1826 while (BI != FI->end()) {
1827 if (isa<DbgInfoIntrinsic>(BI)) {
1828 BI = BI->eraseFromParent();
1829 continue;
1830 } else {
1831 BI->dropDbgRecords();
1832 }
1833 ++BI;
1834 }
1835 }
1836 }
1837}
1838
1839#undef DEBUG_TYPE
1840#define DEBUG_TYPE "assignment-tracking"
1841/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
1842static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL,
1843 const CallBase &CB) {
1844 at::StorageToVarsMap EscapedLocals;
1845 SmallPtrSet<const Value *, 4> SeenBases;
1846
1847 LLVM_DEBUG(
1848 errs() << "# Finding caller local variables escaped by callee\n");
1849 for (const Value *Arg : CB.args()) {
1850 LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
1851 if (!Arg->getType()->isPointerTy()) {
1852 LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
1853 continue;
1854 }
1855
1856 const Instruction *I = dyn_cast<Instruction>(Arg);
1857 if (!I) {
1858 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
1859 continue;
1860 }
1861
1862 // Walk back to the base storage.
1863 assert(Arg->getType()->isPtrOrPtrVectorTy());
1864 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
1865 const AllocaInst *Base = dyn_cast<AllocaInst>(
1866 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
1867 if (!Base) {
1868 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
1869 continue;
1870 }
1871
1872 assert(Base);
1873 LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
1874 // We only need to process each base address once - skip any duplicates.
1875 if (!SeenBases.insert(Base).second)
1876 continue;
1877
1878 // Find all local variables associated with the backing storage.
1879 auto CollectAssignsForStorage = [&](auto *DbgAssign) {
1880 // Skip variables from inlined functions - they are not local variables.
1881 if (DbgAssign->getDebugLoc().getInlinedAt())
1882 return;
1883 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
1884 EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
1885 };
1886 for_each(at::getAssignmentMarkers(Base), CollectAssignsForStorage);
1887 for_each(at::getDVRAssignmentMarkers(Base), CollectAssignsForStorage);
1888 }
1889 return EscapedLocals;
1890}
1891
1892static void trackInlinedStores(Function::iterator Start, Function::iterator End,
1893 const CallBase &CB) {
1894 LLVM_DEBUG(errs() << "trackInlinedStores into "
1895 << Start->getParent()->getName() << " from "
1896 << CB.getCalledFunction()->getName() << "\n");
1897 std::unique_ptr<DataLayout> DL = std::make_unique<DataLayout>(CB.getModule());
1898 at::trackAssignments(Start, End, collectEscapedLocals(*DL, CB), *DL);
1899}
1900
1901/// Update inlined instructions' DIAssignID metadata. We need to do this
1902/// otherwise a function inlined more than once into the same function
1903/// will cause DIAssignID to be shared by many instructions.
1904static void fixupAssignments(Function::iterator Start, Function::iterator End) {
1905 DenseMap<DIAssignID *, DIAssignID *> Map;
1906 // Loop over all the inlined instructions. If we find a DIAssignID
1907 // attachment or use, replace it with a new version.
1908 for (auto BBI = Start; BBI != End; ++BBI) {
1909 for (Instruction &I : *BBI)
1910 at::remapAssignID(Map, I);
1911 }
1912}
1913#undef DEBUG_TYPE
1914#define DEBUG_TYPE "inline-function"
1915
1916/// Update the block frequencies of the caller after a callee has been inlined.
1917///
1918/// Each block cloned into the caller has its block frequency scaled by the
1919/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1920/// callee's entry block gets the same frequency as the callsite block and the
1921/// relative frequencies of all cloned blocks remain the same after cloning.
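/// For example (hypothetical numbers): if the callsite block has frequency 200
/// and the callee entry block had frequency 100, every cloned block is scaled
/// by a factor of 2, so a callee block with frequency 50 appears in the caller
/// with frequency 100.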
1922static void updateCallerBFI(BasicBlock *CallSiteBlock,
1923 const ValueToValueMapTy &VMap,
1924 BlockFrequencyInfo *CallerBFI,
1925 BlockFrequencyInfo *CalleeBFI,
1926 const BasicBlock &CalleeEntryBlock) {
1927 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1928 for (auto Entry : VMap) {
1929 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1930 continue;
1931 auto *OrigBB = cast<BasicBlock>(Entry.first);
1932 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1933 BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
1934 if (!ClonedBBs.insert(ClonedBB).second) {
1935 // Multiple blocks in the callee might get mapped to one cloned block in
1936 // the caller since we prune the callee as we clone it. When that happens,
1937 // we want to use the maximum among the original blocks' frequencies.
1938 BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
1939 if (NewFreq > Freq)
1940 Freq = NewFreq;
1941 }
1942 CallerBFI->setBlockFreq(ClonedBB, Freq);
1943 }
1944 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1945 CallerBFI->setBlockFreqAndScale(
1946 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
1947}
1948
1949/// Update the branch metadata for cloned call instructions.
1950static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1951 const ProfileCount &CalleeEntryCount,
1952 const CallBase &TheCall, ProfileSummaryInfo *PSI,
1953 BlockFrequencyInfo *CallerBFI) {
1954 if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
1955 return;
1956 auto CallSiteCount =
1957 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
1958 int64_t CallCount =
1959 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
1960 updateProfileCallee(Callee, -CallCount, &VMap);
1961}
1962
1963void llvm::updateProfileCallee(
1964 Function *Callee, int64_t EntryDelta,
1965 const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1966 auto CalleeCount = Callee->getEntryCount();
1967 if (!CalleeCount)
1968 return;
1969
1970 const uint64_t PriorEntryCount = CalleeCount->getCount();
1971
1972 // Since CallSiteCount is an estimate, it could exceed the original callee
1973 // count; in that case the new entry count is clamped to 0 to guard against
1974 // underflow.
1974 const uint64_t NewEntryCount =
1975 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1976 ? 0
1977 : PriorEntryCount + EntryDelta;
1978
1979 // During inlining ?
1980 if (VMap) {
1981 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1982 for (auto Entry : *VMap) {
1983 if (isa<CallInst>(Entry.first))
1984 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1985 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1986 if (isa<InvokeInst>(Entry.first))
1987 if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second))
1988 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
1989 }
1990 }
1991
1992 if (EntryDelta) {
1993 Callee->setEntryCount(NewEntryCount);
1994
1995 for (BasicBlock &BB : *Callee)
1996 // No need to update the callsite if it is pruned during inlining.
1997 if (!VMap || VMap->count(&BB))
1998 for (Instruction &I : BB) {
1999 if (CallInst *CI = dyn_cast<CallInst>(&I))
2000 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2001 if (InvokeInst *II = dyn_cast<InvokeInst>(&I))
2002 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2003 }
2004 }
2005}
2006
2007/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
2008/// result is implicitly consumed by a call to retainRV or claimRV immediately
2009/// after the call. This function inlines the retainRV/claimRV calls.
2010///
2011/// There are three cases to consider:
2012///
2013/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
2014/// object in the callee return block, the autoreleaseRV call and the
2015/// retainRV/claimRV call in the caller cancel out. If the call in the caller
2016/// is a claimRV call, a call to objc_release is emitted.
2017///
2018/// 2. If there is a call in the callee return block that doesn't have operand
2019/// bundle "clang.arc.attachedcall", the operand bundle on the original call
2020/// is transferred to the call in the callee.
2021///
2022/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
2023/// a retainRV call.
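/// An illustrative sketch of case 1 (hypothetical IR): if the callee's return
/// block ends in
///   %tmp = call ptr @llvm.objc.autoreleaseReturnValue(ptr %v) ; result unused
///   ret ptr %v
/// and the caller's call carries a retainRV attached call, the two runtime
/// calls cancel and the autoreleaseRV call is simply erased; with claimRV, a
/// call to objc_release of %v is emitted before erasing it.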
2024static void
2025inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
2026 const SmallVectorImpl<ReturnInst *> &Returns) {
2027 Module *Mod = CB.getModule();
2028 assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
2029 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2030 IsUnsafeClaimRV = !IsRetainRV;
2031
2032 for (auto *RI : Returns) {
2033 Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
2034 bool InsertRetainCall = IsRetainRV;
2035 IRBuilder<> Builder(RI->getContext());
2036
2037 // Walk backwards through the basic block looking for either a matching
2038 // autoreleaseRV call or an unannotated call.
2039 auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
2040 RI->getParent()->rend());
2041 for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
2042 // Ignore casts.
2043 if (isa<CastInst>(I))
2044 continue;
2045
2046 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
2047 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2048 !II->hasNUses(0) ||
2049 objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
2050 break;
2051
2052 // If we've found a matching autoreleaseRV call:
2053 // - If claimRV is attached to the call, insert a call to objc_release
2054 // and erase the autoreleaseRV call.
2055 // - If retainRV is attached to the call, just erase the autoreleaseRV
2056 // call.
2057 if (IsUnsafeClaimRV) {
2058 Builder.SetInsertPoint(II);
2059 Function *IFn =
2060 Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
2061 Builder.CreateCall(IFn, RetOpnd, "");
2062 }
2063 II->eraseFromParent();
2064 InsertRetainCall = false;
2065 break;
2066 }
2067
2068 auto *CI = dyn_cast<CallInst>(&I);
2069
2070 if (!CI)
2071 break;
2072
2073 if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
2074 objcarc::hasAttachedCallOpBundle(CI))
2075 break;
2076
2077 // If we've found an unannotated call that defines RetOpnd, add a
2078 // "clang.arc.attachedcall" operand bundle.
2079 Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
2080 OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
2081 auto *NewCall = CallBase::addOperandBundle(
2082 CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI->getIterator());
2083 NewCall->copyMetadata(*CI);
2084 CI->replaceAllUsesWith(NewCall);
2085 CI->eraseFromParent();
2086 InsertRetainCall = false;
2087 break;
2088 }
2089
2090 if (InsertRetainCall) {
2091 // The retainRV is attached to the call and we've failed to find a
2092 // matching autoreleaseRV or an annotated call in the callee. Emit a call
2093 // to objc_retain.
2094 Builder.SetInsertPoint(RI);
2095 Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
2096 Builder.CreateCall(IFn, RetOpnd, "");
2097 }
2098 }
2099}
2100
2101/// This function inlines the called function into the basic block of the
2102/// caller. This returns false if it is not possible to inline this call.
2103/// The program is still in a well defined state if this occurs though.
2104///
2105/// Note that this only does one level of inlining. For example, if the
2106/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2107/// exists in the instruction stream. Similarly this will inline a recursive
2108/// function by one level.
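/// A minimal usage sketch (simplified; the remaining parameters take their
/// defaults), given a CallBase *CB in some caller:
///   InlineFunctionInfo IFI;
///   InlineResult IR = InlineFunction(*CB, IFI);
///   if (!IR.isSuccess())
///     LLVM_DEBUG(dbgs() << "not inlined: " << IR.getFailureReason() << "\n");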
2109llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
2110 bool MergeAttributes,
2111 AAResults *CalleeAAR,
2112 bool InsertLifetime,
2113 Function *ForwardVarArgsTo) {
2114 assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
2115
2116 // FIXME: we don't inline callbr yet.
2117 if (isa<CallBrInst>(CB))
2118 return InlineResult::failure("We don't inline callbr yet.");
2119
2120 // If IFI has any state in it, zap it before we fill it in.
2121 IFI.reset();
2122
2123 Function *CalledFunc = CB.getCalledFunction();
2124 if (!CalledFunc || // Can't inline external function or indirect
2125 CalledFunc->isDeclaration()) // call!
2126 return InlineResult::failure("external or indirect");
2127
2128 // The inliner does not know how to inline through calls with operand bundles
2129 // in general ...
2130 Value *ConvergenceControlToken = nullptr;
2131 if (CB.hasOperandBundles()) {
2132 for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2133 auto OBUse = CB.getOperandBundleAt(i);
2134 uint32_t Tag = OBUse.getTagID();
2135 // ... but it knows how to inline through "deopt" operand bundles ...
2136 if (Tag == LLVMContext::OB_deopt)
2137 continue;
2138 // ... and "funclet" operand bundles.
2139 if (Tag == LLVMContext::OB_funclet)
2140 continue;
2141 if (Tag == LLVMContext::OB_clang_arc_attachedcall)
2142 continue;
2143 if (Tag == LLVMContext::OB_kcfi)
2144 continue;
2145 if (Tag == LLVMContext::OB_convergencectrl) {
2146 ConvergenceControlToken = OBUse.Inputs[0].get();
2147 continue;
2148 }
2149
2150 return InlineResult::failure("unsupported operand bundle");
2151 }
2152 }
2153
2154 // FIXME: The check below is redundant and incomplete. According to spec, if a
2155 // convergent call is missing a token, then the caller is using uncontrolled
2156 // convergence. If the callee has an entry intrinsic, then the callee is using
2157 // controlled convergence, and the call cannot be inlined. A proper
2158 // implementation of this check requires a whole new analysis that identifies
2159 // convergence in every function. For now, we skip that and just do this one
2160 // cursory check. The underlying assumption is that in a compiler flow that
2161 // fully implements convergence control tokens, there is no mixing of
2162 // controlled and uncontrolled convergent operations in the whole program.
2163 if (CB.isConvergent()) {
2164 auto *I = CalledFunc->getEntryBlock().getFirstNonPHI();
2165 if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2166 if (IntrinsicCall->getIntrinsicID() ==
2167 Intrinsic::experimental_convergence_entry) {
2168 if (!ConvergenceControlToken) {
2169 return InlineResult::failure(
2170 "convergent call needs convergencectrl operand");
2171 }
2172 }
2173 }
2174 }
2175
2176 // If the call to the callee cannot throw, set the 'nounwind' flag on any
2177 // calls that we inline.
2178 bool MarkNoUnwind = CB.doesNotThrow();
2179
2180 BasicBlock *OrigBB = CB.getParent();
2181 Function *Caller = OrigBB->getParent();
2182
2183 // GC poses two hazards to inlining, which only occur when the callee has GC:
2184 // 1. If the caller has no GC, then the callee's GC must be propagated to the
2185 // caller.
2186 // 2. If the caller has a differing GC, it is invalid to inline.
2187 if (CalledFunc->hasGC()) {
2188 if (!Caller->hasGC())
2189 Caller->setGC(CalledFunc->getGC());
2190 else if (CalledFunc->getGC() != Caller->getGC())
2191 return InlineResult::failure("incompatible GC");
2192 }
2193
2194 // Get the personality function from the callee if it contains a landing pad.
2195 Constant *CalledPersonality =
2196 CalledFunc->hasPersonalityFn()
2197 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2198 : nullptr;
2199
2200 // Find the personality function used by the landing pads of the caller. If it
2201 // exists, then check to see that it matches the personality function used in
2202 // the callee.
2203 Constant *CallerPersonality =
2204 Caller->hasPersonalityFn()
2205 ? Caller->getPersonalityFn()->stripPointerCasts()
2206 : nullptr;
2207 if (CalledPersonality) {
2208 if (!CallerPersonality)
2209 Caller->setPersonalityFn(CalledPersonality);
2210 // If the personality functions match, then we can perform the
2211 // inlining. Otherwise, we can't inline.
2212 // TODO: This isn't 100% true. Some personality functions are proper
2213 // supersets of others and can be used in place of the other.
2214 else if (CalledPersonality != CallerPersonality)
2215 return InlineResult::failure("incompatible personality");
2216 }
2217
2218 // We need to figure out which funclet the callsite was in so that we may
2219 // properly nest the callee.
2220 Instruction *CallSiteEHPad = nullptr;
2221 if (CallerPersonality) {
2222 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
2223 if (isScopedEHPersonality(Personality)) {
2224 std::optional<OperandBundleUse> ParentFunclet =
2225 CB.getOperandBundle(LLVMContext::OB_funclet);
2226 if (ParentFunclet)
2227 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2228
2229 // OK, the inlining site is legal. What about the target function?
2230
2231 if (CallSiteEHPad) {
2232 if (Personality == EHPersonality::MSVC_CXX) {
2233 // The MSVC personality cannot tolerate catches getting inlined into
2234 // cleanup funclets.
2235 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2236 // Ok, the call site is within a cleanuppad. Let's check the callee
2237 // for catchpads.
2238 for (const BasicBlock &CalledBB : *CalledFunc) {
2239 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2240 return InlineResult::failure("catch in cleanup funclet");
2241 }
2242 }
2243 } else if (isAsynchronousEHPersonality(Personality)) {
2244 // SEH is even less tolerant, there may not be any sort of exceptional
2245 // funclet in the callee.
2246 for (const BasicBlock &CalledBB : *CalledFunc) {
2247 if (CalledBB.isEHPad())
2248 return InlineResult::failure("SEH in cleanup funclet");
2249 }
2250 }
2251 }
2252 }
2253 }
2254
2255 // Determine if we are dealing with a call in an EHPad which does not unwind
2256 // to caller.
2257 bool EHPadForCallUnwindsLocally = false;
2258 if (CallSiteEHPad && isa<CallInst>(CB)) {
2259 UnwindDestMemoTy FuncletUnwindMap;
2260 Value *CallSiteUnwindDestToken =
2261 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
2262
2263 EHPadForCallUnwindsLocally =
2264 CallSiteUnwindDestToken &&
2265 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2266 }
2267
2268 // Get an iterator to the last basic block in the function, which will have
2269 // the new function inlined after it.
2270 Function::iterator LastBlock = --Caller->end();
2271
2272 // Make sure to capture all of the return instructions from the cloned
2273 // function.
2274 SmallVector<ReturnInst*, 8> Returns;
2275 ClonedCodeInfo InlinedFunctionInfo;
2276 Function::iterator FirstNewBlock;
2277
2278 { // Scope to destroy VMap after cloning.
2279 ValueToValueMapTy VMap;
2280 struct ByValInit {
2281 Value *Dst;
2282 Value *Src;
2283 Type *Ty;
2284 };
2285 // Keep a list of pair (dst, src) to emit byval initializations.
2286 SmallVector<ByValInit, 4> ByValInits;
2287
2288 // When inlining a function that contains noalias scope metadata,
2289 // this metadata needs to be cloned so that the inlined blocks
2290 // have different "unique scopes" at every call site.
2291 // Track the metadata that must be cloned. Do this before other changes to
2292 // the function, so that we do not get in trouble when inlining caller ==
2293 // callee.
2294 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2295
2296 auto &DL = Caller->getDataLayout();
2297
2298 // Calculate the vector of arguments to pass into the function cloner, which
2299 // matches up the formal to the actual argument values.
2300 auto AI = CB.arg_begin();
2301 unsigned ArgNo = 0;
2302 for (Function::arg_iterator I = CalledFunc->arg_begin(),
2303 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
2304 Value *ActualArg = *AI;
2305
2306 // When byval arguments are actually inlined, we need to make the copy implied
2307 // by them explicit. However, we don't do this if the callee is readonly
2308 // or readnone, because the copy would be unneeded: the callee doesn't
2309 // modify the struct.
2310 if (CB.isByValArgument(ArgNo)) {
2311 ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2312 &CB, CalledFunc, IFI,
2313 CalledFunc->getParamAlign(ArgNo));
2314 if (ActualArg != *AI)
2315 ByValInits.push_back(
2316 {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
2317 }
2318
2319 VMap[&*I] = ActualArg;
2320 }
2321
2322 // TODO: Remove this when users have been updated to the assume bundles.
2323 // Add alignment assumptions if necessary. We do this before the inlined
2324 // instructions are actually cloned into the caller so that we can easily
2325 // check what will be known at the start of the inlined code.
2326 AddAlignmentAssumptions(CB, IFI);
2327
2328 AssumptionCache *AC =
2329 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2330
2331 /// Preserve all attributes on the call and its parameters.
2332 salvageKnowledge(&CB, AC);
2333
2334 // We want the inliner to prune the code as it copies. We would LOVE to
2335 // have no dead or constant instructions leftover after inlining occurs
2336 // (which can happen, e.g., because an argument was constant), but we'll be
2337 // happy with whatever the cloner can do.
2338 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2339 /*ModuleLevelChanges=*/false, Returns, ".i",
2340 &InlinedFunctionInfo);
2341 // Remember the first block that is newly cloned over.
2342 FirstNewBlock = LastBlock; ++FirstNewBlock;
2343
2344 // Insert retainRV/claimRV runtime calls.
2345 objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
2346 if (RVCallKind != objcarc::ARCInstKind::None)
2347 inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2348
2349 // Updated caller/callee profiles only when requested. For sample loader
2350 // inlining, the context-sensitive inlinee profile doesn't need to be
2351 // subtracted from callee profile, and the inlined clone also doesn't need
2352 // to be scaled based on call site count.
2353 if (IFI.UpdateProfile) {
2354 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2355 // Update the BFI of blocks cloned into the caller.
2356 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2357 CalledFunc->front());
2358
2359 if (auto Profile = CalledFunc->getEntryCount())
2360 updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2361 IFI.CallerBFI);
2362 }
2363
2364 // Inject byval arguments initialization.
2365 for (ByValInit &Init : ByValInits)
2366 HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
2367 &*FirstNewBlock, IFI, CalledFunc);
2368
2369 std::optional<OperandBundleUse> ParentDeopt =
2370 CB.getOperandBundle(LLVMContext::OB_deopt);
2371 if (ParentDeopt) {
2372 SmallVector<OperandBundleDef, 2> OpDefs;
2373
2374 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2375 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2376 if (!ICS)
2377 continue; // instruction was DCE'd or RAUW'ed to undef
2378
2379 OpDefs.clear();
2380
2381 OpDefs.reserve(ICS->getNumOperandBundles());
2382
2383 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2384 ++COBi) {
2385 auto ChildOB = ICS->getOperandBundleAt(COBi);
2386 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2387 // If the inlined call has other operand bundles, let them be
2388 OpDefs.emplace_back(ChildOB);
2389 continue;
2390 }
2391
2392 // It may be useful to separate this logic (of handling operand
2393 // bundles) out to a separate "policy" component if this gets crowded.
2394 // Prepend the parent's deoptimization continuation to the newly
2395 // inlined call's deoptimization continuation.
2396 std::vector<Value *> MergedDeoptArgs;
2397 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2398 ChildOB.Inputs.size());
2399
2400 llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2401 llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2402
2403 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2404 }
2405
2406 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS->getIterator());
2407
2408 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2409 // this even if the call returns void.
2410 ICS->replaceAllUsesWith(NewI);
2411
2412 VH = nullptr;
2413 ICS->eraseFromParent();
2414 }
2415 }
2416
2417 // For 'nodebug' functions, the associated DISubprogram is always null.
2418 // Conservatively avoid propagating the callsite debug location to
2419 // instructions inlined from a function whose DISubprogram is not null.
2420 fixupLineNumbers(Caller, FirstNewBlock, &CB,
2421 CalledFunc->getSubprogram() != nullptr);
2422
2423 if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2424 // Interpret inlined stores to caller-local variables as assignments.
2425 trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2426
2427 // Update DIAssignID metadata attachments and uses so that they are
2428 // unique to this inlined instance.
2429 fixupAssignments(FirstNewBlock, Caller->end());
2430 }
2431
2432 // Now clone the inlined noalias scope metadata.
2433 SAMetadataCloner.clone();
2434 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2435
2436 // Add noalias metadata if necessary.
2437 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2438
2439 // Clone return attributes on the callsite into the calls within the inlined
2440 // function which feed into its return value.
2441 AddReturnAttributes(CB, VMap);
2442
2443 // Clone attributes on the params of the callsite to calls within the
2444 // inlined function which use the same param.
2445 AddParamAndFnBasicAttributes(CB, VMap);
2446
2447 propagateMemProfMetadata(CalledFunc, CB,
2448 InlinedFunctionInfo.ContainsMemProfMetadata, VMap);
2449
2450 // Propagate metadata on the callsite if necessary.
2451 PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2452
2453 // Register any cloned assumptions.
2454 if (IFI.GetAssumptionCache)
2455 for (BasicBlock &NewBlock :
2456 make_range(FirstNewBlock->getIterator(), Caller->end()))
2457 for (Instruction &I : NewBlock)
2458 if (auto *II = dyn_cast<AssumeInst>(&I))
2459 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2460 }
2461
2462 if (ConvergenceControlToken) {
2463 auto *I = FirstNewBlock->getFirstNonPHI();
2464 if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2465 if (IntrinsicCall->getIntrinsicID() ==
2466 Intrinsic::experimental_convergence_entry) {
2467 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2468 IntrinsicCall->eraseFromParent();
2469 }
2470 }
2471 }
2472
2473 // If there are any alloca instructions in the block that used to be the entry
2474 // block for the callee, move them to the entry block of the caller. First
2475 // calculate which instruction they should be inserted before. We insert the
2476 // instructions at the end of the current alloca list.
2477 {
2478 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2479 for (BasicBlock::iterator I = FirstNewBlock->begin(),
2480 E = FirstNewBlock->end(); I != E; ) {
2481 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2482 if (!AI) continue;
2483
2484 // If the alloca is now dead, remove it. This often occurs due to code
2485 // specialization.
2486 if (AI->use_empty()) {
2487 AI->eraseFromParent();
2488 continue;
2489 }
2490
2491 if (!allocaWouldBeStaticInEntry(AI))
2492 continue;
2493
2494 // Keep track of the static allocas that we inline into the caller.
2495 IFI.StaticAllocas.push_back(AI);
2496
2497 // Scan for the block of allocas that we can move over, and move them
2498 // all at once.
2499 while (isa<AllocaInst>(I) &&
2500 !cast<AllocaInst>(I)->use_empty() &&
2501 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2502 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2503 ++I;
2504 }
2505
2506 // Transfer all of the allocas over in a block. Using splice means
2507 // that the instructions aren't removed from the symbol table, then
2508 // reinserted.
2509 I.setTailBit(true);
2510 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2511 AI->getIterator(), I);
2512 }
2513 }
2514
2515 SmallVector<Value*,4> VarArgsToForward;
2516 SmallVector<AttributeSet, 4> VarArgsAttrs;
2517 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2518 i < CB.arg_size(); i++) {
2519 VarArgsToForward.push_back(CB.getArgOperand(i));
2520 VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2521 }
2522
2523 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2524 if (InlinedFunctionInfo.ContainsCalls) {
2525 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2526 if (CallInst *CI = dyn_cast<CallInst>(&CB))
2527 CallSiteTailKind = CI->getTailCallKind();
2528
2529 // For inlining purposes, the "notail" marker is the same as no marker.
2530 if (CallSiteTailKind == CallInst::TCK_NoTail)
2531 CallSiteTailKind = CallInst::TCK_None;
2532
2533 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2534 ++BB) {
2535 for (Instruction &I : llvm::make_early_inc_range(*BB)) {
2536 CallInst *CI = dyn_cast<CallInst>(&I);
2537 if (!CI)
2538 continue;
2539
2540 // Forward varargs from inlined call site to calls to the
2541 // ForwardVarArgsTo function, if requested, and to musttail calls.
2542 if (!VarArgsToForward.empty() &&
2543 ((ForwardVarArgsTo &&
2544 CI->getCalledFunction() == ForwardVarArgsTo) ||
2545 CI->isMustTailCall())) {
2546 // Collect attributes for non-vararg parameters.
2547 AttributeList Attrs = CI->getAttributes();
2548 SmallVector<AttributeSet, 8> ArgAttrs;
2549 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2550 for (unsigned ArgNo = 0;
2551 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2552 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2553 }
2554
2555 // Add VarArg attributes.
2556 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2557 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2558 Attrs.getRetAttrs(), ArgAttrs);
2559 // Add VarArgs to existing parameters.
2560 SmallVector<Value *, 6> Params(CI->args());
2561 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2562 CallInst *NewCI = CallInst::Create(
2563 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI->getIterator());
2564 NewCI->setDebugLoc(CI->getDebugLoc());
2565 NewCI->setAttributes(Attrs);
2566 NewCI->setCallingConv(CI->getCallingConv());
2567 CI->replaceAllUsesWith(NewCI);
2568 CI->eraseFromParent();
2569 CI = NewCI;
2570 }
2571
2572 if (Function *F = CI->getCalledFunction())
2573 InlinedDeoptimizeCalls |=
2574 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2575
2576 // We need to reduce the strength of any inlined tail calls. For
2577 // musttail, we have to avoid introducing potential unbounded stack
2578 // growth. For example, if functions 'f' and 'g' are mutually recursive
2579 // with musttail, we can inline 'g' into 'f' so long as we preserve
2580 // musttail on the cloned call to 'f'. If either the inlined call site
2581 // or the cloned call site is *not* musttail, the program already has
2582 // one frame of stack growth, so it's safe to remove musttail. Here is
2583 // a table of example transformations:
2584 //
2585 // f -> musttail g -> musttail f ==> f -> musttail f
2586 // f -> musttail g -> tail f ==> f -> tail f
2587 // f -> g -> musttail f ==> f -> f
2588 // f -> g -> tail f ==> f -> f
2589 //
2590 // Inlined notail calls should remain notail calls.
2591 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2592 if (ChildTCK != CallInst::TCK_NoTail)
2593 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2594 CI->setTailCallKind(ChildTCK);
2595 InlinedMustTailCalls |= CI->isMustTailCall();
2596
2597 // Call sites inlined through a 'nounwind' call site should be
2598 // 'nounwind' as well. However, avoid marking call sites explicitly
2599 // where possible. This helps expose more opportunities for CSE after
2600 // inlining, commonly when the callee is an intrinsic.
2601 if (MarkNoUnwind && !CI->doesNotThrow())
2602 CI->setDoesNotThrow();
2603 }
2604 }
2605 }
2606
2607 // Leave lifetime markers for the static allocas, scoping them to the
2608 // function we just inlined.
2609 // We need to insert lifetime intrinsics even at O0 to avoid invalid
2610 // access caused by multithreaded coroutines. The check
2611 // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2612 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2613 !IFI.StaticAllocas.empty()) {
2614 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2615 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2616 AllocaInst *AI = IFI.StaticAllocas[ai];
2617 // Don't mark swifterror allocas. They can't have bitcast uses.
2618 if (AI->isSwiftError())
2619 continue;
2620
2621 // If the alloca is already scoped to something smaller than the whole
2622 // function then there's no need to add redundant, less accurate markers.
2623 if (hasLifetimeMarkers(AI))
2624 continue;
2625
2626 // Try to determine the size of the allocation.
2627 ConstantInt *AllocaSize = nullptr;
2628 if (ConstantInt *AIArraySize =
2629 dyn_cast<ConstantInt>(AI->getArraySize())) {
2630 auto &DL = Caller->getDataLayout();
2631 Type *AllocaType = AI->getAllocatedType();
2632 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2633 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2634
2635 // Don't add markers for zero-sized allocas.
2636 if (AllocaArraySize == 0)
2637 continue;
2638
2639 // Check that array size doesn't saturate uint64_t and doesn't
2640 // overflow when it's multiplied by type size.
2641 if (!AllocaTypeSize.isScalable() &&
2642 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2643 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2644 AllocaTypeSize.getFixedValue()) {
2645 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2646 AllocaArraySize * AllocaTypeSize);
2647 }
2648 }
2649
2650 builder.CreateLifetimeStart(AI, AllocaSize);
2651 for (ReturnInst *RI : Returns) {
2652 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2653 // call and a return. The return kills all local allocas.
2654 if (InlinedMustTailCalls &&
2655 RI->getParent()->getTerminatingMustTailCall())
2656 continue;
2657 if (InlinedDeoptimizeCalls &&
2658 RI->getParent()->getTerminatingDeoptimizeCall())
2659 continue;
2660 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2661 }
2662 }
2663 }
2664
2665 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2666 // code with llvm.stacksave/llvm.stackrestore intrinsics.
2667 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2668 // Insert the llvm.stacksave.
2669 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2670 .CreateStackSave("savedstack");
2671
2672 // Insert a call to llvm.stackrestore before any return instructions in the
2673 // inlined function.
2674 for (ReturnInst *RI : Returns) {
2675 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2676 // call and a return. The return will restore the stack pointer.
2677 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2678 continue;
2679 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2680 continue;
2681 IRBuilder<>(RI).CreateStackRestore(SavedPtr);
2682 }
2683 }
2684
2685 // If we are inlining for an invoke instruction, we must make sure to rewrite
2686 // any call instructions into invoke instructions. This is sensitive to which
2687 // funclet pads were top-level in the inlinee, so must be done before
2688 // rewriting the "parent pad" links.
2689 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2690 BasicBlock *UnwindDest = II->getUnwindDest();
2691 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2692 if (isa<LandingPadInst>(FirstNonPHI)) {
2693 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2694 } else {
2695 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2696 }
2697 }
2698
2699 // Update the lexical scopes of the new funclets and callsites.
2700 // Anything that had 'none' as its parent is now nested inside the callsite's
2701 // EHPad.
2702 if (CallSiteEHPad) {
2703 for (Function::iterator BB = FirstNewBlock->getIterator(),
2704 E = Caller->end();
2705 BB != E; ++BB) {
2706 // Add bundle operands to inlined call sites.
2707 PropagateOperandBundles(BB, CallSiteEHPad);
2708
2709 // It is problematic if the inlinee has a cleanupret which unwinds to the
2710 // caller and we inline it into a call site which doesn't unwind but is
2711 // inside an EH pad that does. Such an edge must be dynamically unreachable.
2712 // As such, we replace the cleanupret with unreachable.
2713 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2714 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2715 changeToUnreachable(CleanupRet);
2716
2717 Instruction *I = BB->getFirstNonPHI();
2718 if (!I->isEHPad())
2719 continue;
2720
2721 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2722 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2723 CatchSwitch->setParentPad(CallSiteEHPad);
2724 } else {
2725 auto *FPI = cast<FuncletPadInst>(I);
2726 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2727 FPI->setParentPad(CallSiteEHPad);
2728 }
2729 }
2730 }
2731
2732 if (InlinedDeoptimizeCalls) {
2733 // We need to at least remove the deoptimizing returns from the Return set,
2734 // so that the control flow from those returns does not get merged into the
2735 // caller (but terminates it instead). If the caller's return type does not
2736 // match the callee's return type, we also need to change the return type of
2737 // the intrinsic.
2738 if (Caller->getReturnType() == CB.getType()) {
2739 llvm::erase_if(Returns, [](ReturnInst *RI) {
2740 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2741 });
2742 } else {
2743 SmallVector<ReturnInst *, 8> NormalReturns;
2744 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2745 Caller->getParent(), Intrinsic::experimental_deoptimize,
2746 {Caller->getReturnType()});
2747
2748 for (ReturnInst *RI : Returns) {
2749 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2750 if (!DeoptCall) {
2751 NormalReturns.push_back(RI);
2752 continue;
2753 }
2754
2755 // The calling convention on the deoptimize call itself may be bogus,
2756 // since the code we're inlining may have undefined behavior (and may
2757 // never actually execute at runtime); but all
2758 // @llvm.experimental.deoptimize declarations have to have the same
2759 // calling convention in a well-formed module.
2760 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2761 NewDeoptIntrinsic->setCallingConv(CallingConv);
2762 auto *CurBB = RI->getParent();
2763 RI->eraseFromParent();
2764
2765 SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2766
2767 SmallVector<OperandBundleDef, 1> OpBundles;
2768 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2769 auto DeoptAttributes = DeoptCall->getAttributes();
2770 DeoptCall->eraseFromParent();
2771 assert(!OpBundles.empty() &&
2772 "Expected at least the deopt operand bundle");
2773
2774 IRBuilder<> Builder(CurBB);
2775 CallInst *NewDeoptCall =
2776 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2777 NewDeoptCall->setCallingConv(CallingConv);
2778 NewDeoptCall->setAttributes(DeoptAttributes);
2779 if (NewDeoptCall->getType()->isVoidTy())
2780 Builder.CreateRetVoid();
2781 else
2782 Builder.CreateRet(NewDeoptCall);
2783 // Since the ret type is changed, remove the incompatible attributes.
2784 NewDeoptCall->removeRetAttrs(
2785 AttributeFuncs::typeIncompatible(NewDeoptCall->getType()));
2786 }
2787
2788 // Leave behind the normal returns so we can merge control flow.
2789 std::swap(Returns, NormalReturns);
2790 }
2791 }
2792
2793 // Handle any inlined musttail call sites. In order for a new call site to be
2794 // musttail, the source of the clone and the inlined call site must have been
2795 // musttail. Therefore it's safe to return without merging control into the
2796 // phi below.
2797 if (InlinedMustTailCalls) {
2798 // Check if we need to bitcast the result of any musttail calls.
2799 Type *NewRetTy = Caller->getReturnType();
2800 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2801
2802 // Handle the returns preceded by musttail calls separately.
2803 SmallVector<ReturnInst *, 8> NormalReturns;
2804 for (ReturnInst *RI : Returns) {
2805 CallInst *ReturnedMustTail =
2806 RI->getParent()->getTerminatingMustTailCall();
2807 if (!ReturnedMustTail) {
2808 NormalReturns.push_back(RI);
2809 continue;
2810 }
2811 if (!NeedBitCast)
2812 continue;
2813
2814 // Delete the old return and any preceding bitcast.
2815 BasicBlock *CurBB = RI->getParent();
2816 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2817 RI->eraseFromParent();
2818 if (OldCast)
2819 OldCast->eraseFromParent();
2820
2821 // Insert a new bitcast and return with the right type.
2822 IRBuilder<> Builder(CurBB);
2823 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2824 }
2825
2826 // Leave behind the normal returns so we can merge control flow.
2827 std::swap(Returns, NormalReturns);
2828 }
2829
2830 // Now that all of the transforms on the inlined code have taken place but
2831 // before we splice the inlined code into the CFG and lose track of which
2832 // blocks were actually inlined, collect the call sites. We only do this if
2833 // call graph updates weren't requested, as those provide value handle based
2834 // tracking of inlined call sites instead. Calls to intrinsics are not
2835 // collected because they are not inlineable.
2836 if (InlinedFunctionInfo.ContainsCalls) {
2837 // Otherwise just collect the raw call sites that were inlined.
2838 for (BasicBlock &NewBB :
2839 make_range(FirstNewBlock->getIterator(), Caller->end()))
2840 for (Instruction &I : NewBB)
2841 if (auto *CB = dyn_cast<CallBase>(&I))
2842 if (!(CB->getCalledFunction() &&
2843 CB->getCalledFunction()->isIntrinsic()))
2844 IFI.InlinedCallSites.push_back(CB);
2845 }
2846
2847 // If we cloned in _exactly one_ basic block, and if that block ends in a
2848 // return instruction, we splice the body of the inlined callee directly into
2849 // the calling basic block.
2850 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2851 // Move all of the instructions right before the call.
2852 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
2853 FirstNewBlock->end());
2854 // Remove the cloned basic block.
2855 Caller->back().eraseFromParent();
2856
2857 // If the call site was an invoke instruction, add a branch to the normal
2858 // destination.
2859 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2860 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), CB.getIterator());
2861 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2862 }
2863
2864 // If the return instruction returned a value, replace uses of the call with
2865 // uses of the returned value.
2866 if (!CB.use_empty()) {
2867 ReturnInst *R = Returns[0];
2868 if (&CB == R->getReturnValue())
2869 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2870 else
2871 CB.replaceAllUsesWith(R->getReturnValue());
2872 }
2873 // Since we are now done with the Call/Invoke, we can delete it.
2874 CB.eraseFromParent();
2875
2876 // Since we are now done with the return instruction, delete it also.
2877 Returns[0]->eraseFromParent();
2878
2879 if (MergeAttributes)
2880 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
2881
2882 // We are now done with the inlining.
2883 return InlineResult::success();
2884 }
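// Illustrative sketch of this fast path (hypothetical IR): for a callee such
// as
//   define i32 @callee(i32 %x) {
//     %r = add i32 %x, 1
//     ret i32 %r
//   }
// a call "%y = call i32 @callee(i32 %a)" is replaced by the spliced body at
// the call site, uses of %y are rewritten to %r, and no extra blocks,
// branches, or PHI nodes are left behind.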
2885
2886 // Otherwise, we have the normal case, of more than one block to inline or
2887 // multiple return sites.
2888
2889 // We want to clone the entire callee function into the hole between the
2890 // "starter" and "ender" blocks. How we accomplish this depends on whether
2891 // this is an invoke instruction or a call instruction.
2892 BasicBlock *AfterCallBB;
2893 BranchInst *CreatedBranchToNormalDest = nullptr;
2894 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2895
2896 // Add an unconditional branch to make this look like the CallInst case...
2897 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), CB.getIterator());
2898
2899 // Split the basic block. This guarantees that no PHI nodes will have to be
2900 // updated due to new incoming edges, and makes the invoke case more
2901 // symmetric to the call case.
2902 AfterCallBB =
2903 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2904 CalledFunc->getName() + ".exit");
2905
2906 } else { // It's a call
2907 // If this is a call instruction, we need to split the basic block that
2908 // the call lives in.
2909 //
2910 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2911 CalledFunc->getName() + ".exit");
2912 }
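// Illustrative shape for a plain call (hypothetical names): a caller block
//   bb:  %y = call i32 @callee()
//        ...code after the call...
// is split into
//   bb:           br label %callee.exit
//   callee.exit:  %y = call i32 @callee()
//                 ...code after the call...
// and the unconditional branch in %bb is retargeted below to the first
// inlined block. For an invoke, the freshly created branch to the normal
// destination marks the split point instead.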
2913
2914 if (IFI.CallerBFI) {
2915 // Copy original BB's block frequency to AfterCallBB
2916 IFI.CallerBFI->setBlockFreq(AfterCallBB,
2917 IFI.CallerBFI->getBlockFreq(OrigBB));
2918 }
2919
2920 // Change the branch that used to go to AfterCallBB to branch to the first
2921 // basic block of the inlined function.
2922 //
2923 Instruction *Br = OrigBB->getTerminator();
2924 assert(Br && Br->getOpcode() == Instruction::Br &&
2925 "splitBasicBlock broken!");
2926 Br->setOperand(0, &*FirstNewBlock);
2927
2928 // Now that the function is correct, make it a little bit nicer. In
2929 // particular, move the basic blocks inserted from the end of the function
2930 // into the space made by splitting the source basic block.
2931 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
2932 Caller->end());
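// Continuing the sketch above (hypothetical names), the layout is now
//   bb:            br label %inlined.entry   ; retargeted branch
//   inlined.entry: ...                       ; cloned callee blocks
//   ...            ...                       ; spliced ahead of callee.exit
//   callee.exit:   ...code after the call...
// What remains is to merge control from the cloned returns into callee.exit,
// which the code below does.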
2933
2934 // Handle all of the return instructions that we just cloned in, and eliminate
2935 // any users of the original call/invoke instruction.
2936 Type *RTy = CalledFunc->getReturnType();
2937
2938 PHINode *PHI = nullptr;
2939 if (Returns.size() > 1) {
2940 // The PHI node should go at the front of the new basic block to merge all
2941 // possible incoming values.
2942 if (!CB.use_empty()) {
2943 PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
2944 PHI->insertBefore(AfterCallBB->begin());
2945 // Anything that used the result of the function call should now use the
2946 // PHI node as their operand.
2947 CB.replaceAllUsesWith(PHI);
2948 }
2949
2950 // Loop over all of the return instructions adding entries to the PHI node
2951 // as appropriate.
2952 if (PHI) {
2953 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2954 ReturnInst *RI = Returns[i];
2955 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2956 "Ret value not consistent in function!");
2957 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2958 }
2959 }
2960
2961 // Add a branch to the merge points and remove return instructions.
2962 DebugLoc Loc;
2963 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2964 ReturnInst *RI = Returns[i];
2965 BranchInst* BI = BranchInst::Create(AfterCallBB, RI->getIterator());
2966 Loc = RI->getDebugLoc();
2967 BI->setDebugLoc(Loc);
2968 RI->eraseFromParent();
2969 }
2970 // We need to set the debug location to *somewhere* inside the
2971 // inlined function. The line number may be nonsensical, but the
2972 // instruction will at least be associated with the right
2973 // function.
2974 if (CreatedBranchToNormalDest)
2975 CreatedBranchToNormalDest->setDebugLoc(Loc);
2976 } else if (!Returns.empty()) {
2977 // Otherwise, if there is exactly one return value, just replace anything
2978 // using the return value of the call with the computed value.
2979 if (!CB.use_empty()) {
2980 if (&CB == Returns[0]->getReturnValue())
2981 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2982 else
2983 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2984 }
2985
2986 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2987 BasicBlock *ReturnBB = Returns[0]->getParent();
2988 ReturnBB->replaceAllUsesWith(AfterCallBB);
2989
2990 // Splice the code from the return block into the block that it will return
2991 // to, which contains the code that was after the call.
2992 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
2993
2994 if (CreatedBranchToNormalDest)
2995 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2996
2997 // Delete the return instruction and the now-empty ReturnBB.
2998 Returns[0]->eraseFromParent();
2999 ReturnBB->eraseFromParent();
3000 } else if (!CB.use_empty()) {
3001 // No returns, but something is using the return value of the call. Just
3002 // nuke the result.
3003 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
3004 }
3005
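// Illustrative result for two returns (hypothetical IR): the merge built
// above looks roughly like
//   callee.exit:
//     %phi = phi i32 [ %r1, %ret.bb1 ], [ %r2, %ret.bb2 ]
//     ...former uses of the call now use %phi...
// while each cloned "ret i32 %rN" has been replaced by "br label
// %callee.exit".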
3006 // Since we are now done with the Call/Invoke, we can delete it.
3007 CB.eraseFromParent();
3008
3009 // If we inlined any musttail calls and the original return is now
3010 // unreachable, delete it. It can only contain a bitcast and ret.
3011 if (InlinedMustTailCalls && pred_empty(AfterCallBB))
3012 AfterCallBB->eraseFromParent();
3013
3014 // We should always be able to fold the entry block of the function into the
3015 // single predecessor of the block...
3016 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
3017 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3018
3019 // Splice the code entry block into calling block, right before the
3020 // unconditional branch.
3021 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
3022 OrigBB->splice(Br->getIterator(), CalleeEntry);
3023
3024 // Remove the unconditional branch.
3025 Br->eraseFromParent();
3026
3027 // Now we can remove the CalleeEntry block, which is now empty.
3028 CalleeEntry->eraseFromParent();
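// Illustrative effect (hypothetical names): the inlined entry block's
// instructions, terminator included, are pulled up into %bb ahead of its
// unconditional branch; the branch and the now-empty entry block are then
// erased, so %bb flows directly into the rest of the inlined body.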
3029
3030 // If we inserted a phi node, check to see if it has a single value (e.g. all
3031 // the entries are the same or undef). If so, remove the PHI so it doesn't
3032 // block other optimizations.
3033 if (PHI) {
3034 AssumptionCache *AC =
3035 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
3036 auto &DL = Caller->getDataLayout();
3037 if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
3038 PHI->replaceAllUsesWith(V);
3039 PHI->eraseFromParent();
3040 }
3041 }
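// Illustrative simplification (hypothetical IR): if every return produced the
// same value, e.g.
//   %phi = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]
// simplifyInstruction folds the PHI to %v, its uses are rewritten to %v, and
// the dead PHI is erased so it cannot block later optimizations.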
3042
3043 if (MergeAttributes)
3044 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3045
3046 return InlineResult::success();
3047}