81#define DEBUG_TYPE "inline-function"
90 cl::desc(
"Convert noalias attributes to metadata during inlining."));
95 cl::desc(
"Use the llvm.experimental.noalias.scope.decl "
96 "intrinsic during inlining."));
104 cl::desc(
"Convert align attributes to assumptions during inlining."));
107 "max-inst-checked-for-throw-during-inlining",
cl::Hidden,
108 cl::desc(
"the maximum number of instructions analyzed for may throw during "
109 "attribute inference in inlined body"),
115 class LandingPadInliningInfo {
126 PHINode *InnerEHValuesPHI =
nullptr;
132 : OuterResumeDest(
II->getUnwindDest()) {
138 for (; isa<PHINode>(
I); ++
I) {
141 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
144 CallerLPad = cast<LandingPadInst>(
I);
150 return OuterResumeDest;
167 void addIncomingPHIValuesFor(
BasicBlock *BB)
const {
168 addIncomingPHIValuesForInto(BB, OuterResumeDest);
173 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
175 phi->addIncoming(UnwindDestPHIValues[i], src);
183BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
184 if (InnerResumeDest)
return InnerResumeDest;
190 OuterResumeDest->
getName() +
".body");
193 const unsigned PHICapacity = 2;
198 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
199 PHINode *OuterPHI = cast<PHINode>(
I);
201 OuterPHI->
getName() +
".lpad-body");
212 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
215 return InnerResumeDest;
222void LandingPadInliningInfo::forwardResume(
231 addIncomingPHIValuesForInto(Src, Dest);
239 if (
auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
240 return FPI->getParentPad();
241 return cast<CatchSwitchInst>(EHPad)->getParentPad();
252 while (!Worklist.
empty()) {
259 Value *UnwindDestToken =
nullptr;
260 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
261 if (CatchSwitch->hasUnwindDest()) {
262 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
270 for (
auto HI = CatchSwitch->handler_begin(),
271 HE = CatchSwitch->handler_end();
272 HI != HE && !UnwindDestToken; ++HI) {
274 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->
getFirstNonPHI());
280 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
284 auto Memo = MemoMap.
find(ChildPad);
285 if (Memo == MemoMap.
end()) {
292 Value *ChildUnwindDestToken = Memo->second;
293 if (!ChildUnwindDestToken)
299 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
300 UnwindDestToken = ChildUnwindDestToken;
308 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
309 for (
User *U : CleanupPad->users()) {
310 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
311 if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
312 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
317 Value *ChildUnwindDestToken;
318 if (
auto *Invoke = dyn_cast<InvokeInst>(U)) {
319 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
320 }
else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
322 auto Memo = MemoMap.
find(ChildPad);
323 if (Memo == MemoMap.
end()) {
330 ChildUnwindDestToken = Memo->second;
331 if (!ChildUnwindDestToken)
340 if (isa<Instruction>(ChildUnwindDestToken) &&
343 UnwindDestToken = ChildUnwindDestToken;
349 if (!UnwindDestToken)
357 if (
auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
360 UnwindParent =
nullptr;
361 bool ExitedOriginalPad =
false;
363 ExitedPad && ExitedPad != UnwindParent;
364 ExitedPad = dyn_cast<Instruction>(
getParentPad(ExitedPad))) {
366 if (isa<CatchPadInst>(ExitedPad))
368 MemoMap[ExitedPad] = UnwindDestToken;
369 ExitedOriginalPad |= (ExitedPad == EHPad);
372 if (ExitedOriginalPad)
373 return UnwindDestToken;
404 if (
auto *CPI = dyn_cast<CatchPadInst>(EHPad))
405 EHPad = CPI->getCatchSwitch();
408 auto Memo = MemoMap.
find(EHPad);
409 if (Memo != MemoMap.
end())
414 assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
416 return UnwindDestToken;
423 MemoMap[EHPad] =
nullptr;
429 Value *AncestorToken;
431 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
434 if (isa<CatchPadInst>(AncestorPad))
443 assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
444 auto AncestorMemo = MemoMap.
find(AncestorPad);
445 if (AncestorMemo == MemoMap.
end()) {
448 UnwindDestToken = AncestorMemo->second;
452 LastUselessPad = AncestorPad;
453 MemoMap[LastUselessPad] =
nullptr;
455 TempMemos.
insert(LastUselessPad);
473 while (!Worklist.
empty()) {
475 auto Memo = MemoMap.
find(UselessPad);
476 if (Memo != MemoMap.
end() && Memo->second) {
504 MemoMap[UselessPad] = UnwindDestToken;
505 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
506 assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
507 for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
508 auto *CatchPad = HandlerBlock->getFirstNonPHI();
509 for (
User *U : CatchPad->users()) {
511 (!isa<InvokeInst>(U) ||
513 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
515 "Expected useless pad");
516 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
517 Worklist.
push_back(cast<Instruction>(U));
521 assert(isa<CleanupPadInst>(UselessPad));
523 assert(!isa<CleanupReturnInst>(U) &&
"Expected useless pad");
524 assert((!isa<InvokeInst>(U) ||
526 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
528 "Expected useless pad");
529 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
530 Worklist.
push_back(cast<Instruction>(U));
535 return UnwindDestToken;
561 if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
562 F->getIntrinsicID() == Intrinsic::experimental_guard)
573 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
574 Value *UnwindDestToken =
576 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
580 if (
auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
581 MemoKey = CatchPad->getCatchSwitch();
583 MemoKey = FuncletPad;
584 assert(FuncletUnwindMap->count(MemoKey) &&
585 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
586 "must get memoized to avoid confusing later searches");
611 LandingPadInliningInfo Invoke(
II);
617 if (
InvokeInst *
II = dyn_cast<InvokeInst>(
I->getTerminator()))
618 InlinedLPads.
insert(
II->getLandingPadInst());
625 InlinedLPad->reserveClauses(OuterNum);
626 for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
627 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
629 InlinedLPad->setCleanup(
true);
636 &*BB, Invoke.getOuterResumeDest()))
639 Invoke.addIncomingPHIValuesFor(NewBB);
642 if (
ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
643 Invoke.forwardResume(RI, InlinedLPads);
673 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
680 for (
Value *V : UnwindDestPHIValues) {
682 PHI->addIncoming(V, Src);
692 if (
auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
693 if (CRI->unwindsToCaller()) {
694 auto *CleanupPad = CRI->getCleanupPad();
696 CRI->eraseFromParent();
703 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
704 FuncletUnwindMap[CleanupPad] =
714 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
715 if (CatchSwitch->unwindsToCaller()) {
716 Value *UnwindDestToken;
717 if (
auto *ParentPad =
718 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
728 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
741 CatchSwitch->getParentPad(), UnwindDest,
742 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
743 CatchSwitch->getIterator());
744 for (
BasicBlock *PadBB : CatchSwitch->handlers())
745 NewCatchSwitch->addHandler(PadBB);
750 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
751 Replacement = NewCatchSwitch;
753 }
else if (!isa<FuncletPadInst>(
I)) {
759 I->replaceAllUsesWith(Replacement);
760 I->eraseFromParent();
770 &*BB, UnwindDest, &FuncletUnwindMap))
783 MDNode *CallsiteStackContext) {
789 for (
auto MIBStackIter = MIBStackContext->
op_begin(),
790 CallsiteStackIter = CallsiteStackContext->
op_begin();
791 MIBStackIter != MIBStackContext->
op_end() &&
792 CallsiteStackIter != CallsiteStackContext->
op_end();
793 MIBStackIter++, CallsiteStackIter++) {
794 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
795 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
797 if (Val1->getZExtValue() != Val2->getZExtValue())
804 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
808 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
812 const std::vector<Metadata *> &MIBList) {
819 CallStack.addCallStack(cast<MDNode>(MIB));
820 bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
822 if (!MemprofMDAttached)
832 MDNode *InlinedCallsiteMD) {
834 MDNode *ClonedCallsiteMD =
nullptr;
837 if (OrigCallsiteMD) {
842 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
854 std::vector<Metadata *> NewMIBList;
859 for (
auto &MIBOp : OrigMemProfMD->
operands()) {
860 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
867 NewMIBList.push_back(MIB);
869 if (NewMIBList.empty()) {
885 bool ContainsMemProfMetadata,
890 if (!CallsiteMD && !ContainsMemProfMetadata)
894 for (
const auto &Entry : VMap) {
897 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
898 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
899 if (!OrigCall || !ClonedCall)
918 MDNode *MemParallelLoopAccess =
919 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
923 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
929 if (!
I.mayReadOrWriteMemory())
932 if (MemParallelLoopAccess) {
935 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
936 MemParallelLoopAccess);
937 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
938 MemParallelLoopAccess);
943 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
947 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
951 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
969 dyn_cast<Function>(
I->getCalledOperand()->stripPointerCasts());
970 if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
975 I->getOperandBundlesAsDefs(OpBundles);
980 I->replaceAllUsesWith(NewInst);
981 I->eraseFromParent();
990class ScopedAliasMetadataDeepCloner {
994 void addRecursiveMetadataUses();
997 ScopedAliasMetadataDeepCloner(
const Function *
F);
1009ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1013 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1015 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1019 if (
const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1020 MD.insert(Decl->getScopeList());
1023 addRecursiveMetadataUses();
1026void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1028 while (!
Queue.empty()) {
1031 if (
const MDNode *OpMD = dyn_cast<MDNode>(
Op))
1032 if (MD.insert(OpMD))
1033 Queue.push_back(OpMD);
1037void ScopedAliasMetadataDeepCloner::clone() {
1038 assert(MDMap.empty() &&
"clone() already called ?");
1043 MDMap[
I].reset(DummyNodes.
back().get());
1052 if (
const MDNode *M = dyn_cast<MDNode>(
Op))
1059 MDTuple *TempM = cast<MDTuple>(MDMap[
I]);
1076 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1077 if (
MDNode *MNew = MDMap.lookup(M))
1078 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1080 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1081 if (
MDNode *MNew = MDMap.lookup(M))
1082 I.setMetadata(LLVMContext::MD_noalias, MNew);
1084 if (
auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1085 if (
MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1086 Decl->setScopeList(MNew);
1105 if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1108 if (NoAliasArgs.
empty())
1128 for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1131 std::string
Name = std::string(CalledFunc->
getName());
1134 Name +=
A->getName();
1136 Name +=
": argument ";
1144 NewScopes.
insert(std::make_pair(
A, NewScope));
1161 VMI != VMIE; ++VMI) {
1162 if (
const Instruction *
I = dyn_cast<Instruction>(VMI->first)) {
1166 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1170 bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1173 if (
const LoadInst *LI = dyn_cast<LoadInst>(
I))
1174 PtrArgs.
push_back(LI->getPointerOperand());
1175 else if (
const StoreInst *SI = dyn_cast<StoreInst>(
I))
1176 PtrArgs.
push_back(SI->getPointerOperand());
1177 else if (
const VAArgInst *VAAI = dyn_cast<VAArgInst>(
I))
1178 PtrArgs.
push_back(VAAI->getPointerOperand());
1180 PtrArgs.
push_back(CXI->getPointerOperand());
1182 PtrArgs.
push_back(RMWI->getPointerOperand());
1183 else if (
const auto *Call = dyn_cast<CallBase>(
I)) {
1187 if (Call->doesNotAccessMemory())
1199 IsArgMemOnlyCall =
true;
1202 for (
Value *Arg : Call->args()) {
1206 if (!Arg->getType()->isPointerTy())
1217 if (PtrArgs.
empty() && !IsFuncCall)
1226 for (
const Value *V : PtrArgs) {
1230 for (
const Value *O : Objects)
1236 bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1237 UsesUnknownObject =
false;
1238 for (
const Value *V : ObjSet) {
1242 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1243 isa<ConstantPointerNull>(V) ||
1244 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1251 if (
const Argument *
A = dyn_cast<Argument>(V)) {
1253 UsesAliasingPtr =
true;
1255 UsesAliasingPtr =
true;
1261 RequiresNoCaptureBefore =
true;
1267 UsesUnknownObject =
true;
1273 if (UsesUnknownObject)
1278 if (IsFuncCall && !IsArgMemOnlyCall)
1279 RequiresNoCaptureBefore =
true;
1297 if (!RequiresNoCaptureBefore ||
1319 bool CanAddScopes = !UsesAliasingPtr;
1320 if (CanAddScopes && IsFuncCall)
1321 CanAddScopes = IsArgMemOnlyCall;
1326 Scopes.push_back(NewScopes[
A]);
1329 if (!Scopes.empty())
1331 LLVMContext::MD_alias_scope,
1342 "Expected to be in same basic block!");
1344 assert(BeginIt !=
End->getIterator() &&
"Non-empty BB has empty iterator");
1354 auto &Context = CalledFunction->
getContext();
1358 bool HasAttrToPropagate =
false;
1360 for (
unsigned I = 0, E = CB.
arg_size();
I < E; ++
I) {
1365 ValidParamAttrs.
back().addAttribute(Attribute::ReadNone);
1367 ValidParamAttrs.
back().addAttribute(Attribute::ReadOnly);
1368 HasAttrToPropagate |= ValidParamAttrs.
back().hasAttributes();
1372 if (!HasAttrToPropagate)
1377 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1380 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.
lookup(InnerCB));
1384 for (
unsigned I = 0, E = InnerCB->arg_size();
I < E; ++
I) {
1386 const Value *UnderlyingV =
1388 const Argument *Arg = dyn_cast<Argument>(UnderlyingV);
1392 if (AL.hasParamAttr(
I, Attribute::ByVal))
1400 AL = AL.addParamAttributes(Context,
I, ValidParamAttrs[ArgNo]);
1406 if (AL.hasParamAttr(
I, Attribute::ReadOnly) &&
1407 AL.hasParamAttr(
I, Attribute::WriteOnly))
1408 AL = AL.addParamAttribute(Context,
I, Attribute::ReadNone);
1411 if (AL.hasParamAttr(
I, Attribute::ReadNone)) {
1412 AL = AL.removeParamAttribute(Context,
I, Attribute::ReadOnly);
1413 AL = AL.removeParamAttribute(Context,
I, Attribute::WriteOnly);
1417 if (AL.hasParamAttr(
I, Attribute::ReadOnly) ||
1418 AL.hasParamAttr(
I, Attribute::ReadNone))
1419 AL = AL.removeParamAttribute(Context,
I, Attribute::Writable);
1421 NewInnerCB->setAttributes(AL);
1464 auto &Context = CalledFunction->
getContext();
1466 for (
auto &BB : *CalledFunction) {
1467 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1468 if (!RI || !isa<CallBase>(RI->
getOperand(0)))
1470 auto *RetVal = cast<CallBase>(RI->
getOperand(0));
1474 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.
lookup(RetVal));
1495 if (RI->
getParent() != RetVal->getParent() ||
1508 AL.getRetDereferenceableOrNullBytes())
1510 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1548 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1566 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1569 NewRetVal->setAttributes(NewAL);
1585 bool DTCalculated =
false;
1589 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1596 if (!DTCalculated) {
1598 DTCalculated =
true;
1607 DL, ArgVal, Alignment->value());
1619 Builder.
getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1632 CI->
setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1669 Align Alignment =
DL.getPrefTypeAlign(ByValType);
1675 Alignment = std::max(Alignment, *ByValAlignment);
1679 nullptr, Alignment, Arg->
getName());
1690 for (
User *U : V->users())
1692 if (
II->isLifetimeStartOrEnd())
1703 if (Ty == Int8PtrTy)
1708 if (U->getType() != Int8PtrTy)
continue;
1709 if (U->stripPointerCasts() != AI)
continue;
1729 return DILocation::get(Ctx, OrigDL.
getLine(), OrigDL.
getCol(),
1746 InlinedAtNode = DILocation::getDistinct(
1747 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1748 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1757 bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1763 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1765 if (
auto *Loc = dyn_cast_or_null<DILocation>(MD))
1771 if (!NoInlineLineTables)
1779 if (CalleeHasDebugInfo && !NoInlineLineTables)
1789 if (
auto *AI = dyn_cast<AllocaInst>(&
I))
1796 if (isa<PseudoProbeInst>(
I))
1799 I.setDebugLoc(TheCallDL);
1804 assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1805 if (NoInlineLineTables) {
1806 DVR->setDebugLoc(TheCallDL);
1812 DVR->getMarker()->getParent()->
getContext(), IANodes);
1813 DVR->setDebugLoc(IDL);
1817 for (; FI != Fn->
end(); ++FI) {
1820 for (
DbgRecord &DVR :
I.getDbgRecordRange()) {
1826 if (NoInlineLineTables) {
1828 while (BI != FI->end()) {
1829 if (isa<DbgInfoIntrinsic>(BI)) {
1830 BI = BI->eraseFromParent();
1833 BI->dropDbgRecords();
1842#define DEBUG_TYPE "assignment-tracking"
1850 errs() <<
"# Finding caller local variables escaped by callee\n");
1853 if (!Arg->getType()->isPointerTy()) {
1865 assert(Arg->getType()->isPtrOrPtrVectorTy());
1866 APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
1868 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
1870 LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
1881 auto CollectAssignsForStorage = [&](
auto *DbgAssign) {
1883 if (DbgAssign->getDebugLoc().getInlinedAt())
1891 return EscapedLocals;
1897 << Start->getParent()->getName() <<
" from "
1910 for (
auto BBI = Start; BBI !=
End; ++BBI) {
1916#define DEBUG_TYPE "inline-function"
1930 for (
auto Entry : VMap) {
1931 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1933 auto *OrigBB = cast<BasicBlock>(Entry.first);
1934 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1936 if (!ClonedBBs.
insert(ClonedBB).second) {
1948 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
1958 auto CallSiteCount =
1961 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
1966 Function *Callee, int64_t EntryDelta,
1968 auto CalleeCount = Callee->getEntryCount();
1972 const uint64_t PriorEntryCount = CalleeCount->getCount();
1977 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1979 : PriorEntryCount + EntryDelta;
1981 auto updateVTableProfWeight = [](
CallBase *CB,
const uint64_t NewEntryCount,
1990 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1991 for (
auto Entry : *VMap) {
1992 if (isa<CallInst>(Entry.first))
1993 if (
auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
1994 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1995 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
1998 if (isa<InvokeInst>(Entry.first))
1999 if (
auto *
II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
2000 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2001 updateVTableProfWeight(
II, CloneEntryCount, PriorEntryCount);
2007 Callee->setEntryCount(NewEntryCount);
2011 if (!VMap || VMap->
count(&BB))
2013 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
2014 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2015 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2018 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2019 updateVTableProfWeight(
II, NewEntryCount, PriorEntryCount);
2047 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2048 IsUnsafeClaimRV = !IsRetainRV;
2050 for (
auto *RI : Returns) {
2052 bool InsertRetainCall = IsRetainRV;
2061 if (isa<CastInst>(
I))
2064 if (
auto *
II = dyn_cast<IntrinsicInst>(&
I)) {
2065 if (
II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2075 if (IsUnsafeClaimRV) {
2081 II->eraseFromParent();
2082 InsertRetainCall =
false;
2086 auto *CI = dyn_cast<CallInst>(&
I);
2101 NewCall->copyMetadata(*CI);
2102 CI->replaceAllUsesWith(NewCall);
2103 CI->eraseFromParent();
2104 InsertRetainCall =
false;
2108 if (InsertRetainCall) {
2128 bool MergeAttributes,
2130 bool InsertLifetime,
2135 if (isa<CallBrInst>(CB))
2148 Value *ConvergenceControlToken =
nullptr;
2164 ConvergenceControlToken = OBUse.Inputs[0].get();
2183 if (
auto *IntrinsicCall = dyn_cast<IntrinsicInst>(
I)) {
2184 if (IntrinsicCall->getIntrinsicID() ==
2185 Intrinsic::experimental_convergence_entry) {
2186 if (!ConvergenceControlToken) {
2188 "convergent call needs convergencectrl operand");
2205 if (CalledFunc->
hasGC()) {
2206 if (!Caller->hasGC())
2207 Caller->setGC(CalledFunc->
getGC());
2208 else if (CalledFunc->
getGC() != Caller->getGC())
2222 Caller->hasPersonalityFn()
2223 ? Caller->getPersonalityFn()->stripPointerCasts()
2225 if (CalledPersonality) {
2226 if (!CallerPersonality)
2227 Caller->setPersonalityFn(CalledPersonality);
2232 else if (CalledPersonality != CallerPersonality)
2239 if (CallerPersonality) {
2242 std::optional<OperandBundleUse> ParentFunclet =
2245 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2249 if (CallSiteEHPad) {
2250 if (Personality == EHPersonality::MSVC_CXX) {
2253 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2256 for (
const BasicBlock &CalledBB : *CalledFunc) {
2257 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2264 for (
const BasicBlock &CalledBB : *CalledFunc) {
2265 if (CalledBB.isEHPad())
2275 bool EHPadForCallUnwindsLocally =
false;
2276 if (CallSiteEHPad && isa<CallInst>(CB)) {
2278 Value *CallSiteUnwindDestToken =
2281 EHPadForCallUnwindsLocally =
2282 CallSiteUnwindDestToken &&
2283 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2314 auto &
DL = Caller->getDataLayout();
2321 E = CalledFunc->
arg_end();
I != E; ++
I, ++AI, ++ArgNo) {
2322 Value *ActualArg = *AI;
2330 &CB, CalledFunc, IFI,
2332 if (ActualArg != *AI)
2337 VMap[&*
I] = ActualArg;
2357 false, Returns,
".i",
2358 &InlinedFunctionInfo);
2360 FirstNewBlock = LastBlock; ++FirstNewBlock;
2364 if (RVCallKind != objcarc::ARCInstKind::None)
2375 CalledFunc->
front());
2383 for (ByValInit &
Init : ByValInits)
2385 &*FirstNewBlock, IFI, CalledFunc);
2387 std::optional<OperandBundleUse> ParentDeopt =
2393 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2414 std::vector<Value *> MergedDeoptArgs;
2415 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2416 ChildOB.Inputs.size());
2421 OpDefs.
emplace_back(
"deopt", std::move(MergedDeoptArgs));
2451 SAMetadataCloner.clone();
2452 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2474 make_range(FirstNewBlock->getIterator(), Caller->end()))
2476 if (
auto *
II = dyn_cast<AssumeInst>(&
I))
2480 if (ConvergenceControlToken) {
2481 auto *
I = FirstNewBlock->getFirstNonPHI();
2482 if (
auto *IntrinsicCall = dyn_cast<IntrinsicInst>(
I)) {
2483 if (IntrinsicCall->getIntrinsicID() ==
2484 Intrinsic::experimental_convergence_entry) {
2485 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2486 IntrinsicCall->eraseFromParent();
2498 E = FirstNewBlock->end();
I != E; ) {
2517 while (isa<AllocaInst>(
I) &&
2518 !cast<AllocaInst>(
I)->use_empty() &&
2528 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2541 bool InlinedMustTailCalls =
false, InlinedDeoptimizeCalls =
false;
2544 if (
CallInst *CI = dyn_cast<CallInst>(&CB))
2545 CallSiteTailKind = CI->getTailCallKind();
2560 if (!VarArgsToForward.
empty() &&
2561 ((ForwardVarArgsTo &&
2567 if (!Attrs.isEmpty() || !VarArgsAttrs.
empty()) {
2568 for (
unsigned ArgNo = 0;
2570 ArgAttrs.
push_back(Attrs.getParamAttrs(ArgNo));
2576 Attrs.getRetAttrs(), ArgAttrs);
2591 InlinedDeoptimizeCalls |=
2592 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2611 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2630 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2632 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2647 auto &
DL = Caller->getDataLayout();
2649 TypeSize AllocaTypeSize =
DL.getTypeAllocSize(AllocaType);
2650 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2653 if (AllocaArraySize == 0)
2659 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2660 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2663 AllocaArraySize * AllocaTypeSize);
2671 if (InlinedMustTailCalls &&
2672 RI->
getParent()->getTerminatingMustTailCall())
2674 if (InlinedDeoptimizeCalls &&
2675 RI->
getParent()->getTerminatingDeoptimizeCall())
2694 if (InlinedMustTailCalls && RI->
getParent()->getTerminatingMustTailCall())
2696 if (InlinedDeoptimizeCalls && RI->
getParent()->getTerminatingDeoptimizeCall())
2706 if (
auto *
II = dyn_cast<InvokeInst>(&CB)) {
2709 if (isa<LandingPadInst>(FirstNonPHI)) {
2719 if (CallSiteEHPad) {
2730 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2731 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2738 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
2739 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2740 CatchSwitch->setParentPad(CallSiteEHPad);
2742 auto *FPI = cast<FuncletPadInst>(
I);
2743 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2744 FPI->setParentPad(CallSiteEHPad);
2749 if (InlinedDeoptimizeCalls) {
2755 if (Caller->getReturnType() == CB.
getType()) {
2757 return RI->
getParent()->getTerminatingDeoptimizeCall() !=
nullptr;
2762 Caller->getParent(), Intrinsic::experimental_deoptimize,
2763 {Caller->getReturnType()});
2789 "Expected at least the deopt operand bundle");
2793 Builder.
CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2814 if (InlinedMustTailCalls) {
2816 Type *NewRetTy = Caller->getReturnType();
2823 RI->
getParent()->getTerminatingMustTailCall();
2824 if (!ReturnedMustTail) {
2833 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2836 OldCast->eraseFromParent();
2856 make_range(FirstNewBlock->getIterator(), Caller->end()))
2858 if (
auto *CB = dyn_cast<CallBase>(&
I))
2867 if (Returns.
size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2870 FirstNewBlock->end());
2872 Caller->back().eraseFromParent();
2885 if (&CB == R->getReturnValue())
2894 Returns[0]->eraseFromParent();
2896 if (MergeAttributes)
2910 BranchInst *CreatedBranchToNormalDest =
nullptr;
2921 CalledFunc->
getName() +
".exit");
2928 CalledFunc->
getName() +
".exit");
2942 "splitBasicBlock broken!");
2948 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
2956 if (Returns.
size() > 1) {
2961 PHI->insertBefore(AfterCallBB->
begin());
2972 "Ret value not consistent in function!");
2973 PHI->addIncoming(RI->getReturnValue(), RI->
getParent());
2989 if (CreatedBranchToNormalDest)
2991 }
else if (!Returns.
empty()) {
2995 if (&CB == Returns[0]->getReturnValue())
3002 BasicBlock *ReturnBB = Returns[0]->getParent();
3007 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
3009 if (CreatedBranchToNormalDest)
3013 Returns[0]->eraseFromParent();
3026 if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
3031 assert(cast<BranchInst>(Br)->isUnconditional() &&
"splitBasicBlock broken!");
3032 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3051 auto &
DL = Caller->getDataLayout();
3053 PHI->replaceAllUsesWith(V);
3054 PHI->eraseFromParent();
3058 if (MergeAttributes)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool for interprocedural optimization.
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values and constant expressions based on their type.
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM IR.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode the location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap)
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument, tag the mapped noalias parameters with noalias metadata specifying the new scope, and tag all non-derived loads, stores and memory intrinsics with the new alias scopes.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed by a call to retainRV or claimRV emitted by the caller.
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap)
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumptions to preserve this information.
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
if (auto Err = PB.parsePassPipeline(MPM, Passes)) return wrap(std::move(Err)); MPM.run(*Mod, MAM);
This file contains the declarations for profiling metadata utility functions.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is returned otherwise).
bool hasAttributes() const
Return true if the builder has IR-level attributes.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists (zero is returned otherwise).
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
const ConstantRange & getRange() const
Returns the value of the range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This is the shared class of boolean and integer constants.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
DILocation * get() const
Get the underlying DILocation.
MDNode * getScope() const
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
bool UpdateProfile
Update profile for callee as well as cloned version.
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a parent module.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
initializer< Ty > init(const Ty &Val)
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool pred_empty(const BasicBlock *BB)
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.