86#define DEBUG_TYPE "inline-function"
95 cl::desc(
"Convert noalias attributes to metadata during inlining."));
100 cl::desc(
"Use the llvm.experimental.noalias.scope.decl "
101 "intrinsic during inlining."));
109 cl::desc(
"Convert align attributes to assumptions during inlining."));
112 "max-inst-checked-for-throw-during-inlining",
cl::Hidden,
113 cl::desc(
"the maximum number of instructions analyzed for may throw during "
114 "attribute inference in inlined body"),
120 class LandingPadInliningInfo {
131 PHINode *InnerEHValuesPHI =
nullptr;
137 : OuterResumeDest(
II->getUnwindDest()) {
143 for (; isa<PHINode>(
I); ++
I) {
146 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
149 CallerLPad = cast<LandingPadInst>(
I);
155 return OuterResumeDest;
172 void addIncomingPHIValuesFor(
BasicBlock *BB)
const {
173 addIncomingPHIValuesForInto(BB, OuterResumeDest);
178 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
180 phi->addIncoming(UnwindDestPHIValues[i], src);
189 if (
auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(
I)) {
190 if (IntrinsicCall->isEntry()) {
191 return IntrinsicCall;
194 I =
I->getNextNode();
200BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
201 if (InnerResumeDest)
return InnerResumeDest;
207 OuterResumeDest->
getName() +
".body");
210 const unsigned PHICapacity = 2;
215 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
216 PHINode *OuterPHI = cast<PHINode>(
I);
218 OuterPHI->
getName() +
".lpad-body");
229 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
232 return InnerResumeDest;
239void LandingPadInliningInfo::forwardResume(
248 addIncomingPHIValuesForInto(Src, Dest);
256 if (
auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257 return FPI->getParentPad();
258 return cast<CatchSwitchInst>(EHPad)->getParentPad();
269 while (!Worklist.
empty()) {
276 Value *UnwindDestToken =
nullptr;
277 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
278 if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
287 for (
auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
291 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->
getFirstNonPHI());
297 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
301 auto Memo = MemoMap.
find(ChildPad);
302 if (Memo == MemoMap.
end()) {
309 Value *ChildUnwindDestToken = Memo->second;
310 if (!ChildUnwindDestToken)
316 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
317 UnwindDestToken = ChildUnwindDestToken;
325 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
326 for (
User *U : CleanupPad->users()) {
327 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
328 if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
329 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
334 Value *ChildUnwindDestToken;
335 if (
auto *Invoke = dyn_cast<InvokeInst>(U)) {
336 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
337 }
else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
339 auto Memo = MemoMap.
find(ChildPad);
340 if (Memo == MemoMap.
end()) {
347 ChildUnwindDestToken = Memo->second;
348 if (!ChildUnwindDestToken)
357 if (isa<Instruction>(ChildUnwindDestToken) &&
360 UnwindDestToken = ChildUnwindDestToken;
366 if (!UnwindDestToken)
374 if (
auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
377 UnwindParent =
nullptr;
378 bool ExitedOriginalPad =
false;
380 ExitedPad && ExitedPad != UnwindParent;
381 ExitedPad = dyn_cast<Instruction>(
getParentPad(ExitedPad))) {
383 if (isa<CatchPadInst>(ExitedPad))
385 MemoMap[ExitedPad] = UnwindDestToken;
386 ExitedOriginalPad |= (ExitedPad == EHPad);
389 if (ExitedOriginalPad)
390 return UnwindDestToken;
421 if (
auto *CPI = dyn_cast<CatchPadInst>(EHPad))
422 EHPad = CPI->getCatchSwitch();
425 auto Memo = MemoMap.
find(EHPad);
426 if (Memo != MemoMap.
end())
431 assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
433 return UnwindDestToken;
440 MemoMap[EHPad] =
nullptr;
446 Value *AncestorToken;
448 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
451 if (isa<CatchPadInst>(AncestorPad))
460 assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
461 auto AncestorMemo = MemoMap.
find(AncestorPad);
462 if (AncestorMemo == MemoMap.
end()) {
465 UnwindDestToken = AncestorMemo->second;
469 LastUselessPad = AncestorPad;
470 MemoMap[LastUselessPad] =
nullptr;
472 TempMemos.
insert(LastUselessPad);
490 while (!Worklist.
empty()) {
492 auto Memo = MemoMap.
find(UselessPad);
493 if (Memo != MemoMap.
end() && Memo->second) {
521 MemoMap[UselessPad] = UnwindDestToken;
522 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
523 assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
524 for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
525 auto *CatchPad = HandlerBlock->getFirstNonPHI();
526 for (
User *U : CatchPad->users()) {
528 (!isa<InvokeInst>(U) ||
530 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
532 "Expected useless pad");
533 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
534 Worklist.
push_back(cast<Instruction>(U));
538 assert(isa<CleanupPadInst>(UselessPad));
540 assert(!isa<CleanupReturnInst>(U) &&
"Expected useless pad");
541 assert((!isa<InvokeInst>(U) ||
543 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
545 "Expected useless pad");
546 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
547 Worklist.
push_back(cast<Instruction>(U));
552 return UnwindDestToken;
578 if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
579 F->getIntrinsicID() == Intrinsic::experimental_guard)
590 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
591 Value *UnwindDestToken =
593 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
597 if (
auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
598 MemoKey = CatchPad->getCatchSwitch();
600 MemoKey = FuncletPad;
601 assert(FuncletUnwindMap->count(MemoKey) &&
602 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
603 "must get memoized to avoid confusing later searches");
628 LandingPadInliningInfo Invoke(
II);
634 if (
InvokeInst *
II = dyn_cast<InvokeInst>(
I->getTerminator()))
635 InlinedLPads.
insert(
II->getLandingPadInst());
642 InlinedLPad->reserveClauses(OuterNum);
643 for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
644 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
646 InlinedLPad->setCleanup(
true);
653 &*BB, Invoke.getOuterResumeDest()))
656 Invoke.addIncomingPHIValuesFor(NewBB);
659 if (
ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
660 Invoke.forwardResume(RI, InlinedLPads);
690 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
697 for (
Value *V : UnwindDestPHIValues) {
699 PHI->addIncoming(V, Src);
709 if (
auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
710 if (CRI->unwindsToCaller()) {
711 auto *CleanupPad = CRI->getCleanupPad();
713 CRI->eraseFromParent();
720 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
721 FuncletUnwindMap[CleanupPad] =
731 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
732 if (CatchSwitch->unwindsToCaller()) {
733 Value *UnwindDestToken;
734 if (
auto *ParentPad =
735 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
745 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
758 CatchSwitch->getParentPad(), UnwindDest,
759 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
760 CatchSwitch->getIterator());
761 for (
BasicBlock *PadBB : CatchSwitch->handlers())
762 NewCatchSwitch->addHandler(PadBB);
767 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
768 Replacement = NewCatchSwitch;
770 }
else if (!isa<FuncletPadInst>(
I)) {
776 I->replaceAllUsesWith(Replacement);
777 I->eraseFromParent();
787 &*BB, UnwindDest, &FuncletUnwindMap))
800 MDNode *CallsiteStackContext) {
806 for (
auto MIBStackIter = MIBStackContext->
op_begin(),
807 CallsiteStackIter = CallsiteStackContext->
op_begin();
808 MIBStackIter != MIBStackContext->
op_end() &&
809 CallsiteStackIter != CallsiteStackContext->
op_end();
810 MIBStackIter++, CallsiteStackIter++) {
811 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
812 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
814 if (Val1->getZExtValue() != Val2->getZExtValue())
821 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
825 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
829 const std::vector<Metadata *> &MIBList) {
836 CallStack.addCallStack(cast<MDNode>(MIB));
837 bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
839 if (!MemprofMDAttached)
849 MDNode *InlinedCallsiteMD) {
851 MDNode *ClonedCallsiteMD =
nullptr;
854 if (OrigCallsiteMD) {
859 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
871 std::vector<Metadata *> NewMIBList;
876 for (
auto &MIBOp : OrigMemProfMD->
operands()) {
877 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
884 NewMIBList.push_back(MIB);
886 if (NewMIBList.empty()) {
902 bool ContainsMemProfMetadata,
907 if (!CallsiteMD && !ContainsMemProfMetadata)
911 for (
const auto &Entry : VMap) {
914 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
915 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
916 if (!OrigCall || !ClonedCall)
935 MDNode *MemParallelLoopAccess =
936 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
940 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
946 if (!
I.mayReadOrWriteMemory())
949 if (MemParallelLoopAccess) {
952 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
953 MemParallelLoopAccess);
954 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
955 MemParallelLoopAccess);
960 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
964 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
968 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
986 dyn_cast<Function>(
I->getCalledOperand()->stripPointerCasts());
987 if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
992 I->getOperandBundlesAsDefs(OpBundles);
997 I->replaceAllUsesWith(NewInst);
998 I->eraseFromParent();
1007class ScopedAliasMetadataDeepCloner {
1011 void addRecursiveMetadataUses();
1014 ScopedAliasMetadataDeepCloner(
const Function *
F);
1026ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1030 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1032 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1036 if (
const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1037 MD.insert(Decl->getScopeList());
1040 addRecursiveMetadataUses();
1043void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1045 while (!
Queue.empty()) {
1048 if (
const MDNode *OpMD = dyn_cast<MDNode>(
Op))
1049 if (MD.insert(OpMD))
1050 Queue.push_back(OpMD);
1054void ScopedAliasMetadataDeepCloner::clone() {
1055 assert(MDMap.empty() &&
"clone() already called ?");
1060 MDMap[
I].reset(DummyNodes.
back().get());
1069 if (
const MDNode *M = dyn_cast<MDNode>(
Op))
1076 MDTuple *TempM = cast<MDTuple>(MDMap[
I]);
1093 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1094 if (
MDNode *MNew = MDMap.lookup(M))
1095 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1097 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1098 if (
MDNode *MNew = MDMap.lookup(M))
1099 I.setMetadata(LLVMContext::MD_noalias, MNew);
1101 if (
auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1102 if (
MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1103 Decl->setScopeList(MNew);
1122 if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1125 if (NoAliasArgs.
empty())
1145 for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1148 std::string
Name = std::string(CalledFunc->
getName());
1151 Name +=
A->getName();
1153 Name +=
": argument ";
1161 NewScopes.
insert(std::make_pair(
A, NewScope));
1178 VMI != VMIE; ++VMI) {
1179 if (
const Instruction *
I = dyn_cast<Instruction>(VMI->first)) {
1183 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1187 bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1190 if (
const LoadInst *LI = dyn_cast<LoadInst>(
I))
1191 PtrArgs.
push_back(LI->getPointerOperand());
1192 else if (
const StoreInst *SI = dyn_cast<StoreInst>(
I))
1193 PtrArgs.
push_back(SI->getPointerOperand());
1194 else if (
const VAArgInst *VAAI = dyn_cast<VAArgInst>(
I))
1195 PtrArgs.
push_back(VAAI->getPointerOperand());
1197 PtrArgs.
push_back(CXI->getPointerOperand());
1199 PtrArgs.
push_back(RMWI->getPointerOperand());
1200 else if (
const auto *Call = dyn_cast<CallBase>(
I)) {
1204 if (Call->doesNotAccessMemory())
1216 IsArgMemOnlyCall =
true;
1219 for (
Value *Arg : Call->args()) {
1223 if (!Arg->getType()->isPointerTy())
1234 if (PtrArgs.
empty() && !IsFuncCall)
1243 for (
const Value *V : PtrArgs) {
1247 for (
const Value *O : Objects)
1253 bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1254 UsesUnknownObject =
false;
1255 for (
const Value *V : ObjSet) {
1259 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1260 isa<ConstantPointerNull>(V) ||
1261 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1268 if (
const Argument *
A = dyn_cast<Argument>(V)) {
1270 UsesAliasingPtr =
true;
1272 UsesAliasingPtr =
true;
1278 RequiresNoCaptureBefore =
true;
1284 UsesUnknownObject =
true;
1290 if (UsesUnknownObject)
1295 if (IsFuncCall && !IsArgMemOnlyCall)
1296 RequiresNoCaptureBefore =
true;
1314 if (!RequiresNoCaptureBefore ||
1336 bool CanAddScopes = !UsesAliasingPtr;
1337 if (CanAddScopes && IsFuncCall)
1338 CanAddScopes = IsArgMemOnlyCall;
1343 Scopes.push_back(NewScopes[
A]);
1346 if (!Scopes.empty())
1348 LLVMContext::MD_alias_scope,
1359 "Expected to be in same basic block!");
1361 assert(BeginIt !=
End->getIterator() &&
"Non-empty BB has empty iterator");
1372 auto &Context = CalledFunction->
getContext();
1376 bool HasAttrToPropagate =
false;
1384 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
1385 Attribute::NonNull, Attribute::Alignment, Attribute::Range};
1387 for (
unsigned I = 0, E = CB.
arg_size();
I < E; ++
I) {
1393 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadNone);
1395 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadOnly);
1400 ValidExactParamAttrs.
back().addAttribute(Attr);
1403 HasAttrToPropagate |= ValidObjParamAttrs.
back().hasAttributes();
1404 HasAttrToPropagate |= ValidExactParamAttrs.
back().hasAttributes();
1408 if (!HasAttrToPropagate)
1413 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1416 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.
lookup(InnerCB));
1421 if (InlinedFunctionInfo.
isSimplified(InnerCB, NewInnerCB))
1425 for (
unsigned I = 0, E = InnerCB->arg_size();
I < E; ++
I) {
1430 if (NewInnerCB->paramHasAttr(
I, Attribute::ByVal))
1434 if (
match(NewInnerCB->getArgOperand(
I),
1439 const Argument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(
I));
1450 if (AL.getParamDereferenceableBytes(
I) >
1451 NewAB.getDereferenceableBytes())
1453 if (AL.getParamDereferenceableOrNullBytes(
I) >
1454 NewAB.getDereferenceableOrNullBytes())
1456 if (AL.getParamAlignment(
I).valueOrOne() >
1457 NewAB.getAlignment().valueOrOne())
1459 if (
auto ExistingRange = AL.getParamRange(
I)) {
1460 if (
auto NewRange = NewAB.getRange()) {
1463 NewAB.removeAttribute(Attribute::Range);
1464 NewAB.addRangeAttr(CombinedRange);
1467 AL = AL.addParamAttributes(Context,
I, NewAB);
1468 }
else if (NewInnerCB->getArgOperand(
I)->getType()->isPointerTy()) {
1470 const Value *UnderlyingV =
1472 Arg = dyn_cast<Argument>(UnderlyingV);
1481 AL = AL.addParamAttributes(Context,
I, ValidObjParamAttrs[ArgNo]);
1488 if (AL.hasParamAttr(
I, Attribute::ReadOnly) &&
1489 AL.hasParamAttr(
I, Attribute::WriteOnly))
1490 AL = AL.addParamAttribute(Context,
I, Attribute::ReadNone);
1493 if (AL.hasParamAttr(
I, Attribute::ReadNone)) {
1494 AL = AL.removeParamAttribute(Context,
I, Attribute::ReadOnly);
1495 AL = AL.removeParamAttribute(Context,
I, Attribute::WriteOnly);
1499 if (AL.hasParamAttr(
I, Attribute::ReadOnly) ||
1500 AL.hasParamAttr(
I, Attribute::ReadNone))
1501 AL = AL.removeParamAttribute(Context,
I, Attribute::Writable);
1503 NewInnerCB->setAttributes(AL);
1547 auto &Context = CalledFunction->
getContext();
1549 for (
auto &BB : *CalledFunction) {
1550 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1551 if (!RI || !isa<CallBase>(RI->
getOperand(0)))
1553 auto *RetVal = cast<CallBase>(RI->
getOperand(0));
1557 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.
lookup(RetVal));
1563 if (InlinedFunctionInfo.
isSimplified(RetVal, NewRetVal))
1583 if (RI->
getParent() != RetVal->getParent() ||
1596 AL.getRetDereferenceableOrNullBytes())
1598 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1636 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1654 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1657 NewRetVal->setAttributes(NewAL);
1673 bool DTCalculated =
false;
1677 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1684 if (!DTCalculated) {
1686 DTCalculated =
true;
1695 DL, ArgVal, Alignment->value());
1707 Builder.
getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1720 CI->
setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1757 Align Alignment =
DL.getPrefTypeAlign(ByValType);
1763 Alignment = std::max(Alignment, *ByValAlignment);
1767 nullptr, Alignment, Arg->
getName());
1778 for (
User *U : V->users())
1780 if (
II->isLifetimeStartOrEnd())
1791 if (Ty == Int8PtrTy)
1796 if (U->getType() != Int8PtrTy)
continue;
1797 if (U->stripPointerCasts() != AI)
continue;
1817 return DILocation::get(Ctx, OrigDL.
getLine(), OrigDL.
getCol(),
1834 InlinedAtNode = DILocation::getDistinct(
1835 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1836 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1845 bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1851 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1853 if (
auto *Loc = dyn_cast_or_null<DILocation>(MD))
1859 if (!NoInlineLineTables)
1867 if (CalleeHasDebugInfo && !NoInlineLineTables)
1877 if (
auto *AI = dyn_cast<AllocaInst>(&
I))
1884 if (isa<PseudoProbeInst>(
I))
1887 I.setDebugLoc(TheCallDL);
1892 assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1893 if (NoInlineLineTables) {
1894 DVR->setDebugLoc(TheCallDL);
1900 DVR->getMarker()->getParent()->
getContext(), IANodes);
1901 DVR->setDebugLoc(IDL);
1905 for (; FI != Fn->
end(); ++FI) {
1908 for (
DbgRecord &DVR :
I.getDbgRecordRange()) {
1914 if (NoInlineLineTables) {
1916 while (BI != FI->end()) {
1917 if (isa<DbgInfoIntrinsic>(BI)) {
1918 BI = BI->eraseFromParent();
1921 BI->dropDbgRecords();
1930#define DEBUG_TYPE "assignment-tracking"
1938 errs() <<
"# Finding caller local variables escaped by callee\n");
1941 if (!Arg->getType()->isPointerTy()) {
1953 assert(Arg->getType()->isPtrOrPtrVectorTy());
1954 APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
1956 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
1958 LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
1969 auto CollectAssignsForStorage = [&](
auto *DbgAssign) {
1971 if (DbgAssign->getDebugLoc().getInlinedAt())
1979 return EscapedLocals;
1985 << Start->getParent()->getName() <<
" from "
1998 for (
auto BBI = Start; BBI !=
End; ++BBI) {
2004#define DEBUG_TYPE "inline-function"
2018 for (
auto Entry : VMap) {
2019 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
2021 auto *OrigBB = cast<BasicBlock>(Entry.first);
2022 auto *ClonedBB = cast<BasicBlock>(Entry.second);
2024 if (!ClonedBBs.
insert(ClonedBB).second) {
2036 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
2046 auto CallSiteCount =
2049 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
2054 Function *Callee, int64_t EntryDelta,
2056 auto CalleeCount = Callee->getEntryCount();
2060 const uint64_t PriorEntryCount = CalleeCount->getCount();
2065 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
2067 : PriorEntryCount + EntryDelta;
2069 auto updateVTableProfWeight = [](
CallBase *CB,
const uint64_t NewEntryCount,
2078 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
2079 for (
auto Entry : *VMap) {
2080 if (isa<CallInst>(Entry.first))
2081 if (
auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
2082 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
2083 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
2086 if (isa<InvokeInst>(Entry.first))
2087 if (
auto *
II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
2088 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2089 updateVTableProfWeight(
II, CloneEntryCount, PriorEntryCount);
2095 Callee->setEntryCount(NewEntryCount);
2099 if (!VMap || VMap->
count(&BB))
2101 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
2102 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2103 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2106 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2107 updateVTableProfWeight(
II, NewEntryCount, PriorEntryCount);
2134 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2135 IsUnsafeClaimRV = !IsRetainRV;
2137 for (
auto *RI : Returns) {
2139 bool InsertRetainCall = IsRetainRV;
2148 if (isa<CastInst>(
I))
2151 if (
auto *
II = dyn_cast<IntrinsicInst>(&
I)) {
2152 if (
II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2162 if (IsUnsafeClaimRV) {
2166 II->eraseFromParent();
2167 InsertRetainCall =
false;
2171 auto *CI = dyn_cast<CallInst>(&
I);
2186 NewCall->copyMetadata(*CI);
2187 CI->replaceAllUsesWith(NewCall);
2188 CI->eraseFromParent();
2189 InsertRetainCall =
false;
2193 if (InsertRetainCall) {
2224static const std::pair<std::vector<int64_t>, std::vector<int64_t>>
2232 std::vector<int64_t> CalleeCounterMap;
2233 std::vector<int64_t> CalleeCallsiteMap;
2234 CalleeCounterMap.resize(CalleeCounters, -1);
2235 CalleeCallsiteMap.resize(CalleeCallsites, -1);
2238 if (Ins.getNameValue() == &Caller)
2240 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2241 if (CalleeCounterMap[OldID] == -1)
2243 const auto NewID =
static_cast<uint32_t>(CalleeCounterMap[OldID]);
2245 Ins.setNameValue(&Caller);
2246 Ins.setIndex(NewID);
2251 if (Ins.getNameValue() == &Caller)
2253 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2254 if (CalleeCallsiteMap[OldID] == -1)
2256 const auto NewID =
static_cast<uint32_t>(CalleeCallsiteMap[OldID]);
2258 Ins.setNameValue(&Caller);
2259 Ins.setIndex(NewID);
2263 std::deque<BasicBlock *> Worklist;
2280 Worklist.push_back(StartBB);
2281 while (!Worklist.empty()) {
2282 auto *BB = Worklist.front();
2283 Worklist.pop_front();
2284 bool Changed =
false;
2287 Changed |= RewriteInstrIfNeeded(*BBID);
2291 BBID->moveBefore(&*BB->getFirstInsertionPt());
2294 if (
auto *Inc = dyn_cast<InstrProfIncrementInst>(&
I)) {
2295 if (isa<InstrProfIncrementInstStep>(Inc)) {
2302 if (isa<Constant>(Inc->getStep())) {
2303 assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
2304 Inc->eraseFromParent();
2306 assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
2307 RewriteInstrIfNeeded(*Inc);
2309 }
else if (Inc != BBID) {
2314 Inc->eraseFromParent();
2317 }
else if (
auto *CS = dyn_cast<InstrProfCallsite>(&
I)) {
2318 Changed |= RewriteCallsiteInsIfNeeded(*CS);
2321 if (!BBID || Changed)
2323 if (Seen.
insert(Succ).second)
2324 Worklist.push_back(Succ);
2328 llvm::all_of(CalleeCounterMap, [&](
const auto &V) {
return V != 0; }) &&
2329 "Counter index mapping should be either to -1 or to non-zero index, "
2331 "index corresponds to the entry BB of the caller");
2333 llvm::all_of(CalleeCallsiteMap, [&](
const auto &V) {
return V != 0; }) &&
2334 "Callsite index mapping should be either to -1 or to non-zero index, "
2335 "because there should have been at least a callsite - the inlined one "
2336 "- which would have had a 0 index.");
2338 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
2357 bool MergeAttributes,
2359 bool InsertLifetime,
2362 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2374 const auto CallsiteID =
2375 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2380 auto Ret =
InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2382 if (!Ret.isSuccess())
2387 CallsiteIDIns->eraseFromParent();
2392 const auto IndicesMaps =
remapIndices(Caller, StartBB, CtxProf,
2393 NumCalleeCounters, NumCalleeCallsites);
2398 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2400 (Ctx.counters().size() +
2401 llvm::count_if(CalleeCounterMap, [](
auto V) { return V != -1; }) ==
2403 "The caller's counters size should have grown by the number of new "
2404 "distinct counters inherited from the inlined callee.");
2405 Ctx.resizeCounters(NewCountersSize);
2409 auto CSIt = Ctx.callsites().find(CallsiteID);
2410 if (CSIt == Ctx.callsites().end())
2412 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2415 if (CalleeCtxIt == CSIt->second.end())
2420 auto &CalleeCtx = CalleeCtxIt->second;
2421 assert(CalleeCtx.guid() == CalleeGUID);
2423 for (
auto I = 0U;
I < CalleeCtx.counters().
size(); ++
I) {
2424 const int64_t NewIndex = CalleeCounterMap[
I];
2425 if (NewIndex >= 0) {
2426 assert(NewIndex != 0 &&
"counter index mapping shouldn't happen to a 0 "
2427 "index, that's the caller's entry BB");
2428 Ctx.counters()[NewIndex] = CalleeCtx.counters()[
I];
2431 for (
auto &[
I, OtherSet] : CalleeCtx.callsites()) {
2432 const int64_t NewCSIdx = CalleeCallsiteMap[
I];
2433 if (NewCSIdx >= 0) {
2435 "callsite index mapping shouldn't happen to a 0 index, the "
2436 "caller must've had at least one callsite (with such an index)");
2437 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2443 auto Deleted = Ctx.callsites().erase(CallsiteID);
2447 CtxProf.
update(Updater, Caller);
2460 bool MergeAttributes,
2462 bool InsertLifetime,
2467 if (isa<CallBrInst>(CB))
2480 Value *ConvergenceControlToken =
nullptr;
2496 ConvergenceControlToken = OBUse.Inputs[0].get();
2514 if (!ConvergenceControlToken &&
2517 "convergent call needs convergencectrl operand");
2532 if (CalledFunc->
hasGC()) {
2533 if (!Caller->hasGC())
2534 Caller->setGC(CalledFunc->
getGC());
2535 else if (CalledFunc->
getGC() != Caller->getGC())
2549 Caller->hasPersonalityFn()
2550 ? Caller->getPersonalityFn()->stripPointerCasts()
2552 if (CalledPersonality) {
2553 if (!CallerPersonality)
2554 Caller->setPersonalityFn(CalledPersonality);
2559 else if (CalledPersonality != CallerPersonality)
2566 if (CallerPersonality) {
2569 std::optional<OperandBundleUse> ParentFunclet =
2572 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2576 if (CallSiteEHPad) {
2577 if (Personality == EHPersonality::MSVC_CXX) {
2580 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2583 for (
const BasicBlock &CalledBB : *CalledFunc) {
2584 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2591 for (
const BasicBlock &CalledBB : *CalledFunc) {
2592 if (CalledBB.isEHPad())
2602 bool EHPadForCallUnwindsLocally =
false;
2603 if (CallSiteEHPad && isa<CallInst>(CB)) {
2605 Value *CallSiteUnwindDestToken =
2608 EHPadForCallUnwindsLocally =
2609 CallSiteUnwindDestToken &&
2610 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2641 auto &
DL = Caller->getDataLayout();
2648 E = CalledFunc->
arg_end();
I != E; ++
I, ++AI, ++ArgNo) {
2649 Value *ActualArg = *AI;
2657 &CB, CalledFunc, IFI,
2659 if (ActualArg != *AI)
2664 VMap[&*
I] = ActualArg;
2684 false, Returns,
".i",
2685 &InlinedFunctionInfo);
2687 FirstNewBlock = LastBlock; ++FirstNewBlock;
2691 if (RVCallKind != objcarc::ARCInstKind::None)
2702 CalledFunc->
front());
2710 for (ByValInit &
Init : ByValInits)
2712 &*FirstNewBlock, IFI, CalledFunc);
2714 std::optional<OperandBundleUse> ParentDeopt =
2720 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2741 std::vector<Value *> MergedDeoptArgs;
2742 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2743 ChildOB.Inputs.size());
2748 OpDefs.
emplace_back(
"deopt", std::move(MergedDeoptArgs));
2778 SAMetadataCloner.clone();
2779 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2801 make_range(FirstNewBlock->getIterator(), Caller->end()))
2803 if (
auto *
II = dyn_cast<AssumeInst>(&
I))
2807 if (ConvergenceControlToken) {
2809 if (IntrinsicCall) {
2822 E = FirstNewBlock->end();
I != E; ) {
2841 while (isa<AllocaInst>(
I) &&
2842 !cast<AllocaInst>(
I)->use_empty() &&
2852 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2865 bool InlinedMustTailCalls =
false, InlinedDeoptimizeCalls =
false;
2868 if (
CallInst *CI = dyn_cast<CallInst>(&CB))
2869 CallSiteTailKind = CI->getTailCallKind();
2884 if (!VarArgsToForward.
empty() &&
2885 ((ForwardVarArgsTo &&
2891 if (!Attrs.isEmpty() || !VarArgsAttrs.
empty()) {
2892 for (
unsigned ArgNo = 0;
2894 ArgAttrs.
push_back(Attrs.getParamAttrs(ArgNo));
2900 Attrs.getRetAttrs(), ArgAttrs);
2915 InlinedDeoptimizeCalls |=
2916 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2935 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2954 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2956 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2971 auto &
DL = Caller->getDataLayout();
2973 TypeSize AllocaTypeSize =
DL.getTypeAllocSize(AllocaType);
2974 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2977 if (AllocaArraySize == 0)
2983 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2984 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2987 AllocaArraySize * AllocaTypeSize);
2995 if (InlinedMustTailCalls &&
2996 RI->
getParent()->getTerminatingMustTailCall())
2998 if (InlinedDeoptimizeCalls &&
2999 RI->
getParent()->getTerminatingDeoptimizeCall())
3018 if (InlinedMustTailCalls && RI->
getParent()->getTerminatingMustTailCall())
3020 if (InlinedDeoptimizeCalls && RI->
getParent()->getTerminatingDeoptimizeCall())
3030 if (
auto *
II = dyn_cast<InvokeInst>(&CB)) {
3033 if (isa<LandingPadInst>(FirstNonPHI)) {
3043 if (CallSiteEHPad) {
3054 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
3055 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3062 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
3063 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3064 CatchSwitch->setParentPad(CallSiteEHPad);
3066 auto *FPI = cast<FuncletPadInst>(
I);
3067 if (isa<ConstantTokenNone>(FPI->getParentPad()))
3068 FPI->setParentPad(CallSiteEHPad);
3073 if (InlinedDeoptimizeCalls) {
3079 if (Caller->getReturnType() == CB.
getType()) {
3081 return RI->
getParent()->getTerminatingDeoptimizeCall() !=
nullptr;
3086 Caller->getParent(), Intrinsic::experimental_deoptimize,
3087 {Caller->getReturnType()});
3113 "Expected at least the deopt operand bundle");
3117 Builder.
CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3138 if (InlinedMustTailCalls) {
3140 Type *NewRetTy = Caller->getReturnType();
3147 RI->
getParent()->getTerminatingMustTailCall();
3148 if (!ReturnedMustTail) {
3157 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
3160 OldCast->eraseFromParent();
3180 make_range(FirstNewBlock->getIterator(), Caller->end()))
3182 if (
auto *CB = dyn_cast<CallBase>(&
I))
3191 if (Returns.
size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3194 FirstNewBlock->end());
3196 Caller->back().eraseFromParent();
3209 if (&CB == R->getReturnValue())
3218 Returns[0]->eraseFromParent();
3220 if (MergeAttributes)
3234 BranchInst *CreatedBranchToNormalDest =
nullptr;
3245 CalledFunc->
getName() +
".exit");
3252 CalledFunc->
getName() +
".exit");
3266 "splitBasicBlock broken!");
3272 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
3280 if (Returns.
size() > 1) {
3285 PHI->insertBefore(AfterCallBB->
begin());
3296 "Ret value not consistent in function!");
3297 PHI->addIncoming(RI->getReturnValue(), RI->
getParent());
3313 if (CreatedBranchToNormalDest)
3315 }
else if (!Returns.
empty()) {
3319 if (&CB == Returns[0]->getReturnValue())
3326 BasicBlock *ReturnBB = Returns[0]->getParent();
3331 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
3333 if (CreatedBranchToNormalDest)
3337 Returns[0]->eraseFromParent();
3350 if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
3355 assert(cast<BranchInst>(Br)->isUnconditional() &&
"splitBasicBlock broken!");
3356 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3375 auto &
DL = Caller->getDataLayout();
3377 PHI->replaceAllUsesWith(V);
3378 PHI->eraseFromParent();
3382 if (MergeAttributes)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode the location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
static uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
bool hasAttributes() const
Return true if the builder has IR-level attributes.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Remove the specified attribute from this set.
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This is the shared class of boolean and integer constants.
This class represents a range of values.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
DILocation * get() const
Get the underlying DILocation.
MDNode * getScope() const
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
bool UpdateProfile
Update profile for callee as well as cloned version.
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
The instrumented contextual profile, produced by the CtxProfAnalysis.
void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
std::pair< iterator, bool > insert(const ValueT &V)
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
bool match(Val *V, const Pattern &P)
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
initializer< Ty > init(const Ty &Val)
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedcall.
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the ARC runtime function.
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equivalent to retaining or releasing V.
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on the fly.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block unreachable.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors (even if it requires multiple iterations of a possibly infinite loop).
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalObject.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
bool pred_empty(const BasicBlock *BB)
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned code.
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.