85#define DEBUG_TYPE "inline-function"
94 cl::desc(
"Convert noalias attributes to metadata during inlining."));
99 cl::desc(
"Use the llvm.experimental.noalias.scope.decl "
100 "intrinsic during inlining."));
108 cl::desc(
"Convert align attributes to assumptions during inlining."));
111 "max-inst-checked-for-throw-during-inlining",
cl::Hidden,
112 cl::desc(
"the maximum number of instructions analyzed for may throw during "
113 "attribute inference in inlined body"),
119 class LandingPadInliningInfo {
130 PHINode *InnerEHValuesPHI =
nullptr;
136 : OuterResumeDest(
II->getUnwindDest()) {
145 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
154 return OuterResumeDest;
159 LandingPadInst *getLandingPadInst()
const {
return CallerLPad; }
166 void forwardResume(ResumeInst *RI,
167 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
171 void addIncomingPHIValuesFor(BasicBlock *BB)
const {
172 addIncomingPHIValuesForInto(BB, OuterResumeDest);
175 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest)
const {
177 for (
unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++
I) {
179 phi->addIncoming(UnwindDestPHIValues[i], src);
187 while (It != BB.
end()) {
189 if (IntrinsicCall->isEntry()) {
190 return IntrinsicCall;
199BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
200 if (InnerResumeDest)
return InnerResumeDest;
206 OuterResumeDest->
getName() +
".body");
209 const unsigned PHICapacity = 2;
214 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
217 OuterPHI->
getName() +
".lpad-body");
228 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
231 return InnerResumeDest;
238void LandingPadInliningInfo::forwardResume(
239 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
248 addIncomingPHIValuesForInto(Src, Dest);
257 return FPI->getParentPad();
269 while (!Worklist.
empty()) {
276 Value *UnwindDestToken =
nullptr;
278 if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();
287 for (
auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
302 auto Memo = MemoMap.
find(ChildPad);
303 if (Memo == MemoMap.
end()) {
310 Value *ChildUnwindDestToken = Memo->second;
311 if (!ChildUnwindDestToken)
318 UnwindDestToken = ChildUnwindDestToken;
329 if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();
335 Value *ChildUnwindDestToken;
337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();
340 auto Memo = MemoMap.
find(ChildPad);
341 if (Memo == MemoMap.
end()) {
348 ChildUnwindDestToken = Memo->second;
349 if (!ChildUnwindDestToken)
361 UnwindDestToken = ChildUnwindDestToken;
367 if (!UnwindDestToken)
378 UnwindParent =
nullptr;
379 bool ExitedOriginalPad =
false;
381 ExitedPad && ExitedPad != UnwindParent;
386 MemoMap[ExitedPad] = UnwindDestToken;
387 ExitedOriginalPad |= (ExitedPad == EHPad);
390 if (ExitedOriginalPad)
391 return UnwindDestToken;
423 EHPad = CPI->getCatchSwitch();
426 auto Memo = MemoMap.
find(EHPad);
427 if (Memo != MemoMap.
end())
432 assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
434 return UnwindDestToken;
441 MemoMap[EHPad] =
nullptr;
447 Value *AncestorToken;
461 assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
462 auto AncestorMemo = MemoMap.
find(AncestorPad);
463 if (AncestorMemo == MemoMap.
end()) {
466 UnwindDestToken = AncestorMemo->second;
470 LastUselessPad = AncestorPad;
471 MemoMap[LastUselessPad] =
nullptr;
473 TempMemos.
insert(LastUselessPad);
491 while (!Worklist.
empty()) {
493 auto Memo = MemoMap.
find(UselessPad);
494 if (Memo != MemoMap.
end() && Memo->second) {
522 MemoMap[UselessPad] = UnwindDestToken;
524 assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
525 for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
526 auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();
531 ->getFirstNonPHIIt()) == CatchPad)) &&
532 "Expected useless pad");
546 "Expected useless pad");
553 return UnwindDestToken;
579 if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
580 F->getIntrinsicID() == Intrinsic::experimental_guard)
592 Value *UnwindDestToken =
599 MemoKey = CatchPad->getCatchSwitch();
601 MemoKey = FuncletPad;
602 assert(FuncletUnwindMap->count(MemoKey) &&
603 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
604 "must get memoized to avoid confusing later searches");
629 LandingPadInliningInfo Invoke(
II);
636 InlinedLPads.
insert(
II->getLandingPadInst());
643 InlinedLPad->reserveClauses(OuterNum);
644 for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
645 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
647 InlinedLPad->setCleanup(
true);
654 &*BB, Invoke.getOuterResumeDest()))
657 Invoke.addIncomingPHIValuesFor(NewBB);
661 Invoke.forwardResume(RI, InlinedLPads);
691 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
698 for (
Value *V : UnwindDestPHIValues) {
700 PHI->addIncoming(V, Src);
711 if (CRI->unwindsToCaller()) {
712 auto *CleanupPad = CRI->getCleanupPad();
714 CRI->eraseFromParent();
722 FuncletUnwindMap[CleanupPad] =
733 if (CatchSwitch->unwindsToCaller()) {
734 Value *UnwindDestToken;
735 if (
auto *ParentPad =
759 CatchSwitch->getParentPad(), UnwindDest,
760 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
761 CatchSwitch->getIterator());
762 for (
BasicBlock *PadBB : CatchSwitch->handlers())
763 NewCatchSwitch->addHandler(PadBB);
768 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
769 Replacement = NewCatchSwitch;
777 I->replaceAllUsesWith(Replacement);
778 I->eraseFromParent();
788 &*BB, UnwindDest, &FuncletUnwindMap))
801 MDNode *CallsiteStackContext) {
807 for (
auto MIBStackIter = MIBStackContext->
op_begin(),
808 CallsiteStackIter = CallsiteStackContext->
op_begin();
809 MIBStackIter != MIBStackContext->
op_end() &&
810 CallsiteStackIter != CallsiteStackContext->
op_end();
811 MIBStackIter++, CallsiteStackIter++) {
815 if (Val1->getZExtValue() != Val2->getZExtValue())
822 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
826 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
830 const std::vector<Metadata *> &MIBList,
839 bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
841 if (!MemprofMDAttached)
851 MDNode *InlinedCallsiteMD,
854 MDNode *ClonedCallsiteMD =
nullptr;
857 if (OrigCallsiteMD) {
862 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
874 std::vector<Metadata *> NewMIBList;
879 for (
auto &MIBOp : OrigMemProfMD->
operands()) {
887 NewMIBList.push_back(MIB);
889 if (NewMIBList.empty()) {
905 bool ContainsMemProfMetadata,
911 if (!CallsiteMD && !ContainsMemProfMetadata)
915 for (
const auto &Entry : VMap) {
920 if (!OrigCall || !ClonedCall)
939 MDNode *MemParallelLoopAccess =
940 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
944 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
950 if (!
I.mayReadOrWriteMemory())
953 if (MemParallelLoopAccess) {
956 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
957 MemParallelLoopAccess);
958 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
959 MemParallelLoopAccess);
964 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
968 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
972 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
986 InlineSiteLoc = CI->getZExtValue();
996 if (!CI || !CI->getMetadata(
"srcloc"))
998 auto *Callee = CI->getCalledFunction();
999 if (!Callee || (!Callee->hasFnAttribute(
"dontcall-error") &&
1000 !Callee->hasFnAttribute(
"dontcall-warn")))
1004 if (
MDNode *Existing = CI->getMetadata(
"inlined.from"))
1008 Ops.push_back(MakeMDInt(0));
1011 Ops.push_back(MakeMDInt(InlineSiteLoc));
1031 if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
1036 I->getOperandBundlesAsDefs(OpBundles);
1041 I->replaceAllUsesWith(NewInst);
1042 I->eraseFromParent();
1051class ScopedAliasMetadataDeepCloner {
1052 using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
1053 SetVector<const MDNode *> MD;
1055 void addRecursiveMetadataUses();
1058 ScopedAliasMetadataDeepCloner(
const Function *
F);
1070ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1071 const Function *
F) {
1072 for (
const BasicBlock &BB : *
F) {
1073 for (
const Instruction &
I : BB) {
1074 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1076 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1081 MD.insert(Decl->getScopeList());
1084 addRecursiveMetadataUses();
1087void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1089 while (!
Queue.empty()) {
1093 if (MD.insert(OpMD))
1094 Queue.push_back(OpMD);
1098void ScopedAliasMetadataDeepCloner::clone() {
1099 assert(MDMap.
empty() &&
"clone() already called ?");
1102 for (
const MDNode *
I : MD) {
1104 MDMap[
I].reset(DummyNodes.
back().get());
1111 for (
const MDNode *
I : MD) {
1133 for (BasicBlock &BB :
make_range(FStart, FEnd)) {
1134 for (Instruction &
I : BB) {
1137 if (MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1138 if (MDNode *MNew = MDMap.
lookup(M))
1139 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1141 if (MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1142 if (MDNode *MNew = MDMap.
lookup(M))
1143 I.setMetadata(LLVMContext::MD_noalias, MNew);
1146 if (MDNode *MNew = MDMap.
lookup(Decl->getScopeList()))
1147 Decl->setScopeList(MNew);
1166 if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1169 if (NoAliasArgs.
empty())
1189 for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1192 std::string Name = std::string(CalledFunc->
getName());
1195 Name +=
A->getName();
1197 Name +=
": argument ";
1205 NewScopes.
insert(std::make_pair(
A, NewScope));
1212 IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
1222 VMI != VMIE; ++VMI) {
1231 bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1235 PtrArgs.
push_back(LI->getPointerOperand());
1239 PtrArgs.
push_back(VAAI->getPointerOperand());
1241 PtrArgs.
push_back(CXI->getPointerOperand());
1243 PtrArgs.
push_back(RMWI->getPointerOperand());
1248 if (
Call->doesNotAccessMemory())
1260 IsArgMemOnlyCall =
true;
1267 if (!Arg->getType()->isPointerTy())
1278 if (PtrArgs.
empty() && !IsFuncCall)
1287 for (
const Value *V : PtrArgs) {
1296 bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1297 UsesUnknownObject =
false;
1298 for (
const Value *V : ObjSet) {
1313 UsesAliasingPtr =
true;
1315 UsesAliasingPtr =
true;
1321 RequiresNoCaptureBefore =
true;
1327 UsesUnknownObject =
true;
1333 if (UsesUnknownObject)
1338 if (IsFuncCall && !IsArgMemOnlyCall)
1339 RequiresNoCaptureBefore =
true;
1357 if (!RequiresNoCaptureBefore ||
1359 A,
false,
I, &DT,
false,
1380 bool CanAddScopes = !UsesAliasingPtr;
1381 if (CanAddScopes && IsFuncCall)
1382 CanAddScopes = IsArgMemOnlyCall;
1387 Scopes.push_back(NewScopes[
A]);
1390 if (!Scopes.empty())
1392 LLVMContext::MD_alias_scope,
1403 "Expected to be in same basic block!");
1416 auto &Context = CalledFunction->
getContext();
1420 bool HasAttrToPropagate =
false;
1428 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
1429 Attribute::NonNull, Attribute::NoFPClass,
1430 Attribute::Alignment, Attribute::Range};
1438 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadNone);
1440 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadOnly);
1445 ValidExactParamAttrs.
back().addAttribute(Attr);
1448 HasAttrToPropagate |= ValidObjParamAttrs.
back().hasAttributes();
1449 HasAttrToPropagate |= ValidExactParamAttrs.
back().hasAttributes();
1453 if (!HasAttrToPropagate)
1466 if (InlinedFunctionInfo.
isSimplified(InnerCB, NewInnerCB))
1469 AttributeList AL = NewInnerCB->getAttributes();
1470 for (
unsigned I = 0,
E = InnerCB->arg_size();
I <
E; ++
I) {
1475 if (NewInnerCB->paramHasAttr(
I, Attribute::ByVal))
1479 if (
match(NewInnerCB->getArgOperand(
I),
1495 if (AL.getParamDereferenceableBytes(
I) >
1496 NewAB.getDereferenceableBytes())
1497 NewAB.removeAttribute(Attribute::Dereferenceable);
1498 if (AL.getParamDereferenceableOrNullBytes(
I) >
1499 NewAB.getDereferenceableOrNullBytes())
1500 NewAB.removeAttribute(Attribute::DereferenceableOrNull);
1501 if (AL.getParamAlignment(
I).valueOrOne() >
1502 NewAB.getAlignment().valueOrOne())
1503 NewAB.removeAttribute(Attribute::Alignment);
1504 if (
auto ExistingRange = AL.getParamRange(
I)) {
1505 if (
auto NewRange = NewAB.getRange()) {
1508 NewAB.removeAttribute(Attribute::Range);
1509 NewAB.addRangeAttr(CombinedRange);
1513 if (
FPClassTest ExistingNoFP = AL.getParamNoFPClass(
I))
1514 NewAB.addNoFPClassAttr(ExistingNoFP | NewAB.getNoFPClass());
1516 AL = AL.addParamAttributes(Context,
I, NewAB);
1517 }
else if (NewInnerCB->getArgOperand(
I)->getType()->isPointerTy()) {
1519 const Value *UnderlyingV =
1530 AL = AL.addParamAttributes(Context,
I, ValidObjParamAttrs[ArgNo]);
1537 if (AL.hasParamAttr(
I, Attribute::ReadOnly) &&
1538 AL.hasParamAttr(
I, Attribute::WriteOnly))
1539 AL = AL.addParamAttribute(Context,
I, Attribute::ReadNone);
1542 if (AL.hasParamAttr(
I, Attribute::ReadNone)) {
1543 AL = AL.removeParamAttribute(Context,
I, Attribute::ReadOnly);
1544 AL = AL.removeParamAttribute(Context,
I, Attribute::WriteOnly);
1548 if (AL.hasParamAttr(
I, Attribute::ReadOnly) ||
1549 AL.hasParamAttr(
I, Attribute::ReadNone))
1550 AL = AL.removeParamAttribute(Context,
I, Attribute::Writable);
1552 NewInnerCB->setAttributes(AL);
1566 Valid.addDereferenceableAttr(DerefBytes);
1568 Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1570 Valid.addAttribute(Attribute::NoAlias);
1572 Valid.addAttribute(Attribute::NoUndef);
1581 Valid.addAttribute(Attribute::NonNull);
1585 Valid.addRangeAttr(*
Range);
1595 if (!CallSiteValidUB.hasAttributes() && !CallSiteValidPG.hasAttributes())
1598 auto &Context = CalledFunction->
getContext();
1600 for (
auto &BB : *CalledFunction) {
1614 if (InlinedFunctionInfo.
isSimplified(RetVal, NewRetVal))
1634 if (RI->
getParent() != RetVal->getParent() ||
1645 AttributeList AL = NewRetVal->getAttributes();
1646 if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
1647 ValidUB.removeAttribute(Attribute::Dereferenceable);
1648 if (ValidUB.getDereferenceableOrNullBytes() <
1649 AL.getRetDereferenceableOrNullBytes())
1650 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
1651 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1684 if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
1685 ValidPG.removeAttribute(Attribute::Alignment);
1686 if (ValidPG.hasAttributes()) {
1687 Attribute CBRange = ValidPG.getAttribute(Attribute::Range);
1689 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1691 ValidPG.addRangeAttr(
1696 Attribute CBNoFPClass = ValidPG.getAttribute(Attribute::NoFPClass);
1697 if (CBNoFPClass.
isValid() && AL.hasRetAttr(Attribute::NoFPClass)) {
1698 ValidPG.addNoFPClassAttr(
1700 AL.getRetAttr(Attribute::NoFPClass).getNoFPClass());
1715 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1716 NewAL = NewAL.addRetAttributes(Context, ValidPG);
1718 NewRetVal->setAttributes(NewAL);
1734 bool DTCalculated =
false;
1738 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1745 if (!DTCalculated) {
1747 DTCalculated =
true;
1756 DL, ArgVal, Alignment->value());
1769 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1771 Align DstAlign = Dst->getPointerAlignment(M->getDataLayout());
1774 CallInst *CI = Builder.CreateMemCpy(Dst, DstAlign, Src, SrcAlign,
Size);
1818 Align Alignment =
DL.getPrefTypeAlign(ByValType);
1824 Alignment = std::max(Alignment, *ByValAlignment);
1828 nullptr, Alignment, Arg->
getName());
1852 if (Ty == Int8PtrTy)
1857 if (U->getType() != Int8PtrTy)
continue;
1858 if (U->stripPointerCasts() != AI)
continue;
1903 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1904 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1913 bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1919 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1927 if (!NoInlineLineTables)
1935 if (CalleeHasDebugInfo && !NoInlineLineTables)
1955 I.setDebugLoc(TheCallDL);
1960 assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1961 if (NoInlineLineTables) {
1962 DVR->setDebugLoc(TheCallDL);
1968 DVR->getMarker()->getParent()->
getContext(), IANodes);
1969 DVR->setDebugLoc(IDL);
1973 for (; FI != Fn->
end(); ++FI) {
1976 for (
DbgRecord &DVR :
I.getDbgRecordRange()) {
1982 if (NoInlineLineTables) {
1984 while (BI != FI->end()) {
1985 BI->dropDbgRecords();
1993#define DEBUG_TYPE "assignment-tracking"
2001 errs() <<
"# Finding caller local variables escaped by callee\n");
2004 if (!Arg->getType()->isPointerTy()) {
2016 assert(Arg->getType()->isPtrOrPtrVectorTy());
2017 APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
2019 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
2021 LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
2034 if (DbgAssign->getDebugLoc().getInlinedAt())
2041 return EscapedLocals;
2047 <<
Start->getParent()->getName() <<
" from "
2060 for (
auto BBI =
Start; BBI != End; ++BBI) {
2066#define DEBUG_TYPE "inline-function"
2080 for (
auto Entry : VMap) {
2086 if (!ClonedBBs.
insert(ClonedBB).second) {
2098 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
2108 auto CallSiteCount =
2111 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
2116 Function *Callee, int64_t EntryDelta,
2118 auto CalleeCount = Callee->getEntryCount();
2122 const uint64_t PriorEntryCount = CalleeCount->getCount();
2127 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
2129 : PriorEntryCount + EntryDelta;
2131 auto updateVTableProfWeight = [](
CallBase *CB,
const uint64_t NewEntryCount,
2140 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
2141 for (
auto Entry : *VMap) {
2144 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
2145 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
2150 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2151 updateVTableProfWeight(
II, CloneEntryCount, PriorEntryCount);
2157 Callee->setEntryCount(NewEntryCount);
2161 if (!VMap || VMap->
count(&BB))
2164 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2165 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2168 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2169 updateVTableProfWeight(
II, NewEntryCount, PriorEntryCount);
2197 IsUnsafeClaimRV = !IsRetainRV;
2199 for (
auto *RI : Returns) {
2201 bool InsertRetainCall = IsRetainRV;
2214 if (
II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2224 if (IsUnsafeClaimRV) {
2225 Builder.SetInsertPoint(
II);
2226 Builder.CreateIntrinsic(Intrinsic::objc_release, RetOpnd);
2228 II->eraseFromParent();
2229 InsertRetainCall =
false;
2248 NewCall->copyMetadata(*CI);
2249 CI->replaceAllUsesWith(NewCall);
2250 CI->eraseFromParent();
2251 InsertRetainCall =
false;
2255 if (InsertRetainCall) {
2259 Builder.SetInsertPoint(RI);
2260 Builder.CreateIntrinsic(Intrinsic::objc_retain, RetOpnd);
2286static std::pair<std::vector<int64_t>, std::vector<int64_t>>
2294 std::vector<int64_t> CalleeCounterMap;
2295 std::vector<int64_t> CalleeCallsiteMap;
2296 CalleeCounterMap.resize(CalleeCounters, -1);
2297 CalleeCallsiteMap.resize(CalleeCallsites, -1);
2300 if (Ins.getNameValue() == &Caller)
2302 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2303 if (CalleeCounterMap[OldID] == -1)
2305 const auto NewID =
static_cast<uint32_t>(CalleeCounterMap[OldID]);
2307 Ins.setNameValue(&Caller);
2308 Ins.setIndex(NewID);
2313 if (Ins.getNameValue() == &Caller)
2315 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2316 if (CalleeCallsiteMap[OldID] == -1)
2318 const auto NewID =
static_cast<uint32_t>(CalleeCallsiteMap[OldID]);
2320 Ins.setNameValue(&Caller);
2321 Ins.setIndex(NewID);
2325 std::deque<BasicBlock *> Worklist;
2342 Worklist.push_back(StartBB);
2343 while (!Worklist.empty()) {
2344 auto *BB = Worklist.front();
2345 Worklist.pop_front();
2349 Changed |= RewriteInstrIfNeeded(*BBID);
2353 BBID->moveBefore(BB->getFirstInsertionPt());
2366 Inc->eraseFromParent();
2369 RewriteInstrIfNeeded(*Inc);
2371 }
else if (Inc != BBID) {
2376 Inc->eraseFromParent();
2380 Changed |= RewriteCallsiteInsIfNeeded(*CS);
2385 if (Seen.
insert(Succ).second)
2386 Worklist.push_back(Succ);
2390 "Counter index mapping should be either to -1 or to non-zero index, "
2392 "index corresponds to the entry BB of the caller");
2394 "Callsite index mapping should be either to -1 or to non-zero index, "
2395 "because there should have been at least a callsite - the inlined one "
2396 "- which would have had a 0 index.");
2398 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
2417 bool MergeAttributes,
AAResults *CalleeAAR,
bool InsertLifetime,
2420 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2421 ForwardVarArgsTo, ORE);
2433 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2438 auto Ret =
InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2439 ForwardVarArgsTo, ORE);
2440 if (!Ret.isSuccess())
2445 CallsiteIDIns->eraseFromParent();
2450 const auto IndicesMaps =
remapIndices(Caller, StartBB, CtxProf,
2451 NumCalleeCounters, NumCalleeCallsites);
2456 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2458 (Ctx.counters().size() +
2459 llvm::count_if(CalleeCounterMap, [](
auto V) { return V != -1; }) ==
2461 "The caller's counters size should have grown by the number of new "
2462 "distinct counters inherited from the inlined callee.");
2463 Ctx.resizeCounters(NewCountersSize);
2467 auto CSIt = Ctx.callsites().find(
CallsiteID);
2468 if (CSIt == Ctx.callsites().end())
2470 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2473 if (CalleeCtxIt == CSIt->second.end())
2478 auto &CalleeCtx = CalleeCtxIt->second;
2479 assert(CalleeCtx.guid() == CalleeGUID);
2481 for (
auto I = 0U;
I < CalleeCtx.counters().
size(); ++
I) {
2482 const int64_t NewIndex = CalleeCounterMap[
I];
2483 if (NewIndex >= 0) {
2484 assert(NewIndex != 0 &&
"counter index mapping shouldn't happen to a 0 "
2485 "index, that's the caller's entry BB");
2486 Ctx.counters()[NewIndex] = CalleeCtx.counters()[
I];
2489 for (
auto &[
I, OtherSet] : CalleeCtx.callsites()) {
2490 const int64_t NewCSIdx = CalleeCallsiteMap[
I];
2491 if (NewCSIdx >= 0) {
2493 "callsite index mapping shouldn't happen to a 0 index, the "
2494 "caller must've had at least one callsite (with such an index)");
2495 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2505 CtxProf.
update(Updater, Caller);
2563 "convergent call needs convergencectrl operand");
2574 if (CalledFunc->
hasGC()) {
2575 if (Caller->hasGC() && CalledFunc->
getGC() != Caller->getGC())
2589 Caller->hasPersonalityFn()
2590 ? Caller->getPersonalityFn()->stripPointerCasts()
2592 if (CalledPersonality) {
2597 if (CallerPersonality && CalledPersonality != CallerPersonality)
2603 if (CallerPersonality) {
2606 std::optional<OperandBundleUse> ParentFunclet =
2620 for (
const BasicBlock &CalledBB : *CalledFunc) {
2628 for (
const BasicBlock &CalledBB : *CalledFunc) {
2629 if (CalledBB.isEHPad())
2649 bool MergeAttributes,
AAResults *CalleeAAR,
2650 bool InsertLifetime,
Function *ForwardVarArgsTo,
2656 "CanInlineCallSite should have verified direct call to definition");
2660 bool EHPadForCallUnwindsLocally =
false;
2663 Value *CallSiteUnwindDestToken =
2666 EHPadForCallUnwindsLocally =
2667 CallSiteUnwindDestToken &&
2685 if (CalledFunc->
hasGC()) {
2686 if (!Caller->hasGC())
2687 Caller->setGC(CalledFunc->
getGC());
2690 "CanInlineCallSite should have verified compatible GCs");
2697 if (!Caller->hasPersonalityFn()) {
2698 Caller->setPersonalityFn(CalledPersonality);
2700 assert(Caller->getPersonalityFn()->stripPointerCasts() ==
2701 CalledPersonality &&
2702 "CanInlineCallSite should have verified compatible personality");
2726 auto &
DL = Caller->getDataLayout();
2733 E = CalledFunc->
arg_end();
I != E; ++
I, ++AI, ++ArgNo) {
2734 Value *ActualArg = *AI;
2742 &CB, CalledFunc, IFI,
2744 if (ActualArg != *AI)
2750 VMap[&*
I] = ActualArg;
2770 false, Returns,
".i",
2771 &InlinedFunctionInfo);
2773 FirstNewBlock = LastBlock; ++FirstNewBlock;
2788 CalledFunc->
front());
2796 for (ByValInit &
Init : ByValInits)
2798 Caller->getParent(), &*FirstNewBlock, IFI,
2801 std::optional<OperandBundleUse> ParentDeopt =
2828 std::vector<Value *> MergedDeoptArgs;
2829 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2830 ChildOB.Inputs.size());
2835 OpDefs.
emplace_back(
"deopt", std::move(MergedDeoptArgs));
2865 SAMetadataCloner.clone();
2866 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2886 if (CalledFunc->
hasMetadata(LLVMContext::MD_implicit_ref)) {
2888 CalledFunc->
getMetadata(LLVMContext::MD_implicit_ref, MDs);
2890 Caller->addMetadata(LLVMContext::MD_implicit_ref, *MD);
2896 FirstNewBlock, Caller->end());
2901 make_range(FirstNewBlock->getIterator(), Caller->end()))
2909 if (IntrinsicCall) {
2922 E = FirstNewBlock->end();
I != E; ) {
2952 Caller->getEntryBlock().splice(
InsertPoint, &*FirstNewBlock,
2969 bool InlinedMustTailCalls =
false, InlinedDeoptimizeCalls =
false;
2973 CallSiteTailKind = CI->getTailCallKind();
2988 if (!VarArgsToForward.
empty() &&
2989 ((ForwardVarArgsTo &&
2995 if (!Attrs.isEmpty() || !VarArgsAttrs.
empty()) {
2996 for (
unsigned ArgNo = 0;
2998 ArgAttrs.
push_back(Attrs.getParamAttrs(ArgNo));
3003 Attrs = AttributeList::get(CI->
getContext(), Attrs.getFnAttrs(),
3004 Attrs.getRetAttrs(), ArgAttrs);
3019 InlinedDeoptimizeCalls |=
3020 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
3039 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
3058 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
3060 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
3079 if (InlinedMustTailCalls &&
3080 RI->
getParent()->getTerminatingMustTailCall())
3082 if (InlinedDeoptimizeCalls &&
3083 RI->
getParent()->getTerminatingDeoptimizeCall())
3095 .CreateStackSave(
"savedstack");
3102 if (InlinedMustTailCalls && RI->
getParent()->getTerminatingMustTailCall())
3104 if (InlinedDeoptimizeCalls && RI->
getParent()->getTerminatingDeoptimizeCall())
3139 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3157 if (InlinedDeoptimizeCalls) {
3163 if (Caller->getReturnType() == CB.
getType()) {
3165 return RI->
getParent()->getTerminatingDeoptimizeCall() !=
nullptr;
3170 Caller->getParent(), Intrinsic::experimental_deoptimize,
3171 {Caller->getReturnType()});
3197 "Expected at least the deopt operand bundle");
3201 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3205 Builder.CreateRetVoid();
3207 Builder.CreateRet(NewDeoptCall);
3222 if (InlinedMustTailCalls) {
3224 Type *NewRetTy = Caller->getReturnType();
3231 RI->
getParent()->getTerminatingMustTailCall();
3232 if (!ReturnedMustTail) {
3244 OldCast->eraseFromParent();
3248 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
3264 make_range(FirstNewBlock->getIterator(), Caller->end()))
3275 if (Returns.
size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3278 FirstNewBlock->end());
3280 Caller->back().eraseFromParent();
3294 if (&CB == R->getReturnValue())
3303 Returns[0]->eraseFromParent();
3305 if (MergeAttributes)
3306 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3323 CreatedBranchToNormalDest =
3333 CalledFunc->
getName() +
".exit");
3340 CalledFunc->
getName() +
".exit");
3358 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
3366 if (Returns.
size() > 1) {
3371 PHI->insertBefore(AfterCallBB->
begin());
3382 "Ret value not consistent in function!");
3383 PHI->addIncoming(RI->getReturnValue(), RI->
getParent());
3392 BI->setDebugLoc(
Loc);
3399 if (CreatedBranchToNormalDest)
3400 CreatedBranchToNormalDest->setDebugLoc(
Loc);
3401 }
else if (!Returns.
empty()) {
3405 if (&CB == Returns[0]->getReturnValue())
3412 BasicBlock *ReturnBB = Returns[0]->getParent();
3417 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
3419 if (CreatedBranchToNormalDest)
3420 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->
getDebugLoc());
3423 Returns[0]->eraseFromParent();
3430 if (CreatedBranchToNormalDest)
3442 if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
3452 OrigBB->
splice(Br->getIterator(), CalleeEntry);
3455 Br->eraseFromParent();
3466 auto &
DL = Caller->getDataLayout();
3468 PHI->replaceAllUsesWith(V);
3469 PHI->eraseFromParent();
3473 if (MergeAttributes)
3474 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3478 bool MergeAttributes,
3480 bool InsertLifetime,
3484 if (Result.isSuccess()) {
3486 ForwardVarArgsTo, ORE);
3494 ArrayRef<std::pair<Function *, int>> InlineHistory) {
3495 while (InlineHistoryID != -1) {
3496 assert(
unsigned(InlineHistoryID) < InlineHistory.size() &&
3497 "Invalid inline history ID");
3498 if (InlineHistory[InlineHistoryID].first ==
F)
3500 InlineHistoryID = InlineHistory[InlineHistoryID].second;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, Instruction *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
DenseMap< Instruction *, Value * > UnwindDestMemoTy
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void PropagateInlinedFromMetadata(CallBase &CB, StringRef CalledFuncName, StringRef CallerFuncName, Function::iterator FStart, Function::iterator FEnd)
Track inlining chain via inlined.from metadata for dontcall diagnostics.
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList, OptimizationRemarkEmitter *ORE)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, MaybeAlign SrcAlign, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD, OptimizationRemarkEmitter *ORE)
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static Value * getParentPad(Value *EHPad)
LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
static LLVM_ABI uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
LLVM_ABI FPClassTest getNoFPClass() const
Return the FPClassTest for nofpclass.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
LLVM_ABI void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
LLVM_ABI BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a range of values.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
static LLVM_ABI InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static LLVM_ABI InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
const DILocation * getWithoutAtom() const
uint64_t getAtomGroup() const
uint8_t getAtomRank() const
Subprogram description. Uses SubclassData1.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static DebugLoc getCompilerGenerated()
LLVM_ABI unsigned getLine() const
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
LLVM_ABI MDNode * getScope() const
static LLVM_ABI DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
static DebugLoc getTemporary()
LLVM_ABI unsigned getCol() const
LLVM_ABI bool isImplicitCode() const
Check if the DebugLoc corresponds to an implicit code.
static DebugLoc getUnknown()
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasMetadata() const
Return true if this value has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)
Create a lifetime.start intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Value * ConvergenceControlToken
bool UpdateProfile
Update profile for callee as well as cloned version.
Instruction * CallSiteEHPad
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static LLVM_ABI MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
A Module instance is used to store all the information related to an LLVM module.
The instrumented contextual profile, produced by the CtxProfAnalysis.
LLVM_ABI bool isInSpecializedModule() const
LLVM_ABI void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
LLVM_ABI std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isVoidTy() const
Return true if this is 'void'.
Unconditional Branch instruction.
void setSuccessor(BasicBlock *NewSucc)
static UncondBrInst * Create(BasicBlock *Target, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i=0) const
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
ValueMapIteratorImpl< MapT, const Value *, false > iterator
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
std::pair< iterator, bool > insert(const ValueT &V)
const ParentTy * getParent() const
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conve...
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
bool match(Val *V, const Pattern &P)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
LLVM_ABI void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
LLVM_ABI void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
DenseMap< const AllocaInst *, SmallSetVector< VarRecord, 2 > > StorageToVarsMap
Map of backing storage to a set of variables that are stored to it.
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
LLVM_ABI MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
@ None
anything that is inert from an ARC perspective.
@ RetainRV
objc_retainAutoreleasedReturnValue
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
std::string utostr(uint64_t X, bool isNeg=false)
LLVM_ABI bool inlineHistoryIncludes(Function *F, int InlineHistoryID, ArrayRef< std::pair< Function *, int > > InlineHistory)
Check if Function F appears in the inline history chain.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
bool isa_and_nonnull(const Y &Val)
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI InlineResult CanInlineCallSite(const CallBase &CB, InlineFunctionInfo &IFI)
Check if it is legal to perform inlining of the function called by CB into the caller at this particu...
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
LLVM_ABI void InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This should generally not be used, use InlineFunction instead.
Function::ProfileCount ProfileCount
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
bool isa(const From &Val)
isa&lt;X&gt; - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
LLVM_ABI void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta, then scaling callsite information by the new entry count divided by the old entry count.
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
DWARFExpression::Operation Op
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successor blocks.
LLVM_ABI bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNotCapturedBefore.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool capturesAnything(CaptureComponents CC)
bool pred_empty(const BasicBlock *BB)
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value.
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
LLVM_ABI void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned code.
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.