#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "coro-suspend-crossing"

class BlockToIndexMapping {
  size_t size() const { return V.size(); }

  size_t blockToIndex(BasicBlock const *BB) const {
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");

class SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

    bool Suspend = false;
    bool KillLoop = false;
    bool Changed = false;

    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);

    return Block[Mapping.blockToIndex(BB)];

  template <bool Initialize = false>

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
    bool const Result = Block[ToIndex].Kills[FromIndex];
               << " answer is " << Result << "\n");

    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
               << " answer is " << Result << " (path or loop)\n");
  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)

    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      assert(UseBB && "should have split coro.suspend into its own block");

    return hasPathCrossingSuspendPoint(DefBB, UseBB);

    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);

    auto *DefBB = I.getParent();

    if (isa<AnyCoroSuspendInst>(I)) {
      assert(DefBB && "should have split coro.suspend into its own block");

    return isDefinitionAcrossSuspend(DefBB, U);

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

        "Coroutine could only collect Argument and Instruction now.");

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  for (size_t I = 0, N = BV.size(); I < N; ++I)
    dbgs() << " " << Mapping.indexToBlock(I)->getName();

  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    dbgs() << B->getName() << ":\n";
template <bool Initialize>
bool SuspendCrossingInfo::computeBlockData(
  bool Changed = false;

    auto BBNo = Mapping.blockToIndex(BB);

    if constexpr (!Initialize)

      return !Block[Mapping.blockToIndex(BB)].Changed;

    auto SavedConsumes = B.Consumes;
    auto SavedKills = B.Kills;

      auto PrevNo = Mapping.blockToIndex(PI);

      B.Consumes |= P.Consumes;

        B.Kills |= P.Consumes;

      B.Kills |= B.Consumes;

    B.KillLoop |= B.Kills[BBNo];

    if constexpr (!Initialize) {
      B.Changed = (B.Kills != SavedKills) || (B.Consumes != SavedConsumes);
      Changed |= B.Changed;
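    // Editorial note (reconstructed from the fragments above; a sketch, not
    // the authoritative comment from this file): the propagation reads as a
    // forward dataflow over the CFG where, for each predecessor P of B,
    //   Consumes[B] |= Consumes[P]   (blocks that can reach B), and
    //   Kills[B]    |= Consumes[P]   when P is a suspend block,
    // with Kills[B] |= Consumes[B] when B itself suspends. The iteration is
    // repeated (see the computeBlockData calls below) until no block reports
    // Changed, after which hasPathCrossingSuspendPoint(From, To) is just a
    // read of Block[To].Kills[From].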
  const size_t N = Mapping.size();

  for (size_t I = 0; I < N; ++I) {
    B.Consumes.resize(N);

  getBlockData(CE->getParent()).End = true;

  auto &B = getBlockData(SuspendBlock);
  B.Kills |= B.Consumes;

    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);

  computeBlockData<true>(RPOT);
  while (computeBlockData</*Initialize*/ false>(RPOT))
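// Illustrative sketch (editorial addition, not part of this file): once a
// SuspendCrossingInfo has been built for a coroutine, callers ask whether a
// definition is still needed after a suspend point and, if so, record it as a
// spill. The names (F, Shape, Checker, FrameData) follow the usage that
// appears later in this excerpt.
//
//   SuspendCrossingInfo Checker(F, Shape);
//   for (Instruction &I : instructions(F))
//     for (User *U : I.users())
//       if (Checker.isDefinitionAcrossSuspend(I, U))
//         FrameData.Spills[&I].push_back(cast<Instruction>(U));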
  RematNode() = default;

  RematNode *EntryNode;

  SuspendCrossingInfo &Checker;

  RematGraph(const std::function<bool(Instruction &)> &MaterializableCallback,
      : MaterializableCallback(MaterializableCallback), Checker(Checker) {
    std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(I);
    EntryNode = FirstNode.get();
    std::deque<std::unique_ptr<RematNode>> WorkList;
    addNode(std::move(FirstNode), WorkList, cast<User>(I));
    while (WorkList.size()) {
      std::unique_ptr<RematNode> N = std::move(WorkList.front());
      WorkList.pop_front();
      addNode(std::move(N), WorkList, cast<User>(I));

  void addNode(std::unique_ptr<RematNode> NUPtr,
               std::deque<std::unique_ptr<RematNode>> &WorkList,
    RematNode *N = NUPtr.get();
    if (Remats.count(N->Node))

    Remats[N->Node] = std::move(NUPtr);
    for (auto &Def : N->Node->operands()) {
      if (!D || !MaterializableCallback(*D) ||
          !Checker.isDefinitionAcrossSuspend(*D, FirstUse))

      if (Remats.count(D)) {
        N->Operands.push_back(Remats[D].get());

      for (auto &I : WorkList) {
          N->Operands.push_back(I.get());

      std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(D);
      N->Operands.push_back(ChildNode.get());
      WorkList.push_back(std::move(ChildNode));

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    if (EntryNode->Node->getParent()->hasName())
      dbgs() << EntryNode->Node->getParent()->getName();
      EntryNode->Node->getParent()->printAsOperand(dbgs(), false);
    dbgs() << ") : " << *EntryNode->Node << "\n";
    for (auto &E : Remats) {
      dbgs() << *(E.first) << "\n";
      for (RematNode *U : E.second->Operands)
        dbgs() << "  " << *U->Node << "\n";

    return N->Operands.begin();
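// Illustrative sketch (editorial addition): the GraphTraits hooks whose
// child_begin() is shown above let a RematGraph be walked with the standard
// post-order machinery. The pattern below mirrors the debug dump performed in
// doRematerializations later in this excerpt; RG stands in for a RematGraph*.
//
//   ReversePostOrderTraversal<RematGraph *> RPOT(RG);
//   for (auto I = RPOT.begin(); I != RPOT.end(); ++I)
//     (*I)->Node->dump(); // each instruction in the rematerialization group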
#define DEBUG_TYPE "coro-frame"

class FrameTypeBuilder;

  bool MayWriteBeforeCoroBegin;
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}

struct FrameDataInfo {
    for (const auto &P : Spills)
    for (const auto &A : Allocas)

    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");

    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;

    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());

    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, AL});

    auto Iter = FieldDynamicAlignMap.find(V);
    assert(Iter != FieldDynamicAlignMap.end());

    assert(FieldDynamicAlignMap.count(V) == 0);
    FieldDynamicAlignMap.insert({V, Align});

    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());

    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});

  void updateLayoutIndex(FrameTypeBuilder &B);

  bool LayoutIndexUpdateStarted = false;

  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : Spills) {
    for (auto *I : E.second)

  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : RM) {

  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
using FieldIDType = size_t;

class FrameTypeBuilder {
    FieldIDType LayoutFieldIndex;

  bool IsFinished = false;

  std::optional<Align> MaxFrameAlignment;

                   std::optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
                                              bool IsHeader = false) {
    if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
      Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());

    return addField(Ty, AI->getAlign(), IsHeader);

  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,

  [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
                                     bool IsHeader = false,
                                     bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    if (FieldSize == 0) {

    Align ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment = ABIAlign;
    if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
      TyAlignment = *MaxFrameAlignment;
    Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);

    if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
      FieldAlignment = *MaxFrameAlignment;
      FieldSize = FieldSize + DynamicAlignBuffer;

      StructSize = Offset + FieldSize;

    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
    return Fields.size() - 1;

    assert(IsFinished && "not yet finished!");

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
        Field.DynamicAlignBuffer
    setDynamicAlign(I, dynamicAlign);

  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
  for (const auto &A : Allocas)
  LayoutIndexUpdateStarted = false;
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,

    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)

      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));

      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
      Allocas.push_back(A.Alloca);

                                    StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));

  auto GetAllocaSize = [&](const AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();

  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);

    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInferenre(Alloca, Iter);

      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==

      bool CouldMerge = NoInference && Alignable;

        AllocaSet.push_back(Alloca);

      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));

  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    BasicBlock *DestBB = SwitchAndDefaultDest.second;

         : NonOverlapedAllocas) {
      if (AllocaSet.size() > 1) {
        dbgs() << "In Function:" << F.getName() << "\n";
        dbgs() << "Find Union Set "
        dbgs() << "\tAllocas are \n";
        for (auto Alloca : AllocaSet)
          dbgs() << "\t\t" << *Alloca << "\n";
void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  for (auto &Field : Fields) {

  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

    return *static_cast<Field *>(const_cast<void *>(LayoutField.Id));

  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);
    if (!isAligned(F.TyAlignment, LayoutField.Offset))

  FieldTypes.reserve(LayoutFields.size() * 3 / 2);

  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    if (Offset != LastOffset) {

    F.LayoutFieldIndex = FieldTypes.size();

    if (F.DynamicAlignBuffer) {

  Ty->setBody(FieldTypes, Packed);

  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  for (auto *V : FrameData.getAllDefs()) {
    auto CacheIt = [&DIVarCache, V](const auto &Container) {
      if (I != Container.end())
        DIVarCache.insert({V, (*I)->getVariable()});

    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    return MDName->getString();

    return "__floating_type_";

    return "PointerType";

    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    for (auto &Iter : Buffer)
      if (Iter == '.' || Iter == ':')
    return MDName->getString();

  return "UnknownType";

  DIType *RetType = nullptr;

    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
                                 llvm::DINode::FlagArtificial);
                                 dwarf::DW_ATE_float,
                                 llvm::DINode::FlagArtificial);

                                 std::nullopt, Name);

        llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
                              Scope, LineNum, DITypeCache);
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          llvm::DINode::FlagArtificial, DITy));

        Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
    RetType = CharSizeType;

  DITypeCache.insert({Ty, RetType});
                                FrameDataInfo &FrameData) {
  if (!DIS || !DIS->getUnit() ||

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug information for C++ coroutine now.\n");

         "Coroutine with switch ABI should own Promise alloca");

  unsigned LineNum = DIS->getLine();

      DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
      llvm::DINodeArray());

  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

                             dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});

  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);
        {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});

  unsigned UnknownTypeNum = 0;

    assert(Ty->isSized() && "We can't handle type which is not sized.\n");

    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

      DITy = TyCache[Index];

      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name += "_" + std::to_string(UnknownTypeNum);

        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));

      FrameDITy, true, DINode::FlagArtificial);

  auto RetainedNodes = DIS->getRetainedNodes();
                                         RetainedNodes.end());

      DILocation::get(DIS->getContext(), LineNum, 1, DIS);
  assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));

        DbgVariableRecord::LocationType::Declare);
    It->getParent()->insertDbgRecordBefore(NewDVR, It);

    DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                                  FrameDataInfo &FrameData) {
  Name.append(".Frame");

  std::optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  std::optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FnPtrTy = PointerType::getUnqual(C);

    (void)B.addField(FnPtrTy, std::nullopt, true);
    (void)B.addField(FnPtrTy, std::nullopt, true);

    FrameData.setFieldIndex(
        PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, true));

    SwitchIndexFieldId = B.addField(IndexType, std::nullopt);

    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");

  B.addFieldForAllocas(F, FrameData, Shape);

  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    FrameData.Allocas.emplace_back(

  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id = B.addField(FieldType, std::nullopt, false,
    FrameData.setFieldIndex(S.first, Id);

  FrameData.updateLayoutIndex(B);

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);

  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
        = (B.getStructSize() <= Id->getStorageSize() &&
           B.getStructAlign() <= Id->getStorageAlignment());

  case coro::ABI::Async: {
        "The alignment requirement of frame variables cannot be higher than "
        "the alignment of the async function context");
                   const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
      CoroSuspendBBs.insert(SuspendInst->getParent());

    if (PI.isEscaped() &&
      MayWriteBeforeCoroBegin = true;

    if (SI.getValueOperand() != U->get())

    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());

      while (!StoreAliases.empty()) {
        for (User *U : I->users()) {
          if (auto *LI = dyn_cast<LoadInst>(U)) {

          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)

          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())

          if (auto *BI = dyn_cast<BitCastInst>(U)) {

    if (!IsSimpleStoreThenLoad())

    if (!IsOffsetKnown || !Offset.isZero())

    switch (II.getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      LifetimeStarts.insert(&II);
      LifetimeStartBBs.push_back(II.getParent());
    case Intrinsic::lifetime_end:
      LifetimeEndBBs.insert(II.getParent());

    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffetMap)
             "created before CoroBegin.");
    return AliasOffetMap;

  const SuspendCrossingInfo &Checker;

  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable std::optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      if (LifetimeEndBBs.empty())
                                         &LifetimeEndBBs, &DT))

      if (PI.isEscaped()) {
        for (auto *A : LifetimeStarts) {
          for (auto *B : LifetimeStarts) {
            if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))

      MayWriteBeforeCoroBegin = true;

    for (auto &U : I.uses())

    if (!IsOffsetKnown) {
      AliasOffetMap[&I].reset();

    auto Itr = AliasOffetMap.find(&I);
    if (Itr == AliasOffetMap.end()) {
    } else if (Itr->second && *Itr->second != Offset) {
      AliasOffetMap[&I].reset();
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();

    auto GEP = cast<GetElementPtrInst>(
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (FrameData.getDynamicAlign(Orig) != 0) {
        auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
        PtrValue = Builder.CreateAdd(PtrValue, AlignMask);

    if (GEP->getType() != Orig->getType())
                                   Orig->getName() + Twine(".cast"));

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));

    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
      auto *I = cast<Instruction>(Def);
    } else if (auto *II = dyn_cast<InvokeInst>(I)) {
      auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
      InsertPt = NewBB->getTerminator()->getIterator();
    } else if (isa<PHINode>(I)) {
      if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))

      assert(!I->isTerminator() && "unexpected terminator");
      InsertPt = I->getNextNode()->getIterator();

    auto Index = FrameData.getFieldIndex(Def);

    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        CurrentReload = GEP;
            SpillAlignment, E.first->getName() + Twine(".reload"));

      if (F->getSubprogram()) {
        while (DIs.empty() && DVRs.empty() && isa<LoadInst>(CurDef)) {
          auto *LdInst = cast<LoadInst>(CurDef);
          if (LdInst->getPointerOperandType() != LdInst->getType())
          CurDef = LdInst->getPointerOperand();
          if (!isa<AllocaInst, LoadInst>(CurDef))

        auto SalvageOne = [&](auto *DDI) {
          bool AllowUnresolved = false;
              DDI->getExpression(), DDI->getDebugLoc(),
              DbgVariableRecord::LocationType::Declare);
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),

      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();

      U->replaceUsesOfWith(Def, CurrentReload);

        DVR.replaceVariableLocationOp(Def, CurrentReload, true);
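// Editorial note: as the loop above shows, every use of a spilled definition
// is rewritten to read from the coroutine frame. The first use in each basic
// block materializes a GEP named "<def>.reload.addr" and a load named
// "<def>.reload"; later uses in the same block reuse that reload, and debug
// intrinsics/records that pointed at the old definition are retargeted to the
// reloaded value.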
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    for (const auto &P : FrameData.Allocas) {
      auto *G = GetFramePointer(Alloca);
      G->takeName(Alloca);

  for (const auto &A : FrameData.Allocas) {
    UsersToUpdate.clear();
      auto *I = cast<Instruction>(U);
    if (UsersToUpdate.empty())
    auto *G = GetFramePointer(Alloca);

    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);
    for (auto *DVR : DbgVariableRecords)
      DVR->replaceVariableLocationOp(Alloca, G);

      if (I->isLifetimeStartOrEnd()) {
        I->eraseFromParent();
      I->replaceUsesOfWith(Alloca, G);

  for (const auto &A : FrameData.Allocas) {
    if (A.MayWriteBeforeCoroBegin) {
          "Coroutines cannot handle copying of array allocas yet");
      auto *G = GetFramePointer(Alloca);

    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto &Value = *Alias.second;
          AliasPtr, [&](Use &U) { return DT.dominates(CB, U); });

    auto *Inst = dyn_cast<Instruction>(U.getUser());
    if (!Inst || DT.dominates(CB, Inst))
    if (auto *CI = dyn_cast<CallInst>(Inst)) {
      if (CI->onlyReadsMemory() ||
          CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
    return isa<StoreInst>(Inst) ||
           isa<GetElementPtrInst>(Inst) ||
           isa<BitCastInst>(Inst);

  if (HasAccessingPromiseBeforeCB) {
    auto *G = GetFramePointer(PA);
                                          PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
    int Index = PN->getBasicBlockIndex(InsertedBB);
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);

  auto *NewCleanupPadBB =
                         CleanupPadBB->getParent(), CleanupPadBB);
  auto *SetDispatchValuePN =
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,

  int SwitchIndex = 0;
                                       Twine(".from.") + Pred->getName(),
                                       CleanupPadBB->getParent(), CleanupPadBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);

  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {

  while (!Worklist.empty()) {
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);

    if (auto *CleanupPad =
            dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        assert(CS->getUnwindDest() == &BB);

  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
      IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

  if (auto *PN = dyn_cast<PHINode>(&BB.front()))
    if (PN->getNumIncomingValues() > 1)

  return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));

  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
  for (const auto &E : AllRemats) {
    RematGraph *RG = E.second.get();

    auto InsertPoint = &*Use->getParent()->getFirstInsertionPt();
    if (isa<AnyCoroSuspendInst>(Use)) {
          Use->getParent()->getSinglePredecessor();
      assert(SuspendPredecessorBlock && "malformed coro suspend instruction");

    for (; I != RPOT.end(); ++I) {
      CurrentMaterialization = D->clone();
      CurrentMaterialization->setName(D->getName());
      InsertPoint = CurrentMaterialization;

      for (auto &I : InstructionsToProcess)
        I->replaceUsesOfWith(D, CurrentMaterialization);

      for (unsigned i = 0, E = Use->getNumOperands(); i != E; ++i)
        if (Use->getOperand(i) == D)
              {Use, D, CurrentMaterialization});

      InstructionsToProcess.push_back(CurrentMaterialization);

  for (auto &R : FinalInstructionsToProcess) {
    if (auto *PN = dyn_cast<PHINode>(R.Use)) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(R.Remat);
      PN->eraseFromParent();

    R.Use->replaceUsesOfWith(R.Def, R.Remat);

  auto *BB = I->getParent();

  return isa<AnyCoroSuspendInst>(BB->front());
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());

                                               unsigned depth = 3) {
  if (depth == 0)
    return false;

  for (auto *U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);

  for (auto *AI : LocalAllocas) {
    Value *StackSave = nullptr;

    for (auto *U : AI->users()) {
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

        auto FI = cast<CoroAllocaFreeInst>(U);

      DeadInsts.push_back(cast<Instruction>(U));

    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
      auto FI = cast<CoroAllocaFreeInst>(U);
    DeadInsts.push_back(cast<Instruction>(U));

  return cast<Instruction>(Alloc);

  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Call = Builder.CreateCall(FnTy, Fn, {});

  auto FnTy = FunctionType::get(Builder.getPtrTy(),
                                {V->getType()}, false);
  auto Call = Builder.CreateCall(FnTy, Fn, { V });

  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);

  if (isa<CallInst>(Call)) {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
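// Illustrative sketch (editorial addition): the helper above brackets a call
// with the swifterror get/set helpers; conceptually the sequence is the one
// below, with Alloca, ValueTy, Shape and Builder supplied by the caller.
//
//   // Before the call: flush the alloca's current value into the register.
//   auto *ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
//   emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
//   // ... the original call (or invoke) executes here ...
//   // After the call, or on the invoke's normal edge: capture the value the
//   // callee produced and store it back into the alloca.
//   auto *ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
//   Builder.CreateStore(ValueAfterCall, Alloca);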
    if (isa<LoadInst>(User) || isa<StoreInst>(User))

    auto Call = cast<Instruction>(User);

  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = PointerType::getUnqual(F.getContext());

  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());

  auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);

  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr())
      continue;

  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);

  if (!AllocasToPromote.empty()) {
                                       const FrameDataInfo &FrameData,
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||

  while (!Worklist.empty()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);

                                     SuspendCrossingInfo &Checker,
  DomSet.insert(&F.getEntryBlock());
           "should have split coro.suspend into its own block");

    if (auto *II = dyn_cast<IntrinsicInst>(I))
      return II->getIntrinsicID() == Intrinsic::lifetime_start;

    if (!U->hasOneUse() || U->stripPointerCasts() != AI)

        Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
      if (collectLifetimeStart(UI, AI))

    if (Valid && Lifetimes.size() != 0) {
      auto *NewLifetime = Lifetimes[0]->clone();
      NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
      NewLifetime->insertBefore(DomBB->getTerminator());
        S->eraseFromParent();

                               const SuspendCrossingInfo &Checker,
  if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))

  bool ShouldUseLifetimeStartInfo =
      (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
       Shape.ABI != coro::ABI::RetconOnce);
  AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
                           ShouldUseLifetimeStartInfo};
  Visitor.visitPtr(*AI);
  if (!Visitor.getShouldLiveOnFrame())
                       Visitor.getMayWriteBeforeCoroBegin());
static std::optional<std::pair<Value &, DIExpression &>>

  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))

  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getPointerOperand();
      if (!SkipOutermostLoad)
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getValueOperand();

      if (!Op || !AdditionalValues.empty()) {

    SkipOutermostLoad = false;

    return std::nullopt;

  auto *StorageAsArg = dyn_cast<Argument>(Storage);
  const bool IsSwiftAsyncArg =
      StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);

  if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&

  if (StorageAsArg && !IsSwiftAsyncArg) {
    auto &Cached = ArgToAllocaMap[StorageAsArg];
                           Storage->getName() + ".debug");

  return {{*Storage, *Expr}};

  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);

  Value *Storage = &SalvagedInfo->first;

  if (isa<DbgDeclareInst>(DVI)) {
    std::optional<BasicBlock::iterator> InsertPt;
    if (auto *I = dyn_cast<Instruction>(Storage)) {
      InsertPt = I->getInsertionPointAfterDef();
      if (ILoc && DVILoc &&
          DVILoc->getScope()->getSubprogram() ==
              ILoc->getScope()->getSubprogram())
    } else if (isa<Argument>(Storage))
      InsertPt = F->getEntryBlock().begin();
      DVI.moveBefore(*(*InsertPt)->getParent(), *InsertPt);

  Value *Storage = &SalvagedInfo->first;

  if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
    std::optional<BasicBlock::iterator> InsertPt;
    if (auto *I = dyn_cast<Instruction>(Storage)) {
      InsertPt = I->getInsertionPointAfterDef();
      if (ILoc && DVRLoc &&
          DVRLoc->getScope()->getSubprogram() ==
              ILoc->getScope()->getSubprogram())
    } else if (isa<Argument>(Storage))
      InsertPt = F->getEntryBlock().begin();
      (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
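// Editorial note: the two overloads above apply the same salvage logic to the
// older dbg.declare/dbg.value intrinsics (DbgVariableIntrinsic) and to the
// newer DbgVariableRecord form. In both cases, once salvageDebugInfoImpl has
// rewritten the location to the frame-based Storage value, a declare-style
// record is re-homed either right after Storage's definition (when the debug
// scopes match) or at the start of the entry block when Storage is an
// Argument.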
                               Function &F, SuspendCrossingInfo &Checker,
    const std::function<bool(Instruction &)> &MaterializableCallback) {

      if (!MaterializableCallback(I))
      for (User *U : I.users())
        if (Checker.isDefinitionAcrossSuspend(I, U))
          Spills[&I].push_back(cast<Instruction>(U));

  for (auto &E : Spills) {
      if (AllRemats.count(U))
          std::make_unique<RematGraph>(MaterializableCallback, U, Checker);

        for (auto I = RPOT.begin(); I != RPOT.end();
             ++I) { (*I)->Node->dump(); } dbgs()

      AllRemats[U] = std::move(RematUPtr);

    const std::function<bool(Instruction &)> &MaterializableCallback) {

  if (Shape.ABI == coro::ABI::Switch &&

      if (auto *Save = CSI->getCoroSave())

    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)

  SuspendCrossingInfo Checker(F, Shape);

  FrameDataInfo FrameData;

      Shape.ABI != coro::ABI::RetconOnce)
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
        for (User *U : Alloc->users()) {
          if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
            FrameData.Spills[Alloc].push_back(cast<Instruction>(U));

    if (isa<CoroAllocaGetInst>(I))

    if (auto *AI = dyn_cast<AllocaInst>(&I)) {

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        if (I.getType()->isTokenTy())
               "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));

  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
      if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
        FrameData.Spills[V].push_back(DVR->Marker->MarkedInstr);

  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||

  for (auto *I : DeadInstructions)
    I->eraseFromParent();
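// Illustrative sketch (editorial addition): buildCoroutineFrame is the entry
// point into this file from the coroutine splitting pass. A caller that
// already has the function, its coro::Shape, and a TargetTransformInfo would
// invoke it roughly as below; coro::defaultMaterializable is the stock
// rematerialization predicate, and a frontend may substitute its own.
//
//   coro::buildCoroutineFrame(F, Shape, TTI, [](Instruction &I) {
//     return coro::defaultMaterializable(I);
//   });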
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args=std::nullopt, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
This class represents the llvm.coro.begin instruction.
This represents the llvm.coro.suspend instruction.
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
Base class for scope-like contexts.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
UnreachableInst * CreateUnreachable()
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void visitIntrinsicInst(IntrinsicInst &I)
void visitBitCastInst(BitCastInst &I)
void visit(Iterator Start, Iterator End)
void visitPHINode(PHINode &I)
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
void visitSelectInst(SelectInst &I)
void visitGetElementPtrInst(GetElementPtrInst &I)
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
LLVMContext & getContext() const
static MDString * get(LLVMContext &Context, StringRef Str)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
size_type count(const KeyT &Key) const
This is the common base class for memset/memcpy/memmove.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Compute live ranges of allocas.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
std::string str() const
str - Get the contents as an std::string.
TypeSize getElementOffsetInBits(unsigned Idx) const
Class to represent struct types.
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
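The Type predicates above are cheap queries on uniqued, immutable type objects. A hedged sketch of the typical dispatch style; the selection policy itself is made up for illustration:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include <cassert>
using namespace llvm;

Type *pickStorageType(Type *T) {
  LLVMContext &Ctx = T->getContext();  // every Type knows its context
  if (T->isIntegerTy())
    return T;                          // already an IntegerType
  if (T->isFloatingPointTy())          // covers isFloatTy(), isDoubleTy(), ...
    return IntegerType::getIntNTy(Ctx, 64);
  if (T->isPointerTy() || T->isStructTy())
    return IntegerType::getInt8Ty(Ctx);
  assert(T->isSized() && "only sized types can be stored");
  return IntegerType::getInt32Ty(Ctx);
}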
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldReplace returns true.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
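replaceAllUsesWith versus replaceUsesWithIf is the main decision when redirecting a Value; the latter takes a per-Use predicate. A hedged sketch where the predicate (restrict the rewrite to one block) is illustrative:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
using namespace llvm;

void redirect(Value *Old, Value *New, BasicBlock *OnlyIn) {
  New->takeName(Old);                 // transfer the old value's name
  if (!OnlyIn) {
    Old->replaceAllUsesWith(New);     // rewrite every use
    return;
  }
  Old->replaceUsesWithIf(New, [&](Use &U) {
    auto *UserI = dyn_cast<Instruction>(U.getUser());
    return UserI && UserI->getParent() == OnlyIn;
  });
}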
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
SmallVector< UseToVisit, 8 > Worklist
The worklist of to-visit uses.
constexpr ScalarTy getFixedValue() const
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A range adaptor for a pair of iterators.
A raw_ostream that writes to a SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ CE
Windows NT (Windows on ARM)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
bool defaultMaterializable(Instruction &V)
Default materializable callback.
void buildCoroutineFrame(Function &F, Shape &Shape, TargetTransformInfo &TTI, const std::function< bool(Instruction &)> &MaterializableCallback)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
bool isCPlusPlus(SourceLanguage S)
NodeAddr< DefNode * > Def
NodeAddr< BlockNode * > Block
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appropriate.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
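findDbgDeclares, findDbgUsers and findDbgValues (listed above, together with findDVRDeclares further down) are the standard way to collect the debug records attached to a value before rewriting it. A hedged sketch using exactly the signatures shown; the variable names are illustrative:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

void inspectDebugUses(Value *V) {
  // dbg.declare intrinsics (and their new-format DbgVariableRecord twins).
  TinyPtrVector<DbgDeclareInst *> Declares = findDbgDeclares(V);
  TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(V);

  // llvm.dbg.value intrinsics describing V's value.
  SmallVector<DbgValueInst *, 4> Values;
  SmallVector<DbgVariableRecord *, 4> Records;
  findDbgValues(Values, V, &Records);

  // Every debug intrinsic that refers to V, regardless of kind.
  SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
  findDbgUsers(DbgUsers, V);
  (void)Declares; (void)DVRDeclares; (void)DbgUsers;
}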
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
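isAllocaPromotable and PromoteMemToReg together form the classic mem2reg driver loop: filter the candidate allocas, then promote them in one batch. A hedged sketch; restricting the scan to the entry block is an illustrative choice:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace llvm;

void promoteEntryAllocas(Function &F, DominatorTree &DT, AssumptionCache *AC) {
  SmallVector<AllocaInst *, 8> Promotable;
  for (Instruction &I : F.getEntryBlock())
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      if (isAllocaPromotable(AI))        // only simple load/store uses
        Promotable.push_back(AI);
  if (!Promotable.empty())
    PromoteMemToReg(Promotable, DT, AC); // inserts PHI nodes as appropriate
}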
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is potentially a path from at least one block in 'Worklist' to at least one...
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connecting the specified blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alignment.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize the amount of space required.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
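The alignment helpers above are simple modular arithmetic; a worked example for Size = 20 against an 8-byte alignment makes the relationships concrete (a hedged sketch, with the expected values in comments):

#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

void alignmentArithmetic() {
  const Align A(8);
  uint64_t Padded = alignTo(20, A);           // == 24, next multiple of 8
  uint64_t Pad    = offsetToAlignment(20, A); // == 4, bytes needed to get there
  bool Ok20 = isAligned(A, 20);               // == false (20 % 8 != 0)
  bool Ok24 = isAligned(A, 24);               // == true
  unsigned L = Log2_64_Ceil(20);              // == 5, since 2^5 = 32 >= 20
  (void)Padded; (void)Pad; (void)Ok20; (void)Ok24; (void)L;
}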
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
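The range wrappers above (for_each, all_of, any_of, none_of, find_if, make_early_inc_range) accept any range directly instead of a begin/end pair. A hedged sketch over a basic block; the predicates are illustrative:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

void scanBlock(BasicBlock &BB) {
  bool AllCalls = all_of(BB, [](Instruction &I) { return isa<CallBase>(&I); });
  bool HasStore = any_of(BB, [](Instruction &I) { return isa<StoreInst>(&I); });
  bool NoPhis   = none_of(BB, [](Instruction &I) { return isa<PHINode>(&I); });
  auto It = find_if(BB, [](Instruction &I) { return I.mayWriteToMemory(); });
  // make_early_inc_range permits erasing the current element while iterating.
  for (Instruction &I : make_early_inc_range(BB))
    if (isa<DbgInfoIntrinsic>(&I))
      I.eraseFromParent();
  (void)AllCalls; (void)HasStore; (void)NoPhis; (void)It;
}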
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From and To.
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
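SplitEdge, ehAwareSplitEdge, updatePhiNodes and setUnwindEdgeTo above are the edge-surgery helpers. A hedged sketch of the common case; the analysis updaters may be null when nothing needs preserving:

#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

BasicBlock *splitNormalEdge(BasicBlock *From, BasicBlock *To,
                            DominatorTree *DT) {
  // Inserts a new block on the From->To edge and fixes up the PHIs in To.
  return SplitEdge(From, To, DT, /*LI=*/nullptr, /*MSSAU=*/nullptr, "split");
}

// When edges are re-pointed by hand instead, updatePhiNodes(To, OldPred,
// NewPred) performs the PHI fix-up, and ehAwareSplitEdge covers the case
// where To is an exception-handling block that plain SplitEdge must not touch.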
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
RematGraph::RematNode * NodeRef
static ChildIteratorType child_end(NodeRef N)
RematGraph::RematNode ** ChildIteratorType
static NodeRef getEntryNode(RematGraph *G)
static ChildIteratorType child_begin(NodeRef N)
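These GraphTraits<RematGraph *> members plug the remat graph into LLVM's generic graph iterators, so it can be traversed like any other graph. A hedged sketch of a depth-first walk; what is done with each node is left as a comment:

#include "llvm/ADT/DepthFirstIterator.h"
using namespace llvm;

static void walkRematGraph(RematGraph *RG) {
  for (auto I = df_begin(RG), E = df_end(RG); I != E; ++I) {
    RematGraph::RematNode *N = *I; // NodeRef, per the traits above
    (void)N;                       // e.g. clone or inspect N here
  }
}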
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
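OptimizedStructLayoutField and performOptimizedStructLayout implement the struct-packing helper described above: each field carries an Id, Size and Alignment, its Offset starts as FlexibleOffset unless pinned, and the call fills in the final offsets while returning the overall size and alignment. A hedged sketch, assuming the (Id, Size, Alignment[, FixedOffset]) constructor; the two fields are illustrative:

#include "llvm/Support/OptimizedStructLayout.h"
using namespace llvm;

std::pair<uint64_t, Align> layoutTwoFields() {
  OptimizedStructLayoutField Fields[] = {
      {/*Id=*/nullptr, /*Size=*/8, Align(8)}, // movable 8-byte field
      {/*Id=*/nullptr, /*Size=*/1, Align(1)}, // movable 1-byte field
  };
  auto SizeAndAlign = performOptimizedStructLayout(Fields);
  // Fields[i].Offset now holds each field's final position.
  return SizeAndAlign;
}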
A MapVector that performs no allocations if smaller than a certain size.
Align getContextAlignment() const
uint64_t ContextHeaderSize
bool IsFrameInlineInStorage
AllocaInst * PromiseAlloca
AsyncLoweringStorage AsyncLowering
AnyCoroIdRetconInst * getRetconCoroId() const
CoroIdInst * getSwitchCoroId() const
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SmallVector< CallInst *, 2 > SwiftErrorOps
AllocaInst * getPromiseAlloca() const
bool OptimizeFrame
This would only be true if optimizations are enabled.
SwitchLoweringStorage SwitchLowering
CoroBeginInst * CoroBegin
BasicBlock::iterator getInsertPtAfterFramePtr() const
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
RetconLoweringStorage RetconLowering
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
BasicBlock * AllocaSpillBlock