24#include "llvm/Config/llvm-config.h"
48#define DEBUG_TYPE "coro-suspend-crossing"
54class BlockToIndexMapping {
58 size_t size()
const {
return V.size(); }
66 size_t blockToIndex(
BasicBlock const *BB)
const {
68 assert(
I !=
V.end() && *
I == BB &&
"BasicBlockNumberng: Unknown block");
92class SuspendCrossingInfo {
93 BlockToIndexMapping Mapping;
100 bool KillLoop =
false;
101 bool Changed =
false;
106 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
111 return Block[Mapping.blockToIndex(BB)];
118 template <
bool Initialize = false>
122#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
132 size_t const FromIndex = Mapping.blockToIndex(
From);
133 size_t const ToIndex = Mapping.blockToIndex(To);
134 bool const Result =
Block[ToIndex].Kills[FromIndex];
136 <<
" answer is " << Result <<
"\n");
145 size_t const FromIndex = Mapping.blockToIndex(
From);
146 size_t const ToIndex = Mapping.blockToIndex(To);
150 <<
" answer is " << Result <<
" (path or loop)\n");
154 bool isDefinitionAcrossSuspend(
BasicBlock *DefBB,
User *U)
const {
155 auto *
I = cast<Instruction>(U);
159 if (
auto *PN = dyn_cast<PHINode>(
I))
160 if (PN->getNumIncomingValues() > 1)
168 if (isa<CoroSuspendRetconInst>(
I) || isa<CoroSuspendAsyncInst>(
I)) {
170 assert(UseBB &&
"should have split coro.suspend into its own block");
173 return hasPathCrossingSuspendPoint(DefBB, UseBB);
177 return isDefinitionAcrossSuspend(&
A.getParent()->getEntryBlock(), U);
181 auto *DefBB =
I.getParent();
186 if (isa<AnyCoroSuspendInst>(
I)) {
188 assert(DefBB &&
"should have split coro.suspend into its own block");
191 return isDefinitionAcrossSuspend(DefBB, U);
194 bool isDefinitionAcrossSuspend(
Value &V,
User *U)
const {
195 if (
auto *Arg = dyn_cast<Argument>(&V))
196 return isDefinitionAcrossSuspend(*Arg, U);
197 if (
auto *Inst = dyn_cast<Instruction>(&V))
198 return isDefinitionAcrossSuspend(*Inst, U);
201 "Coroutine could only collect Argument and Instruction now.");
206#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
210 for (
size_t I = 0,
N = BV.
size();
I <
N; ++
I)
212 dbgs() <<
" " << Mapping.indexToBlock(
I)->getName();
217 for (
size_t I = 0,
N =
Block.size();
I <
N; ++
I) {
219 dbgs() <<
B->getName() <<
":\n";
227template <
bool Initialize>
228bool SuspendCrossingInfo::computeBlockData(
230 bool Changed =
false;
233 auto BBNo = Mapping.blockToIndex(BB);
237 if constexpr (!Initialize)
241 return !
Block[Mapping.blockToIndex(BB)].Changed;
249 auto SavedConsumes =
B.Consumes;
250 auto SavedKills =
B.Kills;
253 auto PrevNo = Mapping.blockToIndex(PI);
257 B.Consumes |=
P.Consumes;
263 B.Kills |=
P.Consumes;
269 B.Kills |=
B.Consumes;
279 B.KillLoop |=
B.Kills[BBNo];
283 if constexpr (!Initialize) {
284 B.Changed = (
B.Kills != SavedKills) || (
B.Consumes != SavedConsumes);
285 Changed |=
B.Changed;
294 const size_t N = Mapping.size();
298 for (
size_t I = 0;
I <
N; ++
I) {
300 B.Consumes.resize(
N);
310 getBlockData(
CE->getParent()).End =
true;
318 auto &
B = getBlockData(SuspendBlock);
320 B.Kills |=
B.Consumes;
323 markSuspendBlock(CSI);
324 if (
auto *Save = CSI->getCoroSave())
325 markSuspendBlock(Save);
331 computeBlockData<
true>(RPOT);
332 while (computeBlockData</*Initialize*/ false>(RPOT))
352 RematNode() =
default;
356 RematNode *EntryNode;
361 SuspendCrossingInfo &Checker;
363 RematGraph(
const std::function<
bool(
Instruction &)> &MaterializableCallback,
365 : MaterializableCallback(MaterializableCallback), Checker(Checker) {
366 std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(
I);
367 EntryNode = FirstNode.get();
368 std::deque<std::unique_ptr<RematNode>> WorkList;
369 addNode(std::move(FirstNode), WorkList, cast<User>(
I));
370 while (WorkList.size()) {
371 std::unique_ptr<RematNode>
N = std::move(WorkList.front());
372 WorkList.pop_front();
373 addNode(std::move(
N), WorkList, cast<User>(
I));
377 void addNode(std::unique_ptr<RematNode> NUPtr,
378 std::deque<std::unique_ptr<RematNode>> &WorkList,
380 RematNode *
N = NUPtr.get();
381 if (Remats.count(
N->Node))
385 Remats[
N->Node] = std::move(NUPtr);
386 for (
auto &Def :
N->Node->operands()) {
388 if (!
D || !MaterializableCallback(*
D) ||
389 !Checker.isDefinitionAcrossSuspend(*
D, FirstUse))
392 if (Remats.count(
D)) {
394 N->Operands.push_back(Remats[
D].
get());
399 for (
auto &
I : WorkList) {
402 N->Operands.push_back(
I.get());
408 std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(
D);
409 N->Operands.push_back(ChildNode.get());
410 WorkList.push_back(std::move(ChildNode));
415#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
418 if (EntryNode->Node->getParent()->hasName())
419 dbgs() << EntryNode->Node->getParent()->getName();
421 EntryNode->Node->getParent()->printAsOperand(
dbgs(),
false);
422 dbgs() <<
") : " << *EntryNode->Node <<
"\n";
423 for (
auto &
E : Remats) {
424 dbgs() << *(
E.first) <<
"\n";
425 for (RematNode *U :
E.second->Operands)
426 dbgs() <<
" " << *
U->Node <<
"\n";
441 return N->Operands.begin();
449#define DEBUG_TYPE "coro-frame"
452class FrameTypeBuilder;
458 bool MayWriteBeforeCoroBegin;
461 bool MayWriteBeforeCoroBegin)
462 : Alloca(Alloca), Aliases(std::move(Aliases)),
463 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
465struct FrameDataInfo {
475 for (
const auto &
P : Spills)
477 for (
const auto &
A : Allocas)
483 auto Itr = FieldIndexMap.find(V);
484 assert(Itr != FieldIndexMap.end() &&
485 "Value does not have a frame field index");
490 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
491 "Cannot set the index for the same field twice.");
492 FieldIndexMap[V] =
Index;
496 auto Iter = FieldAlignMap.find(V);
497 assert(Iter != FieldAlignMap.end());
502 assert(FieldAlignMap.count(V) == 0);
503 FieldAlignMap.insert({V, AL});
507 auto Iter = FieldDynamicAlignMap.find(V);
508 assert(Iter != FieldDynamicAlignMap.end());
513 assert(FieldDynamicAlignMap.count(V) == 0);
514 FieldDynamicAlignMap.insert({V,
Align});
518 auto Iter = FieldOffsetMap.find(V);
519 assert(Iter != FieldOffsetMap.end());
524 assert(FieldOffsetMap.count(V) == 0);
525 FieldOffsetMap.insert({V,
Offset});
529 void updateLayoutIndex(FrameTypeBuilder &
B);
534 bool LayoutIndexUpdateStarted =
false;
551 dbgs() <<
"------------- " << Title <<
"--------------\n";
552 for (
const auto &
E : Spills) {
555 for (
auto *
I :
E.second)
562 dbgs() <<
"------------- " << Title <<
"--------------\n";
563 for (
const auto &
E : RM) {
570 dbgs() <<
"------------- Allocas --------------\n";
571 for (
const auto &
A : Allocas) {
578using FieldIDType = size_t;
583class FrameTypeBuilder {
589 FieldIDType LayoutFieldIndex;
599 bool IsFinished =
false;
601 std::optional<Align> MaxFrameAlignment;
608 std::optional<Align> MaxFrameAlignment)
613 [[nodiscard]] FieldIDType addFieldForAlloca(
AllocaInst *AI,
614 bool IsHeader =
false) {
619 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize()))
620 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
625 return addField(Ty, AI->
getAlign(), IsHeader);
655 void addFieldForAllocas(
const Function &
F, FrameDataInfo &FrameData,
659 [[nodiscard]] FieldIDType addField(
Type *Ty,
MaybeAlign MaybeFieldAlignment,
660 bool IsHeader =
false,
661 bool IsSpillOfValue =
false) {
662 assert(!IsFinished &&
"adding fields to a finished builder");
663 assert(Ty &&
"must provide a type for a field");
670 if (FieldSize == 0) {
678 Align ABIAlign =
DL.getABITypeAlign(Ty);
679 Align TyAlignment = ABIAlign;
680 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
681 TyAlignment = *MaxFrameAlignment;
682 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
688 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
691 FieldAlignment = *MaxFrameAlignment;
692 FieldSize = FieldSize + DynamicAlignBuffer;
699 StructSize =
Offset + FieldSize;
706 Fields.
push_back({FieldSize,
Offset, Ty, 0, FieldAlignment, TyAlignment,
707 DynamicAlignBuffer});
708 return Fields.
size() - 1;
715 assert(IsFinished &&
"not yet finished!");
719 Align getStructAlign()
const {
720 assert(IsFinished &&
"not yet finished!");
724 FieldIDType getLayoutFieldIndex(FieldIDType Id)
const {
725 assert(IsFinished &&
"not yet finished!");
726 return Fields[
Id].LayoutFieldIndex;
729 Field getLayoutField(FieldIDType Id)
const {
730 assert(IsFinished &&
"not yet finished!");
736void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &
B) {
737 auto Updater = [&](
Value *
I) {
738 auto Field =
B.getLayoutField(getFieldIndex(
I));
739 setFieldIndex(
I,
Field.LayoutFieldIndex);
742 Field.DynamicAlignBuffer
745 setDynamicAlign(
I, dynamicAlign);
748 LayoutIndexUpdateStarted =
true;
749 for (
auto &S : Spills)
751 for (
const auto &
A : Allocas)
753 LayoutIndexUpdateStarted =
false;
756void FrameTypeBuilder::addFieldForAllocas(
const Function &
F,
757 FrameDataInfo &FrameData,
764 for (
auto AllocaList : NonOverlapedAllocas) {
765 auto *LargestAI = *AllocaList.begin();
766 FieldIDType
Id = addFieldForAlloca(LargestAI);
767 for (
auto *Alloca : AllocaList)
775 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
795 if (
auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
796 auto *SWI =
const_cast<SwitchInst *
>(ConstSWI);
797 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
798 SWI->setDefaultDest(SWI->getSuccessor(1));
803 auto ExtractAllocas = [&]() {
804 AllocaSetType Allocas;
807 Allocas.push_back(
A.Alloca);
811 StackLifetime::LivenessType::May);
812 StackLifetimeAnalyzer.run();
814 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
815 StackLifetimeAnalyzer.getLiveRange(AI2));
817 auto GetAllocaSize = [&](
const AllocaInfo &
A) {
818 std::optional<TypeSize> RetSize =
A.Alloca->getAllocationSize(
DL);
819 assert(RetSize &&
"Variable Length Arrays (VLA) are not supported.\n");
820 assert(!RetSize->isScalable() &&
"Scalable vectors are not yet supported");
821 return RetSize->getFixedValue();
827 sort(
FrameData.Allocas, [&](
const auto &Iter1,
const auto &Iter2) {
828 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
836 for (
auto &AllocaSet : NonOverlapedAllocas) {
837 assert(!AllocaSet.empty() &&
"Processing Alloca Set is not empty.\n");
838 bool NoInference =
none_of(AllocaSet, [&](
auto Iter) {
839 return IsAllocaInferenre(Alloca, Iter);
847 bool Alignable = [&]() ->
bool {
848 auto *LargestAlloca = *AllocaSet.begin();
849 return LargestAlloca->getAlign().value() % Alloca->
getAlign().
value() ==
852 bool CouldMerge = NoInference && Alignable;
855 AllocaSet.push_back(Alloca);
860 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
865 for (
auto SwitchAndDefaultDest : DefaultSuspendDest) {
867 BasicBlock *DestBB = SwitchAndDefaultDest.second;
872 : NonOverlapedAllocas) {
873 if (AllocaSet.size() > 1) {
874 dbgs() <<
"In Function:" << F.getName() <<
"\n";
875 dbgs() <<
"Find Union Set "
877 dbgs() <<
"\tAllocas are \n";
878 for (auto Alloca : AllocaSet)
879 dbgs() <<
"\t\t" << *Alloca <<
"\n";
884void FrameTypeBuilder::finish(
StructType *Ty) {
885 assert(!IsFinished &&
"already finished!");
891 for (
auto &
Field : Fields) {
898 StructSize = SizeAndAlign.first;
899 StructAlign = SizeAndAlign.second;
902 return *
static_cast<Field *
>(
const_cast<void*
>(LayoutField.Id));
908 for (
auto &LayoutField : LayoutFields) {
909 auto &
F = getField(LayoutField);
910 if (!
isAligned(
F.TyAlignment, LayoutField.Offset))
918 FieldTypes.
reserve(LayoutFields.size() * 3 / 2);
920 for (
auto &LayoutField : LayoutFields) {
921 auto &
F = getField(LayoutField);
923 auto Offset = LayoutField.Offset;
929 if (
Offset != LastOffset) {
936 F.LayoutFieldIndex = FieldTypes.
size();
939 if (
F.DynamicAlignBuffer) {
946 Ty->
setBody(FieldTypes, Packed);
950 auto Layout =
DL.getStructLayout(Ty);
951 for (
auto &
F : Fields) {
953 assert(Layout->getElementOffset(
F.LayoutFieldIndex) ==
F.Offset);
962 for (
auto *V : FrameData.getAllDefs()) {
966 auto CacheIt = [&DIVarCache, V](
const auto &Container) {
968 return DDI->getExpression()->getNumElements() == 0;
970 if (
I != Container.end())
971 DIVarCache.
insert({V, (*I)->getVariable()});
985 OS <<
"__int_" << cast<IntegerType>(Ty)->getBitWidth();
987 return MDName->getString();
995 return "__floating_type_";
999 return "PointerType";
1002 if (!cast<StructType>(Ty)->hasName())
1003 return "__LiteralStructType_";
1008 for (
auto &Iter : Buffer)
1009 if (Iter ==
'.' || Iter ==
':')
1012 return MDName->getString();
1015 return "UnknownType";
1027 DIType *RetType =
nullptr;
1030 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1032 llvm::DINode::FlagArtificial);
1035 dwarf::DW_ATE_float,
1036 llvm::DINode::FlagArtificial);
1048 std::nullopt,
Name);
1053 llvm::DINode::FlagArtificial,
nullptr, llvm::DINodeArray());
1055 auto *StructTy = cast<StructType>(Ty);
1057 for (
unsigned I = 0;
I < StructTy->getNumElements();
I++) {
1059 Scope, LineNum, DITypeCache);
1062 Scope, DITy->
getName(), Scope->getFile(), LineNum,
1065 llvm::DINode::FlagArtificial, DITy));
1075 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
1078 RetType = CharSizeType;
1089 DITypeCache.
insert({Ty, RetType});
1106 FrameDataInfo &FrameData) {
1111 if (!DIS || !DIS->getUnit() ||
1116 assert(Shape.
ABI == coro::ABI::Switch &&
1117 "We could only build debug infomation for C++ coroutine now.\n");
1123 "Coroutine with switch ABI should own Promise alloca");
1134 }
else if (!DPVs.
empty()) {
1144 unsigned LineNum = PromiseDIVariable->
getLine();
1147 DIS->getUnit(),
Twine(
F.getName() +
".coro_frame_ty").
str(),
1150 llvm::DINodeArray());
1153 DataLayout Layout =
F.getParent()->getDataLayout();
1163 NameCache.
insert({ResumeIndex,
"__resume_fn"});
1164 NameCache.
insert({DestroyIndex,
"__destroy_fn"});
1165 NameCache.
insert({IndexIndex,
"__coro_index"});
1186 dwarf::DW_ATE_unsigned_char)});
1188 for (
auto *V : FrameData.getAllDefs()) {
1192 auto Index = FrameData.getFieldIndex(V);
1194 NameCache.
insert({
Index, DIVarCache[V]->getName()});
1195 TyCache.
insert({
Index, DIVarCache[V]->getType()});
1201 OffsetCache.
insert({ResumeIndex, {8, 0}});
1202 OffsetCache.
insert({DestroyIndex, {8, 8}});
1207 for (
auto *V : FrameData.getAllDefs()) {
1208 auto Index = FrameData.getFieldIndex(V);
1211 {
Index, {FrameData.getAlign(V).
value(), FrameData.getOffset(V)}});
1219 unsigned UnknownTypeNum = 0;
1231 assert(Ty->
isSized() &&
"We can't handle type which is not sized.\n");
1233 AlignInBits = OffsetCache[
Index].first * 8;
1234 OffsetInBits = OffsetCache[
Index].second * 8;
1238 DITy = TyCache[
Index];
1240 DITy =
solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1241 assert(DITy &&
"SolveDIType shouldn't return nullptr.\n");
1243 Name +=
"_" + std::to_string(UnknownTypeNum);
1248 FrameDITy,
Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1249 llvm::DINode::FlagArtificial, DITy));
1255 DFile, LineNum, FrameDITy,
1256 true, DINode::FlagArtificial);
1257 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
1266 if (
auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1267 auto RetainedNodes = SubProgram->getRetainedNodes();
1269 RetainedNodes.end());
1271 SubProgram->replaceOperandWith(
1278 DILoc, DPValue::LocationType::Declare);
1280 It->getParent()->insertDbgRecordBefore(NewDPV, It);
1282 DBuilder.insertDeclare(Shape.
FramePtr, FrameDIVar,
1297 FrameDataInfo &FrameData) {
1302 Name.append(
".Frame");
1307 std::optional<Align> MaxFrameAlignment;
1308 if (Shape.
ABI == coro::ABI::Async)
1310 FrameTypeBuilder
B(
C,
DL, MaxFrameAlignment);
1313 std::optional<FieldIDType> SwitchIndexFieldId;
1315 if (Shape.
ABI == coro::ABI::Switch) {
1316 auto *FnPtrTy = PointerType::getUnqual(
C);
1320 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1321 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1327 FrameData.setFieldIndex(
1328 PromiseAlloca,
B.addFieldForAlloca(PromiseAlloca,
true));
1335 SwitchIndexFieldId =
B.addField(IndexType, std::nullopt);
1337 assert(PromiseAlloca ==
nullptr &&
"lowering doesn't support promises");
1342 B.addFieldForAllocas(
F, FrameData, Shape);
1347 if (Shape.
ABI == coro::ABI::Switch && PromiseAlloca)
1350 FrameData.Allocas.emplace_back(
1353 for (
auto &S : FrameData.Spills) {
1354 Type *FieldType = S.first->getType();
1357 if (
const Argument *
A = dyn_cast<Argument>(S.first))
1358 if (
A->hasByValAttr())
1359 FieldType =
A->getParamByValType();
1360 FieldIDType Id =
B.addField(FieldType, std::nullopt,
false ,
1362 FrameData.setFieldIndex(S.first, Id);
1366 FrameData.updateLayoutIndex(
B);
1370 switch (Shape.
ABI) {
1371 case coro::ABI::Switch: {
1373 auto IndexField =
B.getLayoutField(*SwitchIndexFieldId);
1385 case coro::ABI::Retcon:
1386 case coro::ABI::RetconOnce: {
1389 = (
B.getStructSize() <= Id->getStorageSize() &&
1390 B.getStructAlign() <= Id->getStorageAlignment());
1393 case coro::ABI::Async: {
1403 "The alignment requirment of frame variables cannot be higher than "
1404 "the alignment of the async function context");
1442 const CoroBeginInst &CB,
const SuspendCrossingInfo &Checker,
1443 bool ShouldUseLifetimeStartInfo)
1445 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1452 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1453 MayWriteBeforeCoroBegin =
true;
1475 if (
SI.getValueOperand() !=
U->get())
1488 auto IsSimpleStoreThenLoad = [&]() {
1489 auto *AI = dyn_cast<AllocaInst>(
SI.getPointerOperand());
1497 while (!StoreAliases.
empty()) {
1499 for (
User *U :
I->users()) {
1502 if (
auto *LI = dyn_cast<LoadInst>(U)) {
1509 if (
auto *S = dyn_cast<StoreInst>(U))
1510 if (S->getPointerOperand() ==
I)
1512 if (
auto *II = dyn_cast<IntrinsicInst>(U))
1513 if (II->isLifetimeStartOrEnd())
1517 if (
auto *BI = dyn_cast<BitCastInst>(U)) {
1528 if (!IsSimpleStoreThenLoad())
1555 if (II.
getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1558 LifetimeStarts.insert(&II);
1562 for (
unsigned Op = 0, OpCount = CB.
arg_size();
Op < OpCount; ++
Op)
1568 bool getShouldLiveOnFrame()
const {
1569 if (!ShouldLiveOnFrame)
1570 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1571 return *ShouldLiveOnFrame;
1574 bool getMayWriteBeforeCoroBegin()
const {
return MayWriteBeforeCoroBegin; }
1577 assert(getShouldLiveOnFrame() &&
"This method should only be called if the "
1578 "alloca needs to live on the frame.");
1579 for (
const auto &
P : AliasOffetMap)
1582 "created before CoroBegin.");
1583 return AliasOffetMap;
1589 const SuspendCrossingInfo &Checker;
1596 bool MayWriteBeforeCoroBegin{
false};
1597 bool ShouldUseLifetimeStartInfo{
true};
1599 mutable std::optional<bool> ShouldLiveOnFrame{};
1601 bool computeShouldLiveOnFrame()
const {
1606 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1607 for (
auto *
I : Users)
1608 for (
auto *S : LifetimeStarts)
1609 if (Checker.isDefinitionAcrossSuspend(*S,
I))
1615 if (PI.isEscaped()) {
1616 for (
auto *
A : LifetimeStarts) {
1617 for (
auto *
B : LifetimeStarts) {
1618 if (Checker.hasPathOrLoopCrossingSuspendPoint(
A->getParent(),
1639 for (
auto *U1 : Users)
1640 for (
auto *U2 : Users)
1641 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1649 MayWriteBeforeCoroBegin =
true;
1653 for (
auto &U :
I.uses())
1663 if (DT.
dominates(&CoroBegin, &
I) || !usedAfterCoroBegin(
I))
1666 if (!IsOffsetKnown) {
1667 AliasOffetMap[&
I].reset();
1669 auto Itr = AliasOffetMap.
find(&
I);
1670 if (Itr == AliasOffetMap.end()) {
1672 }
else if (Itr->second && *Itr->second !=
Offset) {
1675 AliasOffetMap[&
I].reset();
1737 auto GetFramePointer = [&](
Value *Orig) ->
Value * {
1738 FieldIDType
Index = FrameData.getFieldIndex(Orig);
1744 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1745 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize())) {
1746 auto Count = CI->getValue().getZExtValue();
1755 auto GEP = cast<GetElementPtrInst>(
1757 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1758 if (FrameData.getDynamicAlign(Orig) != 0) {
1761 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->
getType());
1765 PtrValue = Builder.
CreateAdd(PtrValue, AlignMask);
1776 if (
GEP->getType() != Orig->getType())
1778 Orig->getName() +
Twine(
".cast"));
1783 for (
auto const &
E : FrameData.Spills) {
1785 auto SpillAlignment =
Align(FrameData.getAlign(Def));
1789 Type *ByValTy =
nullptr;
1790 if (
auto *Arg = dyn_cast<Argument>(Def)) {
1797 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1799 if (Arg->hasByValAttr())
1800 ByValTy = Arg->getParamByValType();
1801 }
else if (
auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1804 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1806 auto *
I = cast<Instruction>(Def);
1811 }
else if (
auto *II = dyn_cast<InvokeInst>(
I)) {
1815 InsertPt = NewBB->getTerminator()->getIterator();
1816 }
else if (isa<PHINode>(
I)) {
1819 if (
auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->
getTerminator()))
1824 assert(!
I->isTerminator() &&
"unexpected terminator");
1827 InsertPt =
I->getNextNode()->getIterator();
1831 auto Index = FrameData.getFieldIndex(Def);
1845 Value *CurrentReload =
nullptr;
1846 for (
auto *U :
E.second) {
1850 if (CurrentBlock != U->getParent()) {
1851 CurrentBlock = U->getParent();
1855 auto *
GEP = GetFramePointer(
E.first);
1856 GEP->setName(
E.first->getName() +
Twine(
".reload.addr"));
1858 CurrentReload =
GEP;
1862 SpillAlignment,
E.first->getName() +
Twine(
".reload"));
1869 if (
F->getSubprogram()) {
1871 while (DIs.
empty() && DPVs.
empty() && isa<LoadInst>(CurDef)) {
1872 auto *LdInst = cast<LoadInst>(CurDef);
1874 if (LdInst->getPointerOperandType() != LdInst->getType())
1876 CurDef = LdInst->getPointerOperand();
1877 if (!isa<AllocaInst, LoadInst>(CurDef))
1884 auto SalvageOne = [&](
auto *DDI) {
1885 bool AllowUnresolved =
false;
1892 DDI->getVariable(), DDI->getExpression(),
1893 DDI->getDebugLoc(), DPValue::LocationType::Declare);
1898 .insertDeclare(CurrentReload, DDI->getVariable(),
1899 DDI->getExpression(), DDI->getDebugLoc(),
1914 if (
auto *PN = dyn_cast<PHINode>(U)) {
1915 assert(PN->getNumIncomingValues() == 1 &&
1916 "unexpected number of incoming "
1917 "values in the PHINode");
1918 PN->replaceAllUsesWith(CurrentReload);
1919 PN->eraseFromParent();
1925 U->replaceUsesOfWith(Def, CurrentReload);
1929 DPV.replaceVariableLocationOp(Def, CurrentReload,
true);
1941 if (Shape.
ABI == coro::ABI::Retcon || Shape.
ABI == coro::ABI::RetconOnce ||
1942 Shape.
ABI == coro::ABI::Async) {
1945 for (
const auto &
P : FrameData.Allocas) {
1947 auto *
G = GetFramePointer(Alloca);
1951 G->takeName(Alloca);
1966 for (
const auto &
A : FrameData.Allocas) {
1968 UsersToUpdate.
clear();
1970 auto *
I = cast<Instruction>(U);
1974 if (UsersToUpdate.
empty())
1976 auto *
G = GetFramePointer(Alloca);
1982 for (
auto *DVI : DIs)
1983 DVI->replaceUsesOfWith(Alloca,
G);
1984 for (
auto *DPV : DPValues)
1985 DPV->replaceVariableLocationOp(Alloca,
G);
1991 if (
I->isLifetimeStartOrEnd()) {
1992 I->eraseFromParent();
1996 I->replaceUsesOfWith(Alloca,
G);
2000 for (
const auto &
A : FrameData.Allocas) {
2002 if (
A.MayWriteBeforeCoroBegin) {
2006 "Coroutines cannot handle copying of array allocas yet");
2008 auto *
G = GetFramePointer(Alloca);
2015 for (
const auto &Alias :
A.Aliases) {
2016 auto *
FramePtr = GetFramePointer(Alloca);
2017 auto &
Value = *Alias.second;
2022 AliasPtr, [&](
Use &U) {
return DT.
dominates(CB, U); });
2034 auto *Inst = dyn_cast<Instruction>(U.getUser());
2035 if (!Inst || DT.dominates(CB, Inst))
2038 if (auto *CI = dyn_cast<CallInst>(Inst)) {
2043 if (CI->onlyReadsMemory() ||
2044 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2049 return isa<StoreInst>(Inst) ||
2052 isa<GetElementPtrInst>(Inst) ||
2057 isa<BitCastInst>(Inst);
2059 if (HasAccessingPromiseBeforeCB) {
2061 auto *
G = GetFramePointer(PA);
2073 PHINode *UntilPHI =
nullptr) {
2074 auto *PN = cast<PHINode>(&SuccBB->
front());
2076 int Index = PN->getBasicBlockIndex(InsertedBB);
2079 V->getType(), 1, V->getName() +
Twine(
".") + SuccBB->
getName());
2082 PN->setIncomingValue(
Index, InputV);
2083 PN = dyn_cast<PHINode>(PN->getNextNode());
2084 }
while (PN != UntilPHI);
2124 auto *NewCleanupPadBB =
2127 CleanupPadBB->
getParent(), CleanupPadBB);
2130 auto *SetDispatchValuePN =
2134 auto *SwitchOnDispatch = Builder.
CreateSwitch(SetDispatchValuePN, UnreachBB,
2137 int SwitchIndex = 0;
2143 Twine(
".from.") + Pred->getName(),
2144 CleanupPadBB->
getParent(), CleanupPadBB);
2146 CaseBB->setName(CleanupPadBB->
getName() +
Twine(
".from.") +
2156 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2157 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2158 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2165 for (
auto &BB :
F) {
2166 for (
auto &Phi : BB.
phis()) {
2167 if (Phi.getNumIncomingValues() == 1) {
2173 while (!Worklist.
empty()) {
2175 auto *OriginalValue = Phi->getIncomingValue(0);
2176 Phi->replaceAllUsesWith(OriginalValue);
2204 if (
auto *CleanupPad =
2209 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2212 assert(CS->getUnwindDest() == &BB);
2222 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.
getFirstNonPHI()))) {
2237 IncomingBB->setName(BB.
getName() +
Twine(
".from.") + Pred->getName());
2255 if (
auto *PN = dyn_cast<PHINode>(&BB.
front()))
2256 if (PN->getNumIncomingValues() > 1)
2267 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2268 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2274 return isa<CoroIdInst>(&
I) || isa<CoroSaveInst>(&
I) ||
2275 isa<CoroSuspendInst>(&
I);
2297 for (
const auto &
E : AllRemats) {
2300 RematGraph *RG =
E.second.get();
2308 auto InsertPoint = &*
Use->getParent()->getFirstInsertionPt();
2309 if (isa<AnyCoroSuspendInst>(
Use)) {
2311 Use->getParent()->getSinglePredecessor();
2312 assert(SuspendPredecessorBlock &&
"malformed coro suspend instruction");
2320 for (;
I != RPOT.
end(); ++
I) {
2322 CurrentMaterialization =
D->clone();
2323 CurrentMaterialization->
setName(
D->getName());
2325 InsertPoint = CurrentMaterialization;
2329 for (
auto &
I : InstructionsToProcess)
2330 I->replaceUsesOfWith(
D, CurrentMaterialization);
2335 for (
unsigned i = 0,
E =
Use->getNumOperands(); i !=
E; ++i)
2336 if (
Use->getOperand(i) ==
D)
2338 {
Use,
D, CurrentMaterialization});
2340 InstructionsToProcess.push_back(CurrentMaterialization);
2345 for (
auto &R : FinalInstructionsToProcess) {
2346 if (
auto *PN = dyn_cast<PHINode>(R.Use)) {
2347 assert(PN->getNumIncomingValues() == 1 &&
"unexpected number of incoming "
2348 "values in the PHINode");
2349 PN->replaceAllUsesWith(R.Remat);
2350 PN->eraseFromParent();
2353 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2360 auto *BB =
I->getParent();
2378 return isa<AnyCoroSuspendInst>(BB->
front());
2413 if (
auto FI = dyn_cast<CoroAllocaFreeInst>(
User))
2414 VisitedOrFreeBBs.
insert(FI->getParent());
2423 unsigned depth = 3) {
2426 if (depth == 0)
return false;
2445 for (
auto *U : AI->
users()) {
2446 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2461 for (
auto *AI : LocalAllocas) {
2466 Value *StackSave =
nullptr;
2474 for (
auto *U : AI->
users()) {
2476 if (isa<CoroAllocaGetInst>(U)) {
2477 U->replaceAllUsesWith(Alloca);
2483 auto FI = cast<CoroAllocaFreeInst>(U);
2489 DeadInsts.
push_back(cast<Instruction>(U));
2506 if (isa<CoroAllocaGetInst>(U)) {
2507 U->replaceAllUsesWith(Alloc);
2509 auto FI = cast<CoroAllocaFreeInst>(U);
2513 DeadInsts.
push_back(cast<Instruction>(U));
2520 return cast<Instruction>(Alloc);
2527 auto FnTy = FunctionType::get(ValueTy, {},
false);
2530 auto Call = Builder.
CreateCall(FnTy, Fn, {});
2542 auto FnTy = FunctionType::get(Builder.
getPtrTy(),
2543 {V->getType()},
false);
2546 auto Call = Builder.
CreateCall(FnTy, Fn, { V });
2565 auto ValueBeforeCall = Builder.
CreateLoad(ValueTy, Alloca);
2571 if (isa<CallInst>(Call)) {
2574 auto Invoke = cast<InvokeInst>(Call);
2575 Builder.
SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2593 if (isa<LoadInst>(
User) || isa<StoreInst>(
User))
2597 auto Call = cast<Instruction>(
User);
2616 IRBuilder<> Builder(
F.getEntryBlock().getFirstNonPHIOrDbg());
2618 auto ArgTy = cast<PointerType>(Arg.
getType());
2619 auto ValueTy = PointerType::getUnqual(
F.getContext());
2624 auto Alloca = Builder.
CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2639 auto FinalValue = Builder.
CreateLoad(ValueTy, Alloca);
2654 for (
auto &Arg :
F.args()) {
2655 if (!Arg.hasSwiftErrorAttr())
continue;
2662 for (
auto &Inst :
F.getEntryBlock()) {
2663 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2675 if (!AllocasToPromote.
empty()) {
2684 const FrameDataInfo &FrameData,
2692 for (
auto *Def : FrameData.getAllDefs()) {
2693 for (
User *U : Def->users()) {
2694 auto Inst = cast<Instruction>(U);
2695 if (Inst->getParent() != CoroBegin->
getParent() ||
2703 while (!Worklist.
empty()) {
2705 for (
User *U : Def->users()) {
2706 auto Inst = cast<Instruction>(U);
2731 SuspendCrossingInfo &Checker) {
2739 DomSet.
insert(&
F.getEntryBlock());
2743 "should have split coro.suspend into its own block");
2757 if (
auto* II = dyn_cast<IntrinsicInst>(
I))
2767 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2783 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2786 if (collectLifetimeStart(UI, AI))
2794 if (Valid && Lifetimes.
size() != 0) {
2795 auto *NewLifetime = Lifetimes[0]->clone();
2796 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2797 NewLifetime->insertBefore(DomBB->getTerminator());
2801 S->eraseFromParent();
2810 const SuspendCrossingInfo &Checker,
2823 if (AI->
hasMetadata(LLVMContext::MD_coro_outside_frame))
2829 bool ShouldUseLifetimeStartInfo =
2830 (Shape.
ABI != coro::ABI::Async && Shape.
ABI != coro::ABI::Retcon &&
2831 Shape.
ABI != coro::ABI::RetconOnce);
2834 ShouldUseLifetimeStartInfo};
2835 Visitor.visitPtr(*AI);
2836 if (!Visitor.getShouldLiveOnFrame())
2839 Visitor.getMayWriteBeforeCoroBegin());
2842static std::optional<std::pair<Value &, DIExpression &>>
2844 bool OptimizeFrame,
bool UseEntryValue,
Function *
F,
2846 bool SkipOutermostLoad) {
2848 auto InsertPt =
F->getEntryBlock().getFirstInsertionPt();
2849 while (isa<IntrinsicInst>(InsertPt))
2853 while (
auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2854 if (
auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2855 Storage = LdInst->getPointerOperand();
2862 if (!SkipOutermostLoad)
2864 }
else if (
auto *StInst = dyn_cast<StoreInst>(Inst)) {
2865 Storage = StInst->getValueOperand();
2872 if (!
Op || !AdditionalValues.
empty()) {
2880 SkipOutermostLoad =
false;
2883 return std::nullopt;
2885 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2886 const bool IsSwiftAsyncArg =
2887 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2892 if (IsSwiftAsyncArg && UseEntryValue && !Expr->
isEntryValue() &&
2901 if (StorageAsArg && !OptimizeFrame && !IsSwiftAsyncArg) {
2902 auto &Cached = ArgToAllocaMap[StorageAsArg];
2905 Storage->
getName() +
".debug");
2919 return {{*Storage, *Expr}};
2929 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2933 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
2938 Value *Storage = &SalvagedInfo->first;
2946 if (isa<DbgDeclareInst>(DVI)) {
2947 std::optional<BasicBlock::iterator> InsertPt;
2948 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
2949 InsertPt =
I->getInsertionPointAfterDef();
2953 if (!OptimizeFrame &&
I->getDebugLoc())
2955 }
else if (isa<Argument>(Storage))
2956 InsertPt =
F->getEntryBlock().begin();
2958 DVI.
moveBefore(*(*InsertPt)->getParent(), *InsertPt);
2964 bool OptimizeFrame,
bool UseEntryValue) {
2973 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
2978 Value *Storage = &SalvagedInfo->first;
2986 if (DPV.
getType() == DPValue::LocationType::Declare) {
2987 std::optional<BasicBlock::iterator> InsertPt;
2988 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
2989 InsertPt =
I->getInsertionPointAfterDef();
2993 if (!OptimizeFrame &&
I->getDebugLoc())
2995 }
else if (isa<Argument>(Storage))
2996 InsertPt =
F->getEntryBlock().begin();
2999 (*InsertPt)->getParent()->insertDbgRecordBefore(&DPV, *InsertPt);
3005 Function &
F, SuspendCrossingInfo &Checker,
3006 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3016 if (!MaterializableCallback(
I))
3018 for (
User *U :
I.users())
3019 if (Checker.isDefinitionAcrossSuspend(
I, U))
3020 Spills[&
I].push_back(cast<Instruction>(U));
3039 for (
auto &
E : Spills) {
3043 if (AllRemats.
count(U))
3048 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
3052 for (
auto I = RPOT.begin();
I != RPOT.end();
3053 ++
I) { (*I)->Node->dump(); }
dbgs()
3056 AllRemats[U] = std::move(RematUPtr);
3068 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3073 if (
Shape.
ABI == coro::ABI::Switch &&
3082 if (
auto *Save = CSI->getCoroSave())
3095 if (
auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
3096 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
3097 if (!MustTailCallFn)
3117 SuspendCrossingInfo Checker(
F,
Shape);
3121 FrameDataInfo FrameData;
3125 Shape.
ABI != coro::ABI::RetconOnce)
3130 for (
User *U :
A.users())
3131 if (Checker.isDefinitionAcrossSuspend(
A, U))
3132 FrameData.Spills[&
A].push_back(cast<Instruction>(U));
3142 if (
auto AI = dyn_cast<CoroAllocaAllocInst>(&
I)) {
3156 for (
User *U : Alloc->users()) {
3157 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3158 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3164 if (isa<CoroAllocaGetInst>(
I))
3167 if (
auto *AI = dyn_cast<AllocaInst>(&
I)) {
3172 for (
User *U :
I.users())
3173 if (Checker.isDefinitionAcrossSuspend(
I, U)) {
3175 if (
I.getType()->isTokenTy())
3177 "token definition is separated from the use by a suspend point");
3178 FrameData.Spills[&
I].push_back(cast<Instruction>(U));
3188 for (
auto &Iter : FrameData.Spills) {
3189 auto *V = Iter.first;
3194 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3195 FrameData.Spills[V].push_back(DVI);
3198 if (Checker.isDefinitionAcrossSuspend(*V, DPV->Marker->MarkedInstr))
3199 FrameData.Spills[V].push_back(DPV->Marker->MarkedInstr);
3203 if (
Shape.
ABI == coro::ABI::Retcon ||
Shape.
ABI == coro::ABI::RetconOnce ||
3213 for (
auto *
I : DeadInstructions)
3214 I->eraseFromParent();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
Expand Atomic instructions
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
static void cleanupSinglePredPHIs(Function &F)
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
static bool isCoroutineStructureIntrinsic(Instruction &I)
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
static void splitAround(Instruction *I, const Twine &Name)
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
static void rewritePHIs(BasicBlock &BB)
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
static std::optional< std::pair< Value &, DIExpression & > > salvageDebugInfoImpl(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, bool OptimizeFrame, bool UseEntryValue, Function *F, Value *Storage, DIExpression *Expr, bool SkipOutermostLoad)
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker)
For each local variable that all of its user are only used inside one of suspended region,...
static bool isSuspendBlock(BasicBlock *BB)
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
static void rewriteMaterializableInstructions(const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &AllRemats)
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
static StringRef solveTypeName(Type *Ty)
Create name for Type.
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
static void doRematerializations(Function &F, SuspendCrossingInfo &Checker, const std::function< bool(Instruction &)> &MaterializableCallback)
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
static void dumpRemats(StringRef Title, const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &RM)
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Given that RA is a live value
static bool isLifetimeStart(const Instruction *Inst)
static MaybeAlign getAlign(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
mir Rename Register Operands
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
llvm::cl::opt< bool > UseNewDbgInfoFormat
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB, BasicBlock::iterator InsertBefore)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
This class represents the llvm.coro.begin instruction.
This represents the llvm.coro.suspend instruction.
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
DILocalScope * getScope() const
Get the local scope for this variable.
Base class for scope-like contexts.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LocationType getType() const
void setExpression(DIExpression *NewExpr)
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.declare instruction.
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
DIExpression * getExpression() const
DILocation * get() const
Get the underlying DILocation.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", bool IsInBounds=false)
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
UnreachableInst * CreateUnreachable()
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void visitIntrinsicInst(IntrinsicInst &I)
void visitBitCastInst(BitCastInst &I)
void visit(Iterator Start, Iterator End)
void visitPHINode(PHINode &I)
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
void visitSelectInst(SelectInst &I)
void visitGetElementPtrInst(GetElementPtrInst &I)
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
static MDString * get(LLVMContext &Context, StringRef Str)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
size_type count(const KeyT &Key) const
This is the common base class for memset/memcpy/memmove.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
iterator find(ConstPtrType Ptr) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Compute live ranges of allocas.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
std::string str() const
str - Get the contents as an std::string.
TypeSize getElementOffsetInBits(unsigned Idx) const
Class to represent struct types.
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A range adaptor for a pair of iterators.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ CE
Windows NT (Windows on ARM)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool OptimizeFrame, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
bool defaultMaterializable(Instruction &V)
Default materializable callback.
void buildCoroutineFrame(Function &F, Shape &Shape, TargetTransformInfo &TTI, const std::function< bool(Instruction &)> &MaterializableCallback)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
bool isCPlusPlus(SourceLanguage S)
NodeAddr< DefNode * > Def
NodeAddr< BlockNode * > Block
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
TinyPtrVector< DPValue * > findDPVDeclares(Value *V)
As above, for DPVDeclares.
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DPValue * > *DPValues=nullptr)
Finds the debug info intrinsics describing a value.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DPValue * > *DPValues=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize the total size of the struct while honoring the fields' alignment requirements.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From and To.
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DPValue types only and downcast.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
RematGraph::RematNode * NodeRef
static ChildIteratorType child_end(NodeRef N)
RematGraph::RematNode ** ChildIteratorType
static NodeRef getEntryNode(RematGraph *G)
static ChildIteratorType child_begin(NodeRef N)
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Align getContextAlignment() const
uint64_t ContextHeaderSize
bool IsFrameInlineInStorage
AllocaInst * PromiseAlloca
AsyncLoweringStorage AsyncLowering
AnyCoroIdRetconInst * getRetconCoroId() const
CoroIdInst * getSwitchCoroId() const
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SmallVector< CallInst *, 2 > SwiftErrorOps
AllocaInst * getPromiseAlloca() const
bool OptimizeFrame
This would only be true if optimizations are enabled.
SwitchLoweringStorage SwitchLowering
CoroBeginInst * CoroBegin
BasicBlock::iterator getInsertPtAfterFramePtr() const
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
RetconLoweringStorage RetconLowering
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
BasicBlock * AllocaSpillBlock