CoroFrame.cpp
1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This file contains classes used to discover if, for a particular value,
9// there is a path from definition to use that crosses a suspend point.
10//
11// Using the information discovered we form a Coroutine Frame structure to
12// contain those values. All uses of those values are replaced with appropriate
13// GEP + load from the coroutine frame. At the point of the definition we spill
14// the value into the coroutine frame.
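15//
16// For illustration, a value defined before a suspend point and used after it
17// is conceptually rewritten as follows (sketch only; the frame type, field
18// index N and value names are hypothetical):
19//
20//   %x = call i32 @f()               %x = call i32 @f()
21//   ...                              %x.addr = getelementptr %f.Frame, ptr %hdl, i32 0, i32 N
22//   coro.suspend            ==>      store i32 %x, ptr %x.addr
23//   use of %x                        coro.suspend
24//                                    %x.reload = load i32, ptr %x.addr
25//                                    use of %x.reload
26//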
15//===----------------------------------------------------------------------===//
16
17#include "CoroInternal.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/PostOrderIterator.h"
20#include "llvm/ADT/ScopeExit.h"
21#include "llvm/ADT/SmallString.h"
22#include "llvm/Analysis/PtrUseVisitor.h"
23#include "llvm/Analysis/StackLifetime.h"
24#include "llvm/Config/llvm-config.h"
25#include "llvm/IR/CFG.h"
26#include "llvm/IR/DIBuilder.h"
27#include "llvm/IR/DebugInfo.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/IRBuilder.h"
30#include "llvm/IR/InstIterator.h"
31#include "llvm/IR/IntrinsicInst.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/MathExtras.h"
34#include "llvm/Support/OptimizedStructLayout.h"
35#include "llvm/Support/circular_raw_ostream.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Transforms/Utils/BasicBlockUtils.h"
38#include "llvm/Transforms/Utils/Local.h"
39#include "llvm/Transforms/Utils/PromoteMemToReg.h"
40#include <algorithm>
41#include <deque>
42#include <optional>
43
44using namespace llvm;
45
46// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
47// "coro-frame", which results in leaner debug spew.
48#define DEBUG_TYPE "coro-suspend-crossing"
49
50enum { SmallVectorThreshold = 32 };
51
52// Provides a two-way mapping between blocks and numbers.
53namespace {
54class BlockToIndexMapping {
55 SmallVector<BasicBlock *, SmallVectorThreshold> V;
56
57public:
58 size_t size() const { return V.size(); }
59
60 BlockToIndexMapping(Function &F) {
61 for (BasicBlock &BB : F)
62 V.push_back(&BB);
63 llvm::sort(V);
64 }
65
66 size_t blockToIndex(BasicBlock const *BB) const {
67 auto *I = llvm::lower_bound(V, BB);
68 assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
69 return I - V.begin();
70 }
71
72 BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
73};
74} // end anonymous namespace
75
76// The SuspendCrossingInfo maintains data that allows us to answer the
77// question: given two BasicBlocks A and B, is there a path from A to B that
78// passes through a suspend point?
79//
80// For every basic block 'i' it maintains a BlockData that consists of:
81// Consumes: a bit vector which contains a set of indices of blocks that can
82// reach block 'i'. A block can trivially reach itself.
83// Kills: a bit vector which contains a set of indices of blocks that can
84// reach block 'i' via a path that crosses a suspend point and does
85// not repeat 'i' (a path to 'i' without cycles containing 'i').
86// Suspend: a boolean indicating whether block 'i' contains a suspend point.
87// End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
88// KillLoop: There is a path from 'i' to 'i' not otherwise repeating 'i' that
89// crosses a suspend point.
90//
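// For example, in the simple CFG
//
//   entry -> susp -> use          (where 'susp' contains a suspend point)
//
// Consumes[use] = {entry, susp, use} and Kills[use] = {entry, susp}, so
// hasPathCrossingSuspendPoint(entry, use) is true and any value defined in
// 'entry' and used in 'use' must be spilled to the coroutine frame.
//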
91namespace {
92class SuspendCrossingInfo {
93 BlockToIndexMapping Mapping;
94
95 struct BlockData {
96 BitVector Consumes;
97 BitVector Kills;
98 bool Suspend = false;
99 bool End = false;
100 bool KillLoop = false;
101 bool Changed = false;
102 };
103 SmallVector<BlockData, SmallVectorThreshold> Block;
104
105 iterator_range<pred_iterator> predecessors(BlockData const &BD) const {
106 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
107 return llvm::predecessors(BB);
108 }
109
110 BlockData &getBlockData(BasicBlock *BB) {
111 return Block[Mapping.blockToIndex(BB)];
112 }
113
114 /// Compute the BlockData for the current function in one iteration.
115 /// Initialize - Whether this is the first iteration; if so, we can optimize
116 /// the initial case a little bit by switching the loop manually.
117 /// Returns whether the BlockData changed in this iteration.
118 template <bool Initialize = false>
119 bool computeBlockData(const ReversePostOrderTraversal<Function *> &RPOT);
120
121public:
122#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
123 void dump() const;
124 void dump(StringRef Label, BitVector const &BV) const;
125#endif
126
127 SuspendCrossingInfo(Function &F, coro::Shape &Shape);
128
129 /// Returns true if there is a path from \p From to \p To crossing a suspend
130 /// point without crossing \p From a 2nd time.
131 bool hasPathCrossingSuspendPoint(BasicBlock *From, BasicBlock *To) const {
132 size_t const FromIndex = Mapping.blockToIndex(From);
133 size_t const ToIndex = Mapping.blockToIndex(To);
134 bool const Result = Block[ToIndex].Kills[FromIndex];
135 LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
136 << " answer is " << Result << "\n");
137 return Result;
138 }
139
140 /// Returns true if there is a path from \p From to \p To crossing a suspend
141 /// point without crossing \p From a 2nd time. If \p From is the same as \p To
142 /// this will also check if there is a looping path crossing a suspend point.
143 bool hasPathOrLoopCrossingSuspendPoint(BasicBlock *From,
144 BasicBlock *To) const {
145 size_t const FromIndex = Mapping.blockToIndex(From);
146 size_t const ToIndex = Mapping.blockToIndex(To);
147 bool Result = Block[ToIndex].Kills[FromIndex] ||
148 (From == To && Block[ToIndex].KillLoop);
149 LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
150 << " answer is " << Result << " (path or loop)\n");
151 return Result;
152 }
153
154 bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
155 auto *I = cast<Instruction>(U);
156
157 // We rewrote PHINodes, so that only the ones with exactly one incoming
158 // value need to be analyzed.
159 if (auto *PN = dyn_cast<PHINode>(I))
160 if (PN->getNumIncomingValues() > 1)
161 return false;
162
163 BasicBlock *UseBB = I->getParent();
164
165 // As a special case, treat uses by an llvm.coro.suspend.retcon or an
166 // llvm.coro.suspend.async as if they were uses in the suspend's single
167 // predecessor: the uses conceptually occur before the suspend.
168 if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
169 UseBB = UseBB->getSinglePredecessor();
170 assert(UseBB && "should have split coro.suspend into its own block");
171 }
172
173 return hasPathCrossingSuspendPoint(DefBB, UseBB);
174 }
175
176 bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
177 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
178 }
179
180 bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
181 auto *DefBB = I.getParent();
182
183 // As a special case, treat values produced by an llvm.coro.suspend.*
184 // as if they were defined in the single successor: the uses
185 // conceptually occur after the suspend.
186 if (isa<AnyCoroSuspendInst>(I)) {
187 DefBB = DefBB->getSingleSuccessor();
188 assert(DefBB && "should have split coro.suspend into its own block");
189 }
190
191 return isDefinitionAcrossSuspend(DefBB, U);
192 }
193
194 bool isDefinitionAcrossSuspend(Value &V, User *U) const {
195 if (auto *Arg = dyn_cast<Argument>(&V))
196 return isDefinitionAcrossSuspend(*Arg, U);
197 if (auto *Inst = dyn_cast<Instruction>(&V))
198 return isDefinitionAcrossSuspend(*Inst, U);
199
201 "Coroutine could only collect Argument and Instruction now.");
202 }
203};
204} // end anonymous namespace
205
206#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
207LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
208 BitVector const &BV) const {
209 dbgs() << Label << ":";
210 for (size_t I = 0, N = BV.size(); I < N; ++I)
211 if (BV[I])
212 dbgs() << " " << Mapping.indexToBlock(I)->getName();
213 dbgs() << "\n";
214}
215
216LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
217 for (size_t I = 0, N = Block.size(); I < N; ++I) {
218 BasicBlock *const B = Mapping.indexToBlock(I);
219 dbgs() << B->getName() << ":\n";
220 dump(" Consumes", Block[I].Consumes);
221 dump(" Kills", Block[I].Kills);
222 }
223 dbgs() << "\n";
224}
225#endif
226
227template <bool Initialize>
228bool SuspendCrossingInfo::computeBlockData(
229 const ReversePostOrderTraversal<Function *> &RPOT) {
230 bool Changed = false;
231
232 for (const BasicBlock *BB : RPOT) {
233 auto BBNo = Mapping.blockToIndex(BB);
234 auto &B = Block[BBNo];
235
236 // We don't need to consult the predecessors during initialization.
237 if constexpr (!Initialize)
238 // If none of the predecessors of the current block changed,
239 // the BlockData for the current block cannot change either.
240 if (all_of(predecessors(B), [this](BasicBlock *BB) {
241 return !Block[Mapping.blockToIndex(BB)].Changed;
242 })) {
243 B.Changed = false;
244 continue;
245 }
246
247 // Save the Consumes and Kills bitsets so that it is easy to see
248 // if anything changed after propagation.
249 auto SavedConsumes = B.Consumes;
250 auto SavedKills = B.Kills;
251
252 for (BasicBlock *PI : predecessors(B)) {
253 auto PrevNo = Mapping.blockToIndex(PI);
254 auto &P = Block[PrevNo];
255
256 // Propagate Kills and Consumes from predecessors into B.
257 B.Consumes |= P.Consumes;
258 B.Kills |= P.Kills;
259
260 // If block P is a suspend block, it should propagate kills into block
261 // B for every block P consumes.
262 if (P.Suspend)
263 B.Kills |= P.Consumes;
264 }
265
266 if (B.Suspend) {
267 // If block B is a suspend block, it should kill all of the blocks it
268 // consumes.
269 B.Kills |= B.Consumes;
270 } else if (B.End) {
271 // If block B is an end block, it should not propagate kills as the
272 // blocks following coro.end() are reached during initial invocation
273 // of the coroutine while all the data are still available on the
274 // stack or in the registers.
275 B.Kills.reset();
276 } else {
277 // This is reached when block B is neither a suspend nor a coro.end block,
278 // and we need to make sure that it is not in its own kill set.
279 B.KillLoop |= B.Kills[BBNo];
280 B.Kills.reset(BBNo);
281 }
282
283 if constexpr (!Initialize) {
284 B.Changed = (B.Kills != SavedKills) || (B.Consumes != SavedConsumes);
285 Changed |= B.Changed;
286 }
287 }
288
289 return Changed;
290}
291
292SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
293 : Mapping(F) {
294 const size_t N = Mapping.size();
295 Block.resize(N);
296
297 // Initialize every block so that it consumes itself
298 for (size_t I = 0; I < N; ++I) {
299 auto &B = Block[I];
300 B.Consumes.resize(N);
301 B.Kills.resize(N);
302 B.Consumes.set(I);
303 B.Changed = true;
304 }
305
306 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
307 // the code beyond coro.end is reachable during initial invocation of the
308 // coroutine.
309 for (auto *CE : Shape.CoroEnds)
310 getBlockData(CE->getParent()).End = true;
311
312 // Mark all suspend blocks and indicate that they kill everything they
313 // consume. Note that crossing coro.save also requires a spill, as any code
314 // between coro.save and coro.suspend may resume the coroutine and all of the
315 // state needs to be saved by that time.
316 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
317 BasicBlock *SuspendBlock = BarrierInst->getParent();
318 auto &B = getBlockData(SuspendBlock);
319 B.Suspend = true;
320 B.Kills |= B.Consumes;
321 };
322 for (auto *CSI : Shape.CoroSuspends) {
323 markSuspendBlock(CSI);
324 if (auto *Save = CSI->getCoroSave())
325 markSuspendBlock(Save);
326 }
327
328 // A reverse post-order traversal is generally faster for a forward-edges
329 // dataflow analysis such as this one.
330 ReversePostOrderTraversal<Function *> RPOT(&F);
331 computeBlockData</*Initialize=*/true>(RPOT);
332 while (computeBlockData</*Initialize*/ false>(RPOT))
333 ;
334
335 LLVM_DEBUG(dump());
336}
337
338namespace {
339
340// RematGraph is used to construct a DAG for rematerializable instructions.
341// When the constructor is invoked with a candidate instruction (which is
342// materializable), it builds a DAG of materializable instructions from that
343// point.
344// Typically, for each instruction identified as re-materializable across a
345// suspend point, a RematGraph will be created.
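//
// As a sketch of the idea: if `%d = add i32 %a, 1` is materializable and %d
// is used across a suspend point, then instead of spilling %d we can simply
// re-run the add after the suspend from %a (or from %a's reload). The
// RematGraph rooted at %d records %d and, transitively, any materializable
// operands that also cross the suspend.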
346struct RematGraph {
347 // Each RematNode in the graph contains the edges to instructions providing
348 // operands in the current node.
349 struct RematNode {
350 Instruction *Node = nullptr;
351 SmallVector<RematNode *> Operands;
352 RematNode() = default;
353 RematNode(Instruction *V) : Node(V) {}
354 };
355
356 RematNode *EntryNode;
357 using RematNodeMap =
358 SmallMapVector<Instruction *, std::unique_ptr<RematNode>, 8>;
359 RematNodeMap Remats;
360 const std::function<bool(Instruction &)> &MaterializableCallback;
361 SuspendCrossingInfo &Checker;
362
363 RematGraph(const std::function<bool(Instruction &)> &MaterializableCallback,
364 Instruction *I, SuspendCrossingInfo &Checker)
365 : MaterializableCallback(MaterializableCallback), Checker(Checker) {
366 std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(I);
367 EntryNode = FirstNode.get();
368 std::deque<std::unique_ptr<RematNode>> WorkList;
369 addNode(std::move(FirstNode), WorkList, cast<User>(I));
370 while (WorkList.size()) {
371 std::unique_ptr<RematNode> N = std::move(WorkList.front());
372 WorkList.pop_front();
373 addNode(std::move(N), WorkList, cast<User>(I));
374 }
375 }
376
377 void addNode(std::unique_ptr<RematNode> NUPtr,
378 std::deque<std::unique_ptr<RematNode>> &WorkList,
379 User *FirstUse) {
380 RematNode *N = NUPtr.get();
381 if (Remats.count(N->Node))
382 return;
383
384 // We haven't seen this node yet - add it to the list
385 Remats[N->Node] = std::move(NUPtr);
386 for (auto &Def : N->Node->operands()) {
387 Instruction *D = dyn_cast<Instruction>(Def.get());
388 if (!D || !MaterializableCallback(*D) ||
389 !Checker.isDefinitionAcrossSuspend(*D, FirstUse))
390 continue;
391
392 if (Remats.count(D)) {
393 // Already have this in the graph
394 N->Operands.push_back(Remats[D].get());
395 continue;
396 }
397
398 bool NoMatch = true;
399 for (auto &I : WorkList) {
400 if (I->Node == D) {
401 NoMatch = false;
402 N->Operands.push_back(I.get());
403 break;
404 }
405 }
406 if (NoMatch) {
407 // Create a new node
408 std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(D);
409 N->Operands.push_back(ChildNode.get());
410 WorkList.push_back(std::move(ChildNode));
411 }
412 }
413 }
414
415#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
416 void dump() const {
417 dbgs() << "Entry (";
418 if (EntryNode->Node->getParent()->hasName())
419 dbgs() << EntryNode->Node->getParent()->getName();
420 else
421 EntryNode->Node->getParent()->printAsOperand(dbgs(), false);
422 dbgs() << ") : " << *EntryNode->Node << "\n";
423 for (auto &E : Remats) {
424 dbgs() << *(E.first) << "\n";
425 for (RematNode *U : E.second->Operands)
426 dbgs() << " " << *U->Node << "\n";
427 }
428 }
429#endif
430};
431} // end anonymous namespace
432
433namespace llvm {
434
435template <> struct GraphTraits<RematGraph *> {
436 using NodeRef = RematGraph::RematNode *;
437 using ChildIteratorType = RematGraph::RematNode **;
438
439 static NodeRef getEntryNode(RematGraph *G) { return G->EntryNode; }
440 static ChildIteratorType child_begin(NodeRef N) {
441 return N->Operands.begin();
442 }
443 static ChildIteratorType child_end(NodeRef N) { return N->Operands.end(); }
444};
445
446} // end namespace llvm
447
448#undef DEBUG_TYPE // "coro-suspend-crossing"
449#define DEBUG_TYPE "coro-frame"
450
451namespace {
452class FrameTypeBuilder;
453// Mapping from the to-be-spilled value to all the users that need reload.
454using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
455struct AllocaInfo {
456 AllocaInst *Alloca;
457 DenseMap<Instruction *, std::optional<APInt>> Aliases;
458 bool MayWriteBeforeCoroBegin;
459 AllocaInfo(AllocaInst *Alloca,
460 DenseMap<Instruction *, std::optional<APInt>> Aliases,
461 bool MayWriteBeforeCoroBegin)
462 : Alloca(Alloca), Aliases(std::move(Aliases)),
463 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
464};
465struct FrameDataInfo {
466 // All the values (that are not allocas) that need to be spilled to the
467 // frame.
468 SpillInfo Spills;
469 // Allocas contains all values defined as allocas that need to live in the
470 // frame.
471 SmallVector<AllocaInfo, 8> Allocas;
472
473 SmallVector<Value *, 8> getAllDefs() const {
474 SmallVector<Value *, 8> Defs;
475 for (const auto &P : Spills)
476 Defs.push_back(P.first);
477 for (const auto &A : Allocas)
478 Defs.push_back(A.Alloca);
479 return Defs;
480 }
481
482 uint32_t getFieldIndex(Value *V) const {
483 auto Itr = FieldIndexMap.find(V);
484 assert(Itr != FieldIndexMap.end() &&
485 "Value does not have a frame field index");
486 return Itr->second;
487 }
488
489 void setFieldIndex(Value *V, uint32_t Index) {
490 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
491 "Cannot set the index for the same field twice.");
492 FieldIndexMap[V] = Index;
493 }
494
495 Align getAlign(Value *V) const {
496 auto Iter = FieldAlignMap.find(V);
497 assert(Iter != FieldAlignMap.end());
498 return Iter->second;
499 }
500
501 void setAlign(Value *V, Align AL) {
502 assert(FieldAlignMap.count(V) == 0);
503 FieldAlignMap.insert({V, AL});
504 }
505
506 uint64_t getDynamicAlign(Value *V) const {
507 auto Iter = FieldDynamicAlignMap.find(V);
508 assert(Iter != FieldDynamicAlignMap.end());
509 return Iter->second;
510 }
511
512 void setDynamicAlign(Value *V, uint64_t Align) {
513 assert(FieldDynamicAlignMap.count(V) == 0);
514 FieldDynamicAlignMap.insert({V, Align});
515 }
516
517 uint64_t getOffset(Value *V) const {
518 auto Iter = FieldOffsetMap.find(V);
519 assert(Iter != FieldOffsetMap.end());
520 return Iter->second;
521 }
522
523 void setOffset(Value *V, uint64_t Offset) {
524 assert(FieldOffsetMap.count(V) == 0);
525 FieldOffsetMap.insert({V, Offset});
526 }
527
528 // Remap the index of every field in the frame, using the final layout index.
529 void updateLayoutIndex(FrameTypeBuilder &B);
530
531private:
532 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
533 // twice by mistake.
534 bool LayoutIndexUpdateStarted = false;
535 // Map from values to their slot indexes on the frame. They will be first set
536 // with their original insertion field index. After the frame is built, their
537 // indexes will be updated into the final layout index.
538 DenseMap<Value *, uint32_t> FieldIndexMap;
539 // Map from values to their alignment on the frame. They would be set after
540 // the frame is built.
541 DenseMap<Value *, Align> FieldAlignMap;
542 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
543 // Map from values to their offset on the frame. They would be set after
544 // the frame is built.
545 DenseMap<Value *, uint64_t> FieldOffsetMap;
546};
547} // namespace
548
549#ifndef NDEBUG
550static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
551 dbgs() << "------------- " << Title << "--------------\n";
552 for (const auto &E : Spills) {
553 E.first->dump();
554 dbgs() << " user: ";
555 for (auto *I : E.second)
556 I->dump();
557 }
558}
559static void dumpRemats(
560 StringRef Title,
561 const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8> &RM) {
562 dbgs() << "------------- " << Title << "--------------\n";
563 for (const auto &E : RM) {
564 E.second->dump();
565 dbgs() << "--\n";
566 }
567}
568
569static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
570 dbgs() << "------------- Allocas --------------\n";
571 for (const auto &A : Allocas) {
572 A.Alloca->dump();
573 }
574}
575#endif
576
577namespace {
578using FieldIDType = size_t;
579// We cannot rely solely on natural alignment of a type when building a
580// coroutine frame; if the alignment specified on the Alloca instruction
581// differs from the natural alignment of the alloca type, we will need to
582// insert padding.
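// For example (hypothetical numbers): an `alloca i32, align 8` occupies only
// 4 bytes but requires an 8-byte-aligned offset in the frame, so placing it
// right after a field that ends at offset 4 needs 4 bytes of padding first.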
583class FrameTypeBuilder {
584private:
585 struct Field {
586 uint64_t Size;
587 uint64_t Offset;
588 Type *Ty;
589 FieldIDType LayoutFieldIndex;
590 Align Alignment;
591 Align TyAlignment;
592 uint64_t DynamicAlignBuffer;
593 };
594
595 const DataLayout &DL;
596 LLVMContext &Context;
597 uint64_t StructSize = 0;
598 Align StructAlign;
599 bool IsFinished = false;
600
601 std::optional<Align> MaxFrameAlignment;
602
603 SmallVector<Field, 8> Fields;
604 DenseMap<Value*, unsigned> FieldIndexByKey;
605
606public:
607 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
608 std::optional<Align> MaxFrameAlignment)
609 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
610
611 /// Add a field to this structure for the storage of an `alloca`
612 /// instruction.
613 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
614 bool IsHeader = false) {
615 Type *Ty = AI->getAllocatedType();
616
617 // Make an array type if this is a static array allocation.
618 if (AI->isArrayAllocation()) {
619 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
620 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
621 else
622 report_fatal_error("Coroutines cannot handle non static allocas yet");
623 }
624
625 return addField(Ty, AI->getAlign(), IsHeader);
626 }
627
628 /// We want to put allocas whose lifetime ranges do not overlap
629 /// into one slot of the coroutine frame.
630 /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
631 ///
632 /// cppcoro::task<void> alternative_paths(bool cond) {
633 /// if (cond) {
634 /// big_structure a;
635 /// process(a);
636 /// co_await something();
637 /// } else {
638 /// big_structure b;
639 /// process2(b);
640 /// co_await something();
641 /// }
642 /// }
643 ///
644 /// We want to put variable a and variable b in the same slot to
645 /// reduce the size of coroutine frame.
646 ///
647 /// This function uses the StackLifetime algorithm to partition the AllocaInsts
648 /// in Spills into non-overlapping sets so that allocas in the same
649 /// non-overlapping set can share a slot in the coroutine frame. It then adds a
650 /// field for each non-overlapping set, using the largest
651 /// type in the set as the field type.
652 ///
653 /// Side effects: Because we sort the allocas, the order of allocas in the
654 /// frame may differ from their order in the source code.
655 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
656 coro::Shape &Shape);
657
658 /// Add a field to this structure.
659 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
660 bool IsHeader = false,
661 bool IsSpillOfValue = false) {
662 assert(!IsFinished && "adding fields to a finished builder");
663 assert(Ty && "must provide a type for a field");
664
665 // The field size is always the alloc size of the type.
666 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
667
668 // For an alloca with size=0, we don't need to add a field; it
669 // can just point to any index in the frame. Use index 0.
670 if (FieldSize == 0) {
671 return 0;
672 }
673
674 // The field alignment might not be the type alignment, but we need
675 // to remember the type alignment anyway to build the type.
676 // If we are spilling values we don't need to worry about ABI alignment
677 // concerns.
678 Align ABIAlign = DL.getABITypeAlign(Ty);
679 Align TyAlignment = ABIAlign;
680 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
681 TyAlignment = *MaxFrameAlignment;
682 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
683
684 // The field alignment could be bigger than the max frame alignment, in which case
685 // we request additional storage to be able to dynamically align the
686 // pointer.
687 uint64_t DynamicAlignBuffer = 0;
688 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
689 DynamicAlignBuffer =
690 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
691 FieldAlignment = *MaxFrameAlignment;
692 FieldSize = FieldSize + DynamicAlignBuffer;
693 }
694
695 // Lay out header fields immediately.
696 uint64_t Offset;
697 if (IsHeader) {
698 Offset = alignTo(StructSize, FieldAlignment);
699 StructSize = Offset + FieldSize;
700
701 // Everything else has a flexible offset.
702 } else {
703 Offset = OptimizedStructLayoutField::FlexibleOffset;
704 }
705
706 Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
707 DynamicAlignBuffer});
708 return Fields.size() - 1;
709 }
710
711 /// Finish the layout and set the body on the given type.
712 void finish(StructType *Ty);
713
714 uint64_t getStructSize() const {
715 assert(IsFinished && "not yet finished!");
716 return StructSize;
717 }
718
719 Align getStructAlign() const {
720 assert(IsFinished && "not yet finished!");
721 return StructAlign;
722 }
723
724 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
725 assert(IsFinished && "not yet finished!");
726 return Fields[Id].LayoutFieldIndex;
727 }
728
729 Field getLayoutField(FieldIDType Id) const {
730 assert(IsFinished && "not yet finished!");
731 return Fields[Id];
732 }
733};
734} // namespace
735
736void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
737 auto Updater = [&](Value *I) {
738 auto Field = B.getLayoutField(getFieldIndex(I));
739 setFieldIndex(I, Field.LayoutFieldIndex);
740 setAlign(I, Field.Alignment);
741 uint64_t dynamicAlign =
742 Field.DynamicAlignBuffer
743 ? Field.DynamicAlignBuffer + Field.Alignment.value()
744 : 0;
745 setDynamicAlign(I, dynamicAlign);
746 setOffset(I, Field.Offset);
747 };
748 LayoutIndexUpdateStarted = true;
749 for (auto &S : Spills)
750 Updater(S.first);
751 for (const auto &A : Allocas)
752 Updater(A.Alloca);
753 LayoutIndexUpdateStarted = false;
754}
755
756void FrameTypeBuilder::addFieldForAllocas(const Function &F,
757 FrameDataInfo &FrameData,
758 coro::Shape &Shape) {
759 using AllocaSetType = SmallVector<AllocaInst *, 4>;
760 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
761
762 // We need to add field for allocas at the end of this function.
763 auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
764 for (auto AllocaList : NonOverlapedAllocas) {
765 auto *LargestAI = *AllocaList.begin();
766 FieldIDType Id = addFieldForAlloca(LargestAI);
767 for (auto *Alloca : AllocaList)
768 FrameData.setFieldIndex(Alloca, Id);
769 }
770 });
771
772 if (!Shape.OptimizeFrame) {
773 for (const auto &A : FrameData.Allocas) {
774 AllocaInst *Alloca = A.Alloca;
775 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
776 }
777 return;
778 }
779
780 // Because there are paths from the lifetime.start to coro.end
781 // for each alloca, the live ranges of all allocas overlap
782 // in the blocks that contain coro.end and their successor blocks.
783 // So we choose to skip these blocks when we calculate the live range
784 // for each alloca. This should be reasonable since there shouldn't be uses
785 // in these blocks and the coroutine frame shouldn't be used outside the
786 // coroutine body.
787 //
788 // Note that the user of coro.suspend may not be a SwitchInst. However, this
789 // case seems too complex to handle. And it is harmless to skip these
790 // patterns since it just prevents the allocas from sharing the same
791 // slot.
792 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
793 for (auto *CoroSuspendInst : Shape.CoroSuspends) {
794 for (auto *U : CoroSuspendInst->users()) {
795 if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
796 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
797 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
798 SWI->setDefaultDest(SWI->getSuccessor(1));
799 }
800 }
801 }
802
803 auto ExtractAllocas = [&]() {
804 AllocaSetType Allocas;
805 Allocas.reserve(FrameData.Allocas.size());
806 for (const auto &A : FrameData.Allocas)
807 Allocas.push_back(A.Alloca);
808 return Allocas;
809 };
810 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
811 StackLifetime::LivenessType::May);
812 StackLifetimeAnalyzer.run();
813 auto IsAllocaInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
814 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
815 StackLifetimeAnalyzer.getLiveRange(AI2));
816 };
817 auto GetAllocaSize = [&](const AllocaInfo &A) {
818 std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
819 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
820 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
821 return RetSize->getFixedValue();
822 };
823 // Put larger allocas in the front. So the larger allocas have higher
824 // priority to merge, which can save more space potentially. Also each
825 // AllocaSet would be ordered. So we can get the largest Alloca in one
826 // AllocaSet easily.
827 sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
828 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
829 });
830 for (const auto &A : FrameData.Allocas) {
831 AllocaInst *Alloca = A.Alloca;
832 bool Merged = false;
833 // Try to find a NonOverlappedAllocaSet that the Alloca does not
834 // interfere with. If one exists, insert the alloca into that
835 // NonOverlappedAllocaSet.
836 for (auto &AllocaSet : NonOverlapedAllocas) {
837 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
838 bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
839 return IsAllocaInterfere(Alloca, Iter);
840 });
841 // If the alignment of A is multiple of the alignment of B, the address
842 // of A should satisfy the requirement for aligning for B.
843 //
844 // There may be other, more fine-grained strategies to handle the alignment
845 // information during the merging process, but they seem hard to implement
846 // and of little benefit.
847 bool Alignable = [&]() -> bool {
848 auto *LargestAlloca = *AllocaSet.begin();
849 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
850 0;
851 }();
852 bool CouldMerge = NoInterference && Alignable;
853 if (!CouldMerge)
854 continue;
855 AllocaSet.push_back(Alloca);
856 Merged = true;
857 break;
858 }
859 if (!Merged) {
860 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
861 }
862 }
863 // Restore the default target destination for each switch statement
864 // we saved above.
865 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
866 SwitchInst *SWI = SwitchAndDefaultDest.first;
867 BasicBlock *DestBB = SwitchAndDefaultDest.second;
868 SWI->setDefaultDest(DestBB);
869 }
870 // This Debug Info could tell us which allocas are merged into one slot.
871 LLVM_DEBUG(for (auto &AllocaSet
872 : NonOverlapedAllocas) {
873 if (AllocaSet.size() > 1) {
874 dbgs() << "In Function:" << F.getName() << "\n";
875 dbgs() << "Find Union Set "
876 << "\n";
877 dbgs() << "\tAllocas are \n";
878 for (auto Alloca : AllocaSet)
879 dbgs() << "\t\t" << *Alloca << "\n";
880 }
881 });
882}
883
884void FrameTypeBuilder::finish(StructType *Ty) {
885 assert(!IsFinished && "already finished!");
886
887 // Prepare the optimal-layout field array.
888 // The Id in the layout field is a pointer to our Field for it.
889 SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
890 LayoutFields.reserve(Fields.size());
891 for (auto &Field : Fields) {
892 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
893 Field.Offset);
894 }
895
896 // Perform layout.
897 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
898 StructSize = SizeAndAlign.first;
899 StructAlign = SizeAndAlign.second;
900
901 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
902 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
903 };
904
905 // We need to produce a packed struct type if there's a field whose
906 // assigned offset isn't a multiple of its natural type alignment.
907 bool Packed = [&] {
908 for (auto &LayoutField : LayoutFields) {
909 auto &F = getField(LayoutField);
910 if (!isAligned(F.TyAlignment, LayoutField.Offset))
911 return true;
912 }
913 return false;
914 }();
915
916 // Build the struct body.
917 SmallVector<Type*, 16> FieldTypes;
918 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
919 uint64_t LastOffset = 0;
920 for (auto &LayoutField : LayoutFields) {
921 auto &F = getField(LayoutField);
922
923 auto Offset = LayoutField.Offset;
924
925 // Add a padding field if there's a padding gap and we're either
926 // building a packed struct or the padding gap is more than we'd
927 // get from aligning to the field type's natural alignment.
928 assert(Offset >= LastOffset);
929 if (Offset != LastOffset) {
930 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
931 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
932 Offset - LastOffset));
933 }
934
935 F.Offset = Offset;
936 F.LayoutFieldIndex = FieldTypes.size();
937
938 FieldTypes.push_back(F.Ty);
939 if (F.DynamicAlignBuffer) {
940 FieldTypes.push_back(
941 ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
942 }
943 LastOffset = Offset + F.Size;
944 }
945
946 Ty->setBody(FieldTypes, Packed);
947
948#ifndef NDEBUG
949 // Check that the IR layout matches the offsets we expect.
950 auto Layout = DL.getStructLayout(Ty);
951 for (auto &F : Fields) {
952 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
953 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
954 }
955#endif
956
957 IsFinished = true;
958}
959
960static void cacheDIVar(FrameDataInfo &FrameData,
961 DenseMap<Value *, DILocalVariable *> &DIVarCache) {
962 for (auto *V : FrameData.getAllDefs()) {
963 if (DIVarCache.contains(V))
964 continue;
965
966 auto DDIs = FindDbgDeclareUses(V);
967 auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
968 return DDI->getExpression()->getNumElements() == 0;
969 });
970 if (I != DDIs.end())
971 DIVarCache.insert({V, (*I)->getVariable()});
972 }
973}
974
975/// Create a name for the Type. It uses MDString to store the newly created
976/// string to avoid a memory leak.
977static StringRef solveTypeName(Type *Ty) {
978 if (Ty->isIntegerTy()) {
979 // The longest name in common use may be '__int_128', which has 9 characters.
980 SmallString<16> Buffer;
981 raw_svector_ostream OS(Buffer);
982 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
983 auto *MDName = MDString::get(Ty->getContext(), OS.str());
984 return MDName->getString();
985 }
986
987 if (Ty->isFloatingPointTy()) {
988 if (Ty->isFloatTy())
989 return "__float_";
990 if (Ty->isDoubleTy())
991 return "__double_";
992 return "__floating_type_";
993 }
994
995 if (Ty->isPointerTy())
996 return "PointerType";
997
998 if (Ty->isStructTy()) {
999 if (!cast<StructType>(Ty)->hasName())
1000 return "__LiteralStructType_";
1001
1002 auto Name = Ty->getStructName();
1003
1004 SmallString<16> Buffer(Name);
1005 for (auto &Iter : Buffer)
1006 if (Iter == '.' || Iter == ':')
1007 Iter = '_';
1008 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
1009 return MDName->getString();
1010 }
1011
1012 return "UnknownType";
1013}
1014
1015static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
1016 const DataLayout &Layout, DIScope *Scope,
1017 unsigned LineNum,
1018 DenseMap<Type *, DIType *> &DITypeCache) {
1019 if (DIType *DT = DITypeCache.lookup(Ty))
1020 return DT;
1021
1022 StringRef Name = solveTypeName(Ty);
1023
1024 DIType *RetType = nullptr;
1025
1026 if (Ty->isIntegerTy()) {
1027 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1028 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
1029 llvm::DINode::FlagArtificial);
1030 } else if (Ty->isFloatingPointTy()) {
1031 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
1032 dwarf::DW_ATE_float,
1033 llvm::DINode::FlagArtificial);
1034 } else if (Ty->isPointerTy()) {
1035 // Construct a PointerType that points to null (aka void *) instead of
1036 // exploring the pointee type, to avoid an infinite search problem. For
1037 // example, we would be in trouble if we traversed recursively:
1038 //
1039 // struct Node {
1040 // Node* ptr;
1041 // };
1042 RetType =
1043 Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
1044 Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
1045 /*DWARFAddressSpace=*/std::nullopt, Name);
1046 } else if (Ty->isStructTy()) {
1047 auto *DIStruct = Builder.createStructType(
1048 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
1049 Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
1050 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
1051
1052 auto *StructTy = cast<StructType>(Ty);
1053 SmallVector<Metadata *, 16> Elements;
1054 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
1055 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
1056 Scope, LineNum, DITypeCache);
1057 assert(DITy);
1058 Elements.push_back(Builder.createMemberType(
1059 Scope, DITy->getName(), Scope->getFile(), LineNum,
1060 DITy->getSizeInBits(), DITy->getAlignInBits(),
1061 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
1062 llvm::DINode::FlagArtificial, DITy));
1063 }
1064
1065 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
1066
1067 RetType = DIStruct;
1068 } else {
1069 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
1070 TypeSize Size = Layout.getTypeSizeInBits(Ty);
1071 auto *CharSizeType = Builder.createBasicType(
1072 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
1073
1074 if (Size <= 8)
1075 RetType = CharSizeType;
1076 else {
1077 if (Size % 8 != 0)
1078 Size = TypeSize::getFixed(Size + 8 - (Size % 8));
1079
1080 RetType = Builder.createArrayType(
1081 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
1082 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
1083 }
1084 }
1085
1086 DITypeCache.insert({Ty, RetType});
1087 return RetType;
1088}
1089
1090/// Build artificial debug info for C++ coroutine frames to allow users to
1091/// inspect the contents of the frame directly
1092///
1093/// Create Debug information for coroutine frame with debug name "__coro_frame".
1094/// The debug information for the fields of the coroutine frame is constructed
1095/// in the following way:
1096/// 1. For every value in the frame, we search for a dbg.declare use to find
1097/// the corresponding debug variable for the value. If we can find the
1098/// debug variable, we can get full and accurate debug information.
1099/// 2. If we can't get debug information in step 1, we can only try to
1100/// build the DIType from the Type. We do this in solveDIType. We only handle
1101/// integer, float, double and struct types for now.
1102static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
1103 FrameDataInfo &FrameData) {
1104 DISubprogram *DIS = F.getSubprogram();
1105 // If there is no DISubprogram for F, it implies the function was not compiled
1106 // with debug info, so we don't need to generate debug info for the frame
1107 // either.
1108 if (!DIS || !DIS->getUnit() ||
1109 !dwarf::isCPlusPlus(
1110 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
1111 return;
1112
1113 assert(Shape.ABI == coro::ABI::Switch &&
1114 "We could only build debug infomation for C++ coroutine now.\n");
1115
1116 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
1117
1118 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1119 assert(PromiseAlloca &&
1120 "Coroutine with switch ABI should own Promise alloca");
1121
1122 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
1123 if (DIs.empty())
1124 return;
1125
1126 DbgDeclareInst *PromiseDDI = DIs.front();
1127 DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
1128 DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
1129 DIFile *DFile = PromiseDIScope->getFile();
1130 DILocation *DILoc = PromiseDDI->getDebugLoc().get();
1131 unsigned LineNum = PromiseDIVariable->getLine();
1132
1133 DICompositeType *FrameDITy = DBuilder.createStructType(
1134 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
1135 DFile, LineNum, Shape.FrameSize * 8,
1136 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
1137 llvm::DINodeArray());
1138 StructType *FrameTy = Shape.FrameTy;
1139 SmallVector<Metadata *, 16> Elements;
1140 DataLayout Layout = F.getParent()->getDataLayout();
1141
1143 cacheDIVar(FrameData, DIVarCache);
1144
1145 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
1146 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
1147 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
1148
1150 NameCache.insert({ResumeIndex, "__resume_fn"});
1151 NameCache.insert({DestroyIndex, "__destroy_fn"});
1152 NameCache.insert({IndexIndex, "__coro_index"});
1153
1154 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
1155 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
1156 *IndexTy = FrameTy->getElementType(IndexIndex);
1157
1159 TyCache.insert(
1160 {ResumeIndex, DBuilder.createPointerType(
1161 nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
1162 TyCache.insert(
1163 {DestroyIndex, DBuilder.createPointerType(
1164 nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});
1165
1166 /// FIXME: If we fill the field `SizeInBits` with the actual size of
1167 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
1168 TyCache.insert({IndexIndex, DBuilder.createBasicType(
1169 "__coro_index",
1170 (Layout.getTypeSizeInBits(IndexTy) < 8)
1171 ? 8
1172 : Layout.getTypeSizeInBits(IndexTy),
1173 dwarf::DW_ATE_unsigned_char)});
1174
1175 for (auto *V : FrameData.getAllDefs()) {
1176 if (!DIVarCache.contains(V))
1177 continue;
1178
1179 auto Index = FrameData.getFieldIndex(V);
1180
1181 NameCache.insert({Index, DIVarCache[V]->getName()});
1182 TyCache.insert({Index, DIVarCache[V]->getType()});
1183 }
1184
1185 // Cache from index to (Align, Offset Pair)
1186 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1187 // The Align and Offset of Resume function and Destroy function are fixed.
1188 OffsetCache.insert({ResumeIndex, {8, 0}});
1189 OffsetCache.insert({DestroyIndex, {8, 8}});
1190 OffsetCache.insert(
1191 {IndexIndex,
1192 {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1193
1194 for (auto *V : FrameData.getAllDefs()) {
1195 auto Index = FrameData.getFieldIndex(V);
1196
1197 OffsetCache.insert(
1198 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
1199 }
1200
1201 DenseMap<Type *, DIType *> DITypeCache;
1202 // This counter is used to avoid repeated type names, e.g., there may be
1203 // many i32 and i64 types in one coroutine, and we use i32_0,
1204 // i32_1, etc. to distinguish them, since it makes no sense for the names
1205 // of the fields to conflict with each other.
1206 unsigned UnknownTypeNum = 0;
1207 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1208 if (!OffsetCache.contains(Index))
1209 continue;
1210
1211 std::string Name;
1212 uint64_t SizeInBits;
1213 uint32_t AlignInBits;
1214 uint64_t OffsetInBits;
1215 DIType *DITy = nullptr;
1216
1217 Type *Ty = FrameTy->getElementType(Index);
1218 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1219 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
1220 AlignInBits = OffsetCache[Index].first * 8;
1221 OffsetInBits = OffsetCache[Index].second * 8;
1222
1223 if (NameCache.contains(Index)) {
1224 Name = NameCache[Index].str();
1225 DITy = TyCache[Index];
1226 } else {
1227 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1228 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1229 Name = DITy->getName().str();
1230 Name += "_" + std::to_string(UnknownTypeNum);
1231 UnknownTypeNum++;
1232 }
1233
1234 Elements.push_back(DBuilder.createMemberType(
1235 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1236 llvm::DINode::FlagArtificial, DITy));
1237 }
1238
1239 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1240
1241 auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1242 DFile, LineNum, FrameDITy,
1243 true, DINode::FlagArtificial);
1244 assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1245
1246 // The subprogram has a RetainedNodes field which records the debug
1247 // variables it contains. So we need to add __coro_frame to its
1248 // RetainedNodes.
1249 //
1250 // If we don't add __coro_frame to the RetainedNodes, the user may get
1251 // `no symbol __coro_frame in context` rather than the more precise
1252 // `__coro_frame is optimized out`.
1253 if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1254 auto RetainedNodes = SubProgram->getRetainedNodes();
1255 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1256 RetainedNodes.end());
1257 RetainedNodesVec.push_back(FrameDIVar);
1258 SubProgram->replaceOperandWith(
1259 7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1260 }
1261
1262 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1263 DBuilder.createExpression(), DILoc,
1264 Shape.getInsertPtAfterFramePtr());
1265}
1266
1267// Build a struct that will keep state for an active coroutine.
1268// struct f.frame {
1269// ResumeFnTy ResumeFnAddr;
1270// ResumeFnTy DestroyFnAddr;
1271// int ResumeIndex;
1272// ... promise (if present) ...
1273// ... spills ...
1274// };
1275static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1276 FrameDataInfo &FrameData) {
1277 LLVMContext &C = F.getContext();
1278 const DataLayout &DL = F.getParent()->getDataLayout();
1279 StructType *FrameTy = [&] {
1280 SmallString<32> Name(F.getName());
1281 Name.append(".Frame");
1282 return StructType::create(C, Name);
1283 }();
1284
1285 // We will use this value to cap the alignment of spilled values.
1286 std::optional<Align> MaxFrameAlignment;
1287 if (Shape.ABI == coro::ABI::Async)
1288 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1289 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1290
1291 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1292 std::optional<FieldIDType> SwitchIndexFieldId;
1293
1294 if (Shape.ABI == coro::ABI::Switch) {
1295 auto *FnPtrTy = PointerType::getUnqual(C);
1296
1297 // Add header fields for the resume and destroy functions.
1298 // We can rely on these being perfectly packed.
1299 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
1300 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
1301
1302 // PromiseAlloca field needs to be explicitly added here because it's
1303 // a header field with a fixed offset based on its alignment. Hence it
1304 // needs special handling and cannot be added to FrameData.Allocas.
1305 if (PromiseAlloca)
1306 FrameData.setFieldIndex(
1307 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1308
1309 // Add a field to store the suspend index. This doesn't need to
1310 // be in the header.
1311 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1312 Type *IndexType = Type::getIntNTy(C, IndexBits);
1313
1314 SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
1315 } else {
1316 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1317 }
1318
1319 // Because multiple allocas may own the same field slot,
1320 // we add allocas to field here.
1321 B.addFieldForAllocas(F, FrameData, Shape);
1322 // Add PromiseAlloca to Allocas list so that
1323 // 1. updateLayoutIndex could update its index after
1324 // `performOptimizedStructLayout`
1325 // 2. it is processed in insertSpills.
1326 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
1327 // We assume that the promise alloca won't be modified before
1328 // CoroBegin and that no alias will be created before CoroBegin.
1329 FrameData.Allocas.emplace_back(
1330 PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
1331 // Create an entry for every spilled value.
1332 for (auto &S : FrameData.Spills) {
1333 Type *FieldType = S.first->getType();
1334 // For byval arguments, we need to store the pointed value in the frame,
1335 // instead of the pointer itself.
1336 if (const Argument *A = dyn_cast<Argument>(S.first))
1337 if (A->hasByValAttr())
1338 FieldType = A->getParamByValType();
1339 FieldIDType Id = B.addField(FieldType, std::nullopt, false /*header*/,
1340 true /*IsSpillOfValue*/);
1341 FrameData.setFieldIndex(S.first, Id);
1342 }
1343
1344 B.finish(FrameTy);
1345 FrameData.updateLayoutIndex(B);
1346 Shape.FrameAlign = B.getStructAlign();
1347 Shape.FrameSize = B.getStructSize();
1348
1349 switch (Shape.ABI) {
1350 case coro::ABI::Switch: {
1351 // In the switch ABI, remember the switch-index field.
1352 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1353 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1354 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1355 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1356
1357 // Also round the frame size up to a multiple of its alignment, as is
1358 // generally expected in C/C++.
1359 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1360 break;
1361 }
1362
1363 // In the retcon ABI, remember whether the frame is inline in the storage.
1364 case coro::ABI::Retcon:
1365 case coro::ABI::RetconOnce: {
1366 auto Id = Shape.getRetconCoroId();
1367 Shape.RetconLowering.IsFrameInlineInStorage
1368 = (B.getStructSize() <= Id->getStorageSize() &&
1369 B.getStructAlign() <= Id->getStorageAlignment());
1370 break;
1371 }
1372 case coro::ABI::Async: {
1373 Shape.AsyncLowering.FrameOffset =
1374 alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1375 // Also make the final context size a multiple of the context alignment to
1376 // make allocation easier for allocators.
1377 Shape.AsyncLowering.ContextSize =
1378 alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1379 Shape.AsyncLowering.getContextAlignment());
1380 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1381 report_fatal_error(
1382 "The alignment requirement of frame variables cannot be higher than "
1383 "the alignment of the async function context");
1384 }
1385 break;
1386 }
1387 }
1388
1389 return FrameTy;
1390}
1391
1392// We use a pointer use visitor to track how an alloca is being used.
1393// The goal is to be able to answer the following three questions:
1394// 1. Should this alloca be allocated on the frame instead?
1395// 2. Could the content of the alloca be modified prior to CoroBegin, which
1396// would require copying the data from the alloca to the frame after CoroBegin?
1397// 3. Is there any alias created for this alloca prior to CoroBegin, but used
1398// after CoroBegin? In that case, we will need to recreate the alias after
1399// CoroBegin based off the frame.
1400// To answer question 1, we track two things:
1401// a. The list of all BasicBlocks that use this alloca or any of its aliases.
1402// In the end, we check if there exist any two basic blocks that cross
1403// suspension points. If so, this alloca must be put on the frame.
1404// b. Whether the alloca or any alias of the alloca is escaped at some point,
1405// either by storing the address somewhere, or because the address is used in
1406// a function call that might capture. If it is ever escaped, this alloca must
1407// be put on the frame conservatively.
1408// To answer question 2, we track this through the variable
1409// MayWriteBeforeCoroBegin: whenever a potential write happens, either through
1410// a store instruction, a function call or any of the memory intrinsics, we
1411// check whether this instruction is prior to CoroBegin.
1412// To answer question 3, we track the offsets of all aliases created for the
1413// alloca prior to CoroBegin but used after CoroBegin. std::optional is used
1414// to represent the case when the offset is unknown (e.g. when you have a
1415// PHINode that takes in different offset values). We cannot handle unknown
1416// offsets and will assert; an ideal solution would likely require a significant redesign.
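//
// For instance (illustrative IR; names hypothetical):
//
//   %a   = alloca i64
//   %p   = getelementptr i8, ptr %a, i64 4    ; alias created before coro.begin
//   %hdl = call ptr @llvm.coro.begin(...)
//   ...
//   store i8 1, ptr %p                        ; alias used after coro.begin
//
// If %a ends up on the frame (e.g. because it crosses a suspend point), %p
// must be recreated after coro.begin as a GEP into %a's frame field at the
// recorded offset 4.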
1417namespace {
1418struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1419 using Base = PtrUseVisitor<AllocaUseVisitor>;
1420 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1421 const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1422 bool ShouldUseLifetimeStartInfo)
1423 : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1424 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1425
1426 void visit(Instruction &I) {
1427 Users.insert(&I);
1428 Base::visit(I);
1429 // If the pointer is escaped prior to CoroBegin, we have to assume it would
1430 // be written into before CoroBegin as well.
1431 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1432 MayWriteBeforeCoroBegin = true;
1433 }
1434 }
1435 // We need to provide this overload as PtrUseVisitor uses a pointer based
1436 // visiting function.
1437 void visit(Instruction *I) { return visit(*I); }
1438
1439 void visitPHINode(PHINode &I) {
1440 enqueueUsers(I);
1441 handleAlias(I);
1442 }
1443
1444 void visitSelectInst(SelectInst &I) {
1445 enqueueUsers(I);
1446 handleAlias(I);
1447 }
1448
1449 void visitStoreInst(StoreInst &SI) {
1450 // Regardless of whether the alias of the alloca is the value operand or the
1451 // pointer operand, we need to assume the alloca has been written to.
1452 handleMayWrite(SI);
1453
1454 if (SI.getValueOperand() != U->get())
1455 return;
1456
1457 // We are storing the pointer into a memory location, potentially escaping.
1458 // As an optimization, we try to detect simple cases where it doesn't
1459 // actually escape, for example:
1460 // %ptr = alloca ..
1461 // %addr = alloca ..
1462 // store %ptr, %addr
1463 // %x = load %addr
1464 // ..
1465 // If %addr is only used by loading from it, we can simply treat %x as
1466 // another alias of %ptr, and not consider %ptr to be escaped.
1467 auto IsSimpleStoreThenLoad = [&]() {
1468 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1469 // If the memory location we are storing to is not an alloca, it
1470 // could be an alias of some other memory locations, which is difficult
1471 // to analyze.
1472 if (!AI)
1473 return false;
1474 // StoreAliases contains aliases of the memory location stored into.
1475 SmallVector<Instruction *, 4> StoreAliases = {AI};
1476 while (!StoreAliases.empty()) {
1477 Instruction *I = StoreAliases.pop_back_val();
1478 for (User *U : I->users()) {
1479 // If we are loading from the memory location, we are creating an
1480 // alias of the original pointer.
1481 if (auto *LI = dyn_cast<LoadInst>(U)) {
1482 enqueueUsers(*LI);
1483 handleAlias(*LI);
1484 continue;
1485 }
1486 // If we are overwriting the memory location, the pointer certainly
1487 // won't escape.
1488 if (auto *S = dyn_cast<StoreInst>(U))
1489 if (S->getPointerOperand() == I)
1490 continue;
1491 if (auto *II = dyn_cast<IntrinsicInst>(U))
1492 if (II->isLifetimeStartOrEnd())
1493 continue;
1494 // BitCastInst creates aliases of the memory location being stored
1495 // into.
1496 if (auto *BI = dyn_cast<BitCastInst>(U)) {
1497 StoreAliases.push_back(BI);
1498 continue;
1499 }
1500 return false;
1501 }
1502 }
1503
1504 return true;
1505 };
1506
1507 if (!IsSimpleStoreThenLoad())
1508 PI.setEscaped(&SI);
1509 }
1510
1511 // All mem intrinsics modify the data.
1512 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1513
1514 void visitBitCastInst(BitCastInst &BC) {
1515 Base::visitBitCastInst(BC);
1516 handleAlias(BC);
1517 }
1518
1519 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1520 Base::visitAddrSpaceCastInst(ASC);
1521 handleAlias(ASC);
1522 }
1523
1524 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1525 // The base visitor will adjust Offset accordingly.
1526 Base::visitGetElementPtrInst(GEPI);
1527 handleAlias(GEPI);
1528 }
1529
1530 void visitIntrinsicInst(IntrinsicInst &II) {
1531 // When the lifetime markers refer to a
1532 // subrange of the original alloca, ignore the lifetime
1533 // markers to avoid misleading the analysis.
1534 if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1535 !Offset.isZero())
1536 return Base::visitIntrinsicInst(II);
1537 LifetimeStarts.insert(&II);
1538 }
1539
1540 void visitCallBase(CallBase &CB) {
1541 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1542 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1543 PI.setEscaped(&CB);
1544 handleMayWrite(CB);
1545 }
1546
1547 bool getShouldLiveOnFrame() const {
1548 if (!ShouldLiveOnFrame)
1549 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1550 return *ShouldLiveOnFrame;
1551 }
1552
1553 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1554
1555 DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
1556 assert(getShouldLiveOnFrame() && "This method should only be called if the "
1557 "alloca needs to live on the frame.");
1558 for (const auto &P : AliasOffetMap)
1559 if (!P.second)
1560 report_fatal_error("Unable to handle an alias with unknown offset "
1561 "created before CoroBegin.");
1562 return AliasOffetMap;
1563 }
1564
1565private:
1566 const DominatorTree &DT;
1567 const CoroBeginInst &CoroBegin;
1568 const SuspendCrossingInfo &Checker;
1569 // All alias to the original AllocaInst, created before CoroBegin and used
1570 // after CoroBegin. Each entry contains the instruction and the offset in the
1571 // original Alloca. They need to be recreated after CoroBegin off the frame.
1572 DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
1573 SmallPtrSet<Instruction *, 4> Users{};
1574 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1575 bool MayWriteBeforeCoroBegin{false};
1576 bool ShouldUseLifetimeStartInfo{true};
1577
1578 mutable std::optional<bool> ShouldLiveOnFrame{};
1579
1580 bool computeShouldLiveOnFrame() const {
1581 // If lifetime information is available, we check it first since it's
1582 // more precise. We look at every pair of lifetime.start intrinsic and
1583 // every basic block that uses the pointer to see if they cross suspension
1584 // points. The uses cover both direct uses as well as indirect uses.
1585 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1586 for (auto *I : Users)
1587 for (auto *S : LifetimeStarts)
1588 if (Checker.isDefinitionAcrossSuspend(*S, I))
1589 return true;
1590 // Addresses are guaranteed to be identical after every lifetime.start so
1591 // we cannot use the local stack if the address escaped and there is a
1592 // suspend point between lifetime markers. This should also cover the
1593 // case of a single lifetime.start intrinsic in a loop with suspend point.
1594 if (PI.isEscaped()) {
1595 for (auto *A : LifetimeStarts) {
1596 for (auto *B : LifetimeStarts) {
1597 if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
1598 B->getParent()))
1599 return true;
1600 }
1601 }
1602 }
1603 return false;
1604 }
1605 // FIXME: Ideally the isEscaped check should come at the beginning.
1606 // However there are a few loose ends that need to be fixed first before
1607 // we can do that. We need to make sure we are not over-conservative, so
1608 // that the data accessed in-between await_suspend and symmetric transfer
1609 // is always put on the stack, and also data accessed after coro.end is
1610 // always put on the stack (esp the return object). To fix that, we need
1611 // to:
1612 // 1) Potentially treat sret as nocapture in calls
1613 // 2) Special handle the return object and put it on the stack
1614 // 3) Utilize lifetime.end intrinsic
1615 if (PI.isEscaped())
1616 return true;
1617
1618 for (auto *U1 : Users)
1619 for (auto *U2 : Users)
1620 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1621 return true;
1622
1623 return false;
1624 }
1625
1626 void handleMayWrite(const Instruction &I) {
1627 if (!DT.dominates(&CoroBegin, &I))
1628 MayWriteBeforeCoroBegin = true;
1629 }
1630
1631 bool usedAfterCoroBegin(Instruction &I) {
1632 for (auto &U : I.uses())
1633 if (DT.dominates(&CoroBegin, U))
1634 return true;
1635 return false;
1636 }
1637
1638 void handleAlias(Instruction &I) {
1639 // We track all aliases created prior to CoroBegin but used after.
1640 // These aliases may need to be recreated after CoroBegin if the alloca
1641 // needs to live on the frame.
1642 if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1643 return;
1644
1645 if (!IsOffsetKnown) {
1646 AliasOffetMap[&I].reset();
1647 } else {
1648 auto Itr = AliasOffetMap.find(&I);
1649 if (Itr == AliasOffetMap.end()) {
1650 AliasOffetMap[&I] = Offset;
1651 } else if (Itr->second && *Itr->second != Offset) {
1652 // If we have seen two different possible values for this alias, we set
1653 // it to empty.
1654 AliasOffetMap[&I].reset();
1655 }
1656 }
1657 }
1658};
1659} // namespace
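// Illustrative sketch (not from the source; names are made up): an alloca
// whose uses straddle a suspend point must live on the coroutine frame.
// Roughly, for IR such as
//
//   %x = alloca i32
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   store i32 0, ptr %x
//   %s = call i8 @llvm.coro.suspend(token none, i1 false)
//   ...
//   %v = load i32, ptr %x              ; use after the suspend
//
// the checker reports a definition/use pair crossing the suspend, so
// getShouldLiveOnFrame() above returns true for %x.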
1660
1661// We need to make room to insert a spill after the initial PHIs, but before
1662// the catchswitch instruction. Placing it earlier would violate the
1663// requirement that a catchswitch, like all other EH pads, must be the first
1664// non-PHI instruction in a block.
1665//
1666// Split the catchswitch away into a separate block and insert in its place:
1667//
1668//    cleanuppad <InsertPt> cleanupret.
1669//
1670// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1671 BasicBlock *CurrentBlock = CatchSwitch->getParent();
1672 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1673 CurrentBlock->getTerminator()->eraseFromParent();
1674
1675 auto *CleanupPad =
1676 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1677 auto *CleanupRet =
1678 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1679 return CleanupRet;
1680}
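// Illustrative sketch of that transformation (block and value names are made
// up): a block that used to start with PHIs followed by a catchswitch
//
//   ehblock:
//     %v = phi i32 ...
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// becomes, roughly,
//
//   ehblock:
//     %v = phi i32 ...
//     %pad = cleanuppad within none []
//     cleanupret from %pad unwind label %ehblock.split
//   ehblock.split:
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// and the spill can now be inserted in front of the cleanupret.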
1681
1682// Replace all alloca and SSA values that are accessed across suspend points
1683// with GetElementPointer from coroutine frame + loads and stores. Create an
1684// AllocaSpillBB that will become the new entry block for the resume parts of
1685// the coroutine:
1686//
1687// %hdl = coro.begin(...)
1688// whatever
1689//
1690// becomes:
1691//
1692// %hdl = coro.begin(...)
1693// br label %AllocaSpillBB
1694//
1695// AllocaSpillBB:
1696// ; geps corresponding to allocas that were moved to coroutine frame
1697// br label PostSpill
1698//
1699// PostSpill:
1700// whatever
1701//
1702//
1703static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1704 auto *CB = Shape.CoroBegin;
1705 LLVMContext &C = CB->getContext();
1706 Function *F = CB->getFunction();
1707 IRBuilder<> Builder(C);
1708 StructType *FrameTy = Shape.FrameTy;
1709 Value *FramePtr = Shape.FramePtr;
1710 DominatorTree DT(*F);
1711 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
1712
1713 // Create a GEP with the given index into the coroutine frame for the original
1714 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1715 // original type.
1716 auto GetFramePointer = [&](Value *Orig) -> Value * {
1717 FieldIDType Index = FrameData.getFieldIndex(Orig);
1718 SmallVector<Value *, 3> Indices = {
1719 ConstantInt::get(Type::getInt32Ty(C), 0),
1720 ConstantInt::get(Type::getInt32Ty(C), Index),
1721 };
1722
1723 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1724 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1725 auto Count = CI->getValue().getZExtValue();
1726 if (Count > 1) {
1727 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1728 }
1729 } else {
1730 report_fatal_error("Coroutines cannot handle non static allocas yet");
1731 }
1732 }
1733
1734 auto GEP = cast<GetElementPtrInst>(
1735 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1736 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1737 if (FrameData.getDynamicAlign(Orig) != 0) {
1738 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1739 auto *M = AI->getModule();
1740 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1741 auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
1742 auto *AlignMask =
1743 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1744 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1745 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1746 return Builder.CreateIntToPtr(PtrValue, AI->getType());
1747 }
1748 // If the type of the GEP is not equal to the type of the AllocaInst, it
1749 // implies that the AllocaInst may share its frame slot with another
1750 // AllocaInst. So we cast the GEP to the type of the AllocaInst here to
1751 // reuse the frame storage.
1752 //
1753 // Note: if we change the strategy for dealing with alignment, we need to
1754 // refine this casting.
1755 if (GEP->getType() != Orig->getType())
1756 return Builder.CreateAddrSpaceCast(GEP, Orig->getType(),
1757 Orig->getName() + Twine(".cast"));
1758 }
1759 return GEP;
1760 };
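// A quick worked example of the dynamic-alignment rounding above (numbers are
// illustrative only): with AI->getAlign() == 16, AlignMask is 15. If the raw
// field address is 0x1005, then 0x1005 + 15 = 0x1014 and 0x1014 & ~15 =
// 0x1010, the first 16-byte-aligned address at or above 0x1005. The frame
// slot is expected to be over-allocated so that the rounded-up pointer still
// stays within it.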
1761
1762 for (auto const &E : FrameData.Spills) {
1763 Value *Def = E.first;
1764 auto SpillAlignment = Align(FrameData.getAlign(Def));
1765 // Create a store instruction storing the value into the
1766 // coroutine frame.
1767 BasicBlock::iterator InsertPt;
1768 Type *ByValTy = nullptr;
1769 if (auto *Arg = dyn_cast<Argument>(Def)) {
1770 // For arguments, we will place the store instruction right after
1771 // the coroutine frame pointer instruction, i.e. coro.begin.
1772 InsertPt = Shape.getInsertPtAfterFramePtr()->getIterator();
1773
1774 // If we're spilling an Argument, make sure we clear 'nocapture'
1775 // from the coroutine function.
1776 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1777
1778 if (Arg->hasByValAttr())
1779 ByValTy = Arg->getParamByValType();
1780 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1781 // Don't spill immediately after a suspend; splitting assumes
1782 // that the suspend will be followed by a branch.
1783 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1784 } else {
1785 auto *I = cast<Instruction>(Def);
1786 if (!DT.dominates(CB, I)) {
1787 // If it is not dominated by CoroBegin, then spill should be
1788 // inserted immediately after CoroFrame is computed.
1789 InsertPt = Shape.getInsertPtAfterFramePtr()->getIterator();
1790 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1791 // If we are spilling the result of the invoke instruction, split
1792 // the normal edge and insert the spill in the new block.
1793 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1794 InsertPt = NewBB->getTerminator()->getIterator();
1795 } else if (isa<PHINode>(I)) {
1796 // Skip past the PHINodes and EH pad instructions.
1797 BasicBlock *DefBlock = I->getParent();
1798 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1799 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
1800 else
1801 InsertPt = DefBlock->getFirstInsertionPt();
1802 } else {
1803 assert(!I->isTerminator() && "unexpected terminator");
1804 // For all other values, the spill is placed immediately after
1805 // the definition.
1806 InsertPt = I->getNextNode()->getIterator();
1807 }
1808 }
1809
1810 auto Index = FrameData.getFieldIndex(Def);
1811 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1812 auto *G = Builder.CreateConstInBoundsGEP2_32(
1813 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1814 if (ByValTy) {
1815 // For byval arguments, we need to store the pointed value in the frame,
1816 // instead of the pointer itself.
1817 auto *Value = Builder.CreateLoad(ByValTy, Def);
1818 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1819 } else {
1820 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1821 }
1822
1823 BasicBlock *CurrentBlock = nullptr;
1824 Value *CurrentReload = nullptr;
1825 for (auto *U : E.second) {
1826 // If we have not seen this use block yet, create a load instruction to
1827 // reload the spilled value from the coroutine frame; for byval arguments
1828 // the frame GEP itself is used as the reloaded value.
1829 if (CurrentBlock != U->getParent()) {
1830 CurrentBlock = U->getParent();
1831 Builder.SetInsertPoint(CurrentBlock,
1832 CurrentBlock->getFirstInsertionPt());
1833
1834 auto *GEP = GetFramePointer(E.first);
1835 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1836 if (ByValTy)
1837 CurrentReload = GEP;
1838 else
1839 CurrentReload = Builder.CreateAlignedLoad(
1840 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1841 SpillAlignment, E.first->getName() + Twine(".reload"));
1842
1844 // Try best to find dbg.declare. If the spill is a temp, there may not
1845 // be a direct dbg.declare. Walk up the load chain to find one from an
1846 // alias.
1847 if (F->getSubprogram()) {
1848 auto *CurDef = Def;
1849 while (DIs.empty() && isa<LoadInst>(CurDef)) {
1850 auto *LdInst = cast<LoadInst>(CurDef);
1851 // Only consider ptr to ptr same type load.
1852 if (LdInst->getPointerOperandType() != LdInst->getType())
1853 break;
1854 CurDef = LdInst->getPointerOperand();
1855 if (!isa<AllocaInst, LoadInst>(CurDef))
1856 break;
1857 DIs = FindDbgDeclareUses(CurDef);
1858 }
1859 }
1860
1861 for (DbgDeclareInst *DDI : DIs) {
1862 bool AllowUnresolved = false;
1863 // This dbg.declare is preserved for all coro-split function
1864 // fragments. It will be unreachable in the main function, and
1865 // processed by coro::salvageDebugInfo() by CoroCloner.
1866 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1867 .insertDeclare(CurrentReload, DDI->getVariable(),
1868 DDI->getExpression(), DDI->getDebugLoc(),
1869 &*Builder.GetInsertPoint());
1870 // This dbg.declare is for the main function entry point. It
1871 // will be deleted in all coro-split functions.
1872 coro::salvageDebugInfo(ArgToAllocaMap, DDI, Shape.OptimizeFrame,
1873 false /*UseEntryValue*/);
1874 }
1875 }
1876
1877 // If we have a single edge PHINode, remove it and replace it with a
1878 // reload from the coroutine frame. (We already took care of multi edge
1879 // PHINodes by rewriting them in the rewritePHIs function).
1880 if (auto *PN = dyn_cast<PHINode>(U)) {
1881 assert(PN->getNumIncomingValues() == 1 &&
1882 "unexpected number of incoming "
1883 "values in the PHINode");
1884 PN->replaceAllUsesWith(CurrentReload);
1885 PN->eraseFromParent();
1886 continue;
1887 }
1888
1889 // Replace all uses of CurrentValue in the current instruction with
1890 // reload.
1891 U->replaceUsesOfWith(Def, CurrentReload);
1892 }
1893 }
1894
1895 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1896
1897 auto SpillBlock = FramePtrBB->splitBasicBlock(
1898 Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1899 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1900 Shape.AllocaSpillBlock = SpillBlock;
1901
1902 // retcon and retcon.once lowering assumes all uses have been sunk.
1903 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1904 Shape.ABI == coro::ABI::Async) {
1905 // If we found any allocas, replace all of their remaining uses with Geps.
1906 Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
1907 for (const auto &P : FrameData.Allocas) {
1908 AllocaInst *Alloca = P.Alloca;
1909 auto *G = GetFramePointer(Alloca);
1910
1911 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1912 // here, as we are changing location of the instruction.
1913 G->takeName(Alloca);
1914 Alloca->replaceAllUsesWith(G);
1915 Alloca->eraseFromParent();
1916 }
1917 return;
1918 }
1919
1920 // If we found any allocas, replace all of their remaining uses with GEP
1921 // instructions. To preserve debuggability, we replace the uses of allocas
1922 // in dbg.declares and dbg.values with the reload from the frame.
1923 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1924 // as some of the uses may not be dominated by CoroBegin.
1925 Builder.SetInsertPoint(Shape.AllocaSpillBlock,
1926 Shape.AllocaSpillBlock->begin());
1927 SmallVector<Instruction *, 4> UsersToUpdate;
1928 for (const auto &A : FrameData.Allocas) {
1929 AllocaInst *Alloca = A.Alloca;
1930 UsersToUpdate.clear();
1931 for (User *U : Alloca->users()) {
1932 auto *I = cast<Instruction>(U);
1933 if (DT.dominates(CB, I))
1934 UsersToUpdate.push_back(I);
1935 }
1936 if (UsersToUpdate.empty())
1937 continue;
1938 auto *G = GetFramePointer(Alloca);
1939 G->setName(Alloca->getName() + Twine(".reload.addr"));
1940
1941 SmallVector<DbgVariableIntrinsic *, 4> DIs;
1942 findDbgUsers(DIs, Alloca);
1943 for (auto *DVI : DIs)
1944 DVI->replaceUsesOfWith(Alloca, G);
1945
1946 for (Instruction *I : UsersToUpdate) {
1947 // It is meaningless to retain lifetime intrinsics that refer to a member
1948 // of the coroutine frame, and such leftover lifetime intrinsics may block
1949 // further optimizations.
1950 if (I->isLifetimeStartOrEnd()) {
1951 I->eraseFromParent();
1952 continue;
1953 }
1954
1955 I->replaceUsesOfWith(Alloca, G);
1956 }
1957 }
1959 for (const auto &A : FrameData.Allocas) {
1960 AllocaInst *Alloca = A.Alloca;
1961 if (A.MayWriteBeforeCoroBegin) {
1962 // isEscaped really means potentially modified before CoroBegin.
1963 if (Alloca->isArrayAllocation())
1964 report_fatal_error(
1965 "Coroutines cannot handle copying of array allocas yet");
1966
1967 auto *G = GetFramePointer(Alloca);
1968 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1969 Builder.CreateStore(Value, G);
1970 }
1971 // For each alias to Alloca created before CoroBegin but used after
1972 // CoroBegin, we recreate them after CoroBegin by applying the offset
1973 // to the pointer in the frame.
1974 for (const auto &Alias : A.Aliases) {
1975 auto *FramePtr = GetFramePointer(Alloca);
1976 auto &Value = *Alias.second;
1977 auto ITy = IntegerType::get(C, Value.getBitWidth());
1978 auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtr,
1979 ConstantInt::get(ITy, Value));
1980 Alias.first->replaceUsesWithIf(
1981 AliasPtr, [&](Use &U) { return DT.dominates(CB, U); });
1982 }
1983 }
1984
1985 // PromiseAlloca is not collected in FrameData.Allocas, so the code above
1986 // does not handle the case where the PromiseAlloca is written before
1987 // CoroBegin. That can be problematic in edge cases. See
1988 // https://github.com/llvm/llvm-project/issues/57861 for an example.
1989 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
1990 AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
1991 // Check whether there is any memory access to the promise alloca before CoroBegin.
1992 bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
1993 auto *Inst = dyn_cast<Instruction>(U.getUser());
1994 if (!Inst || DT.dominates(CB, Inst))
1995 return false;
1996
1997 if (auto *CI = dyn_cast<CallInst>(Inst)) {
1998 // It is fine if the call wouldn't write to the Promise.
1999 // This is possible for @llvm.coro.id intrinsics, which
2000 // would take the promise as the second argument as a
2001 // marker.
2002 if (CI->onlyReadsMemory() ||
2003 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2004 return false;
2005 return true;
2006 }
2007
2008 return isa<StoreInst>(Inst) ||
2009 // It may take too much time to track the uses.
2010 // Be conservative about the case the use may escape.
2011 isa<GetElementPtrInst>(Inst) ||
2012 // Before opaque pointers were enabled, there would always
2013 // be a bitcast of the promise alloca, so this check was
2014 // needed. Now that opaque pointers are enabled by default,
2015 // keeping it should be fine.
2016 isa<BitCastInst>(Inst);
2017 });
2018 if (HasAccessingPromiseBeforeCB) {
2019 Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
2020 auto *G = GetFramePointer(PA);
2021 auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
2022 Builder.CreateStore(Value, G);
2023 }
2024 }
2025}
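// Illustrative sketch (names are made up) of what insertSpills() produces for
// a value %x that is live across a suspend point, assuming it lands in frame
// field N of %f.Frame:
//
//   %x = call i32 @compute()
//   %x.spill.addr = getelementptr inbounds %f.Frame, ptr %frame, i32 0, i32 N
//   store i32 %x, ptr %x.spill.addr
//   ... suspend ...
// resume.use.block:
//   %x.reload.addr = getelementptr inbounds %f.Frame, ptr %frame, i32 0, i32 N
//   %x.reload = load i32, ptr %x.reload.addr
//
// and every use of %x in that block is rewritten to use %x.reload.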
2026
2027// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
2028// PHI in InsertedBB.
2029static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
2030 BasicBlock *InsertedBB,
2031 BasicBlock *PredBB,
2032 PHINode *UntilPHI = nullptr) {
2033 auto *PN = cast<PHINode>(&SuccBB->front());
2034 do {
2035 int Index = PN->getBasicBlockIndex(InsertedBB);
2036 Value *V = PN->getIncomingValue(Index);
2037 PHINode *InputV = PHINode::Create(
2038 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
2039 InputV->insertBefore(InsertedBB->begin());
2040 InputV->addIncoming(V, PredBB);
2041 PN->setIncomingValue(Index, InputV);
2042 PN = dyn_cast<PHINode>(PN->getNextNode());
2043 } while (PN != UntilPHI);
2044}
2045
2046// Rewrites the PHI Nodes in a cleanuppad.
2047static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
2048 CleanupPadInst *CleanupPad) {
2049 // For every incoming edge to a CleanupPad we will create a new block holding
2050 // all incoming values in single-value PHI nodes. We will then create another
2051 // block to act as a dispatcher (as all unwind edges for related EH blocks
2052 // must be the same).
2053 //
2054 // cleanuppad:
2055 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
2056 // %3 = cleanuppad within none []
2057 //
2058 // It will create:
2059 //
2060 // cleanuppad.corodispatch
2061 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
2062 // %3 = cleanuppad within none []
2063 // switch i8 % 2, label %unreachable
2064 // [i8 0, label %cleanuppad.from.catchswitch
2065 // i8 1, label %cleanuppad.from.catch.1]
2066 // cleanuppad.from.catchswitch:
2067 // %4 = phi i32 [%0, %catchswitch]
2068 // br %label cleanuppad
2069 // cleanuppad.from.catch.1:
2070 // %6 = phi i32 [%1, %catch.1]
2071 // br %label cleanuppad
2072 // cleanuppad:
2073 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
2074 // [%6, %cleanuppad.from.catch.1]
2075
2076 // Unreachable BB, in case switching on an invalid value in the dispatcher.
2077 auto *UnreachBB = BasicBlock::Create(
2078 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
2079 IRBuilder<> Builder(UnreachBB);
2080 Builder.CreateUnreachable();
2081
2082 // Create a new cleanuppad which will be the dispatcher.
2083 auto *NewCleanupPadBB =
2084 BasicBlock::Create(CleanupPadBB->getContext(),
2085 CleanupPadBB->getName() + Twine(".corodispatch"),
2086 CleanupPadBB->getParent(), CleanupPadBB);
2087 Builder.SetInsertPoint(NewCleanupPadBB);
2088 auto *SwitchType = Builder.getInt8Ty();
2089 auto *SetDispatchValuePN =
2090 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
2091 CleanupPad->removeFromParent();
2092 CleanupPad->insertAfter(SetDispatchValuePN);
2093 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
2094 pred_size(CleanupPadBB));
2095
2096 int SwitchIndex = 0;
2097 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
2098 for (BasicBlock *Pred : Preds) {
2099 // Create a new cleanuppad and move the PHI values to there.
2100 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
2101 CleanupPadBB->getName() +
2102 Twine(".from.") + Pred->getName(),
2103 CleanupPadBB->getParent(), CleanupPadBB);
2104 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
2105 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
2106 Pred->getName());
2107 Builder.SetInsertPoint(CaseBB);
2108 Builder.CreateBr(CleanupPadBB);
2109 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
2110
2111 // Update this Pred to the new unwind point.
2112 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
2113
2114 // Setup the switch in the dispatcher.
2115 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2116 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2117 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2118 SwitchIndex++;
2119 }
2120}
2121
2122static void cleanupSinglePredPHIs(Function &F) {
2123 SmallVector<PHINode *, 32> Worklist;
2124 for (auto &BB : F) {
2125 for (auto &Phi : BB.phis()) {
2126 if (Phi.getNumIncomingValues() == 1) {
2127 Worklist.push_back(&Phi);
2128 } else
2129 break;
2130 }
2131 }
2132 while (!Worklist.empty()) {
2133 auto *Phi = Worklist.pop_back_val();
2134 auto *OriginalValue = Phi->getIncomingValue(0);
2135 Phi->replaceAllUsesWith(OriginalValue);
2136 }
2137}
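// For example (illustrative only), a single-incoming PHI such as
//
//   bb.from.entry:
//     %n.pre = phi i32 [ %n, %entry ]
//
// has all of its uses redirected to %n by the loop above; the now-trivially
// dead PHI is left for later cleanup.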
2138
2139static void rewritePHIs(BasicBlock &BB) {
2140 // For every incoming edge we will create a block holding all
2141 // incoming values in single-value PHI nodes.
2142 //
2143 // loop:
2144 // %n.val = phi i32[%n, %entry], [%inc, %loop]
2145 //
2146 // It will create:
2147 //
2148 // loop.from.entry:
2149 // %n.loop.pre = phi i32 [%n, %entry]
2150 // br %label loop
2151 // loop.from.loop:
2152 // %inc.loop.pre = phi i32 [%inc, %loop]
2153 // br %label loop
2154 //
2155 // After this rewrite, further analysis will ignore any phi nodes with more
2156 // than one incoming edge.
2157
2158 // TODO: Simplify PHINodes in the basic block to remove duplicate
2159 // predecessors.
2160
2161 // Special case for CleanupPad: all EH blocks must have the same unwind edge
2162 // so we need to create an additional "dispatcher" block.
2163 if (auto *CleanupPad =
2164 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
2165 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2166 for (BasicBlock *Pred : Preds) {
2167 if (CatchSwitchInst *CS =
2168 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2169 // CleanupPad with a CatchSwitch predecessor: therefore this is an
2170 // unwind destination that needs to be handled specially.
2171 assert(CS->getUnwindDest() == &BB);
2172 (void)CS;
2173 rewritePHIsForCleanupPad(&BB, CleanupPad);
2174 return;
2175 }
2176 }
2177 }
2178
2179 LandingPadInst *LandingPad = nullptr;
2180 PHINode *ReplPHI = nullptr;
2181 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
2182 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
2183 // We replace the original landing pad with a PHINode that will collect the
2184 // results from all of them.
2185 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
2186 ReplPHI->insertBefore(LandingPad->getIterator());
2187 ReplPHI->takeName(LandingPad);
2188 LandingPad->replaceAllUsesWith(ReplPHI);
2189 // We will erase the original landing pad at the end of this function after
2190 // ehAwareSplitEdge cloned it in the transition blocks.
2191 }
2192
2193 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2194 for (BasicBlock *Pred : Preds) {
2195 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
2196 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
2197
2198 // Stop the moving of values at ReplPHI, as this is either null or the PHI
2199 // that replaced the landing pad.
2200 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
2201 }
2202
2203 if (LandingPad) {
2204 // Calls to ehAwareSplitEdge above cloned the original landing pad;
2205 // we no longer need it.
2206 LandingPad->eraseFromParent();
2207 }
2208}
2209
2210static void rewritePHIs(Function &F) {
2211 SmallVector<BasicBlock *, 8> WorkList;
2212
2213 for (BasicBlock &BB : F)
2214 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
2215 if (PN->getNumIncomingValues() > 1)
2216 WorkList.push_back(&BB);
2217
2218 for (BasicBlock *BB : WorkList)
2219 rewritePHIs(*BB);
2220}
2221
2222/// Default materializable callback
2223// Check for instructions that we can recreate on resume as opposed to
2224// spilling the result into the coroutine frame.
2225bool coro::defaultMaterializable(Instruction &V) {
2226 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2227 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2228}
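// Illustrative example (not from the source): given
//
//   %addr = getelementptr inbounds i32, ptr %base, i64 %i
//   %s = call i8 @llvm.coro.suspend(token none, i1 false)
//   ...
//   %v = load i32, ptr %addr
//
// the GEP is materializable, so instead of spilling %addr to the frame it can
// be cloned after the suspend point (provided %base and %i are themselves
// available there, e.g. spilled or rematerialized in turn).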
2229
2230// Check for structural coroutine intrinsics that should not be spilled into
2231// the coroutine frame.
2232static bool isCoroutineStructureIntrinsic(Instruction &I) {
2233 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
2234 isa<CoroSuspendInst>(&I);
2235}
2236
2237// For each instruction identified as materializable across the suspend point,
2238// and its associated DAG of other rematerializable instructions,
2239// recreate the DAG of instructions after the suspend point.
2240static void rewriteMaterializableInstructions(
2241 const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8>
2242 &AllRemats) {
2243 // This has to be done in 2 phases:
2244 // 1) Do the remats and record the required defs to be replaced in the
2245 // original use instructions.
2246 // 2) Once all the remats are complete, replace the uses in the final
2247 // instructions with the new defs.
2248 typedef struct {
2249 Instruction *Use;
2250 Instruction *Def;
2251 Instruction *Remat;
2252 } ProcessNode;
2253
2254 SmallVector<ProcessNode> FinalInstructionsToProcess;
2255
2256 for (const auto &E : AllRemats) {
2257 Instruction *Use = E.first;
2258 Instruction *CurrentMaterialization = nullptr;
2259 RematGraph *RG = E.second.get();
2260 ReversePostOrderTraversal<RematGraph *> RPOT(RG);
2261 SmallVector<Instruction *> InstructionsToProcess;
2262
2263 // If the target use is actually a suspend instruction then we have to
2264 // insert the remats into the end of the predecessor (there should only be
2265 // one). This is so that suspend blocks always have the suspend instruction
2266 // as the first instruction.
2267 auto InsertPoint = &*Use->getParent()->getFirstInsertionPt();
2268 if (isa<AnyCoroSuspendInst>(Use)) {
2269 BasicBlock *SuspendPredecessorBlock =
2270 Use->getParent()->getSinglePredecessor();
2271 assert(SuspendPredecessorBlock && "malformed coro suspend instruction");
2272 InsertPoint = SuspendPredecessorBlock->getTerminator();
2273 }
2274
2275 // Note: skip the first instruction as this is the actual use that we're
2276 // rematerializing everything for.
2277 auto I = RPOT.begin();
2278 ++I;
2279 for (; I != RPOT.end(); ++I) {
2280 Instruction *D = (*I)->Node;
2281 CurrentMaterialization = D->clone();
2282 CurrentMaterialization->setName(D->getName());
2283 CurrentMaterialization->insertBefore(InsertPoint);
2284 InsertPoint = CurrentMaterialization;
2285
2286 // Replace all uses of Def in the instructions being added as part of this
2287 // rematerialization group
2288 for (auto &I : InstructionsToProcess)
2289 I->replaceUsesOfWith(D, CurrentMaterialization);
2290
2291 // Don't replace the final use at this point as this can cause problems
2292 // for other materializations. Instead, for any final use that uses a
2293 // define that's being rematerialized, record the replace values
2294 for (unsigned i = 0, E = Use->getNumOperands(); i != E; ++i)
2295 if (Use->getOperand(i) == D) // Is this operand pointing to oldval?
2296 FinalInstructionsToProcess.push_back(
2297 {Use, D, CurrentMaterialization});
2298
2299 InstructionsToProcess.push_back(CurrentMaterialization);
2300 }
2301 }
2302
2303 // Finally, replace the uses with the defines that we've just rematerialized
2304 for (auto &R : FinalInstructionsToProcess) {
2305 if (auto *PN = dyn_cast<PHINode>(R.Use)) {
2306 assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
2307 "values in the PHINode");
2308 PN->replaceAllUsesWith(R.Remat);
2309 PN->eraseFromParent();
2310 continue;
2311 }
2312 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2313 }
2314}
2315
2316// Splits the block at a particular instruction unless it is the first
2317// instruction in the block with a single predecessor.
2318static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2319 auto *BB = I->getParent();
2320 if (&BB->front() == I) {
2321 if (BB->getSinglePredecessor()) {
2322 BB->setName(Name);
2323 return BB;
2324 }
2325 }
2326 return BB->splitBasicBlock(I, Name);
2327}
2328
2329// Split above and below a particular instruction so that it
2330// will be all alone by itself in a block.
2331static void splitAround(Instruction *I, const Twine &Name) {
2332 splitBlockIfNotFirst(I, Name);
2333 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2334}
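// Roughly (block names are made up), after splitAround() a save/suspend pair
//
//   %save = call token @llvm.coro.save(ptr null)
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// ends up as
//
//   CoroSave:
//     %save = call token @llvm.coro.save(ptr null)
//     br label %CoroSuspend
//   CoroSuspend:
//     %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//     br label %AfterCoroSuspend
//
// so each suspend-related intrinsic sits at the front of its own block.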
2335
2336static bool isSuspendBlock(BasicBlock *BB) {
2337 return isa<AnyCoroSuspendInst>(BB->front());
2338}
2339
2340typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;
2341
2342/// Does control flow starting at the given block ever reach a suspend
2343/// instruction before reaching a block in VisitedOrFreeBBs?
2344static bool isSuspendReachableFrom(BasicBlock *From,
2345 VisitedBlocksSet &VisitedOrFreeBBs) {
2346 // Eagerly try to add this block to the visited set. If it's already
2347 // there, stop recursing; this path doesn't reach a suspend before
2348 // either looping or reaching a freeing block.
2349 if (!VisitedOrFreeBBs.insert(From).second)
2350 return false;
2351
2352 // We assume that we'll already have split suspends into their own blocks.
2353 if (isSuspendBlock(From))
2354 return true;
2355
2356 // Recurse on the successors.
2357 for (auto *Succ : successors(From)) {
2358 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2359 return true;
2360 }
2361
2362 return false;
2363}
2364
2365/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2366/// suspend point?
2367static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2368 // Seed the visited set with all the basic blocks containing a free
2369 // so that we won't pass them up.
2370 VisitedBlocksSet VisitedOrFreeBBs;
2371 for (auto *User : AI->users()) {
2372 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2373 VisitedOrFreeBBs.insert(FI->getParent());
2374 }
2375
2376 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2377}
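// For instance (illustrative only; exact operands may differ), a coro.alloca
// whose whole alloc/free range contains no suspend is "local":
//
//   %t = call token @llvm.coro.alloca.alloc(i64 64, i32 8)
//   %p = call ptr @llvm.coro.alloca.get(token %t)
//   ...no suspend here...
//   call void @llvm.coro.alloca.free(token %t)
//
// Since no suspend is reachable from the allocation before a free, the memory
// can stay on the stack of whichever function happens to be running.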
2378
2379/// After we split the coroutine, will the given basic block be along
2380/// an obvious exit path for the resumption function?
2381static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2382 unsigned depth = 3) {
2383 // If we've bottomed out our depth count, stop searching and assume
2384 // that the path might loop back.
2385 if (depth == 0) return false;
2386
2387 // If this is a suspend block, we're about to exit the resumption function.
2388 if (isSuspendBlock(BB)) return true;
2389
2390 // Recurse into the successors.
2391 for (auto *Succ : successors(BB)) {
2392 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2393 return false;
2394 }
2395
2396 // If none of the successors leads back in a loop, we're on an exit/abort.
2397 return true;
2398}
2399
2400static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2401 // Look for a free that isn't sufficiently obviously followed by
2402 // either a suspend or a termination, i.e. something that will leave
2403 // the coro resumption frame.
2404 for (auto *U : AI->users()) {
2405 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2406 if (!FI) continue;
2407
2408 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2409 return true;
2410 }
2411
2412 // If we never found one, we don't need a stack save.
2413 return false;
2414}
2415
2416/// Turn each of the given local allocas into a normal (dynamic) alloca
2417/// instruction.
2418static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2419 SmallVectorImpl<Instruction*> &DeadInsts) {
2420 for (auto *AI : LocalAllocas) {
2421 IRBuilder<> Builder(AI);
2422
2423 // Save the stack depth. Try to avoid doing this if the stackrestore
2424 // is going to immediately precede a return or something.
2425 Value *StackSave = nullptr;
2426 if (localAllocaNeedsStackSave(AI))
2427 StackSave = Builder.CreateStackSave();
2428
2429 // Allocate memory.
2430 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2431 Alloca->setAlignment(AI->getAlignment());
2432
2433 for (auto *U : AI->users()) {
2434 // Replace gets with the allocation.
2435 if (isa<CoroAllocaGetInst>(U)) {
2436 U->replaceAllUsesWith(Alloca);
2437
2438 // Replace frees with stackrestores. This is safe because
2439 // alloca.alloc is required to obey a stack discipline, although we
2440 // don't enforce that structurally.
2441 } else {
2442 auto FI = cast<CoroAllocaFreeInst>(U);
2443 if (StackSave) {
2444 Builder.SetInsertPoint(FI);
2445 Builder.CreateStackRestore(StackSave);
2446 }
2447 }
2448 DeadInsts.push_back(cast<Instruction>(U));
2449 }
2450
2451 DeadInsts.push_back(AI);
2452 }
2453}
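// A rough sketch of the lowering above (illustrative only): for a local
// coro.alloca that needs a stack save,
//
//   %t = call token @llvm.coro.alloca.alloc(i64 %size, i32 8)
//   %p = call ptr @llvm.coro.alloca.get(token %t)
//   call void @llvm.coro.alloca.free(token %t)
//
// becomes, roughly,
//
//   %save = call ptr @llvm.stacksave()
//   %p = alloca i8, i64 %size, align 8
//   call void @llvm.stackrestore(ptr %save)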
2454
2455/// Turn the given coro.alloca.alloc call into a dynamic allocation.
2456/// This happens during the all-instructions iteration, so it must not
2457/// delete the call.
2458static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2459 coro::Shape &Shape,
2460 SmallVectorImpl<Instruction*> &DeadInsts) {
2461 IRBuilder<> Builder(AI);
2462 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2463
2464 for (User *U : AI->users()) {
2465 if (isa<CoroAllocaGetInst>(U)) {
2466 U->replaceAllUsesWith(Alloc);
2467 } else {
2468 auto FI = cast<CoroAllocaFreeInst>(U);
2469 Builder.SetInsertPoint(FI);
2470 Shape.emitDealloc(Builder, Alloc, nullptr);
2471 }
2472 DeadInsts.push_back(cast<Instruction>(U));
2473 }
2474
2475 // Push this on last so that it gets deleted after all the others.
2476 DeadInsts.push_back(AI);
2477
2478 // Return the new allocation value so that we can check for needed spills.
2479 return cast<Instruction>(Alloc);
2480}
2481
2482/// Get the current swifterror value.
2483static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2484 coro::Shape &Shape) {
2485 // Make a fake function pointer as a sort of intrinsic.
2486 auto FnTy = FunctionType::get(ValueTy, {}, false);
2487 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2488
2489 auto Call = Builder.CreateCall(FnTy, Fn, {});
2490 Shape.SwiftErrorOps.push_back(Call);
2491
2492 return Call;
2493}
2494
2495/// Set the given value as the current swifterror value.
2496///
2497/// Returns a slot that can be used as a swifterror slot.
2498static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2499 coro::Shape &Shape) {
2500 // Make a fake function pointer as a sort of intrinsic.
2501 auto FnTy = FunctionType::get(Builder.getPtrTy(),
2502 {V->getType()}, false);
2503 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2504
2505 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2506 Shape.SwiftErrorOps.push_back(Call);
2507
2508 return Call;
2509}
2510
2511/// Set the swifterror value from the given alloca before a call,
2512/// then put it back in the alloca afterwards.
2513///
2514/// Returns an address that will stand in for the swifterror slot
2515/// until splitting.
2516static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2517 AllocaInst *Alloca,
2518 coro::Shape &Shape) {
2519 auto ValueTy = Alloca->getAllocatedType();
2520 IRBuilder<> Builder(Call);
2521
2522 // Load the current value from the alloca and set it as the
2523 // swifterror value.
2524 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2525 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2526
2527 // Move to after the call. Since swifterror only has a guaranteed
2528 // value on normal exits, we can ignore implicit and explicit unwind
2529 // edges.
2530 if (isa<CallInst>(Call)) {
2531 Builder.SetInsertPoint(Call->getNextNode());
2532 } else {
2533 auto Invoke = cast<InvokeInst>(Call);
2534 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2535 }
2536
2537 // Get the current swifterror value and store it to the alloca.
2538 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2539 Builder.CreateStore(ValueAfterCall, Alloca);
2540
2541 return Addr;
2542}
2543
2544/// Eliminate a formerly-swifterror alloca by inserting the get/set
2545/// intrinsics and attempting to MemToReg the alloca away.
2546static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2547 coro::Shape &Shape) {
2548 for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2549 // swifterror values can only be used in very specific ways.
2550 // We take advantage of that here.
2551 auto User = Use.getUser();
2552 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2553 continue;
2554
2555 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2556 auto Call = cast<Instruction>(User);
2557
2558 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2559
2560 // Use the returned slot address as the call argument.
2561 Use.set(Addr);
2562 }
2563
2564 // All the uses should be loads and stores now.
2565 assert(isAllocaPromotable(Alloca));
2566}
2567
2568/// "Eliminate" a swifterror argument by reducing it to the alloca case
2569/// and then loading and storing in the prologue and epilog.
2570///
2571/// The argument keeps the swifterror flag.
2572static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2573 coro::Shape &Shape,
2574 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2575 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2576
2577 auto ArgTy = cast<PointerType>(Arg.getType());
2578 auto ValueTy = PointerType::getUnqual(F.getContext());
2579
2580 // Reduce to the alloca case:
2581
2582 // Create an alloca and replace all uses of the arg with it.
2583 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2584 Arg.replaceAllUsesWith(Alloca);
2585
2586 // Set an initial value in the alloca. swifterror is always null on entry.
2587 auto InitialValue = Constant::getNullValue(ValueTy);
2588 Builder.CreateStore(InitialValue, Alloca);
2589
2590 // Find all the suspends in the function and save and restore around them.
2591 for (auto *Suspend : Shape.CoroSuspends) {
2592 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2593 }
2594
2595 // Find all the coro.ends in the function and restore the error value.
2596 for (auto *End : Shape.CoroEnds) {
2597 Builder.SetInsertPoint(End);
2598 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2599 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2600 }
2601
2602 // Now we can use the alloca logic.
2603 AllocasToPromote.push_back(Alloca);
2604 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2605}
2606
2607/// Eliminate all problematic uses of swifterror arguments and allocas
2608/// from the function. We'll fix them up later when splitting the function.
2609static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2610 SmallVector<AllocaInst*, 4> AllocasToPromote;
2611
2612 // Look for a swifterror argument.
2613 for (auto &Arg : F.args()) {
2614 if (!Arg.hasSwiftErrorAttr()) continue;
2615
2616 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2617 break;
2618 }
2619
2620 // Look for swifterror allocas.
2621 for (auto &Inst : F.getEntryBlock()) {
2622 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2623 if (!Alloca || !Alloca->isSwiftError()) continue;
2624
2625 // Clear the swifterror flag.
2626 Alloca->setSwiftError(false);
2627
2628 AllocasToPromote.push_back(Alloca);
2629 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2630 }
2631
2632 // If we have any allocas to promote, compute a dominator tree and
2633 // promote them en masse.
2634 if (!AllocasToPromote.empty()) {
2635 DominatorTree DT(F);
2636 PromoteMemToReg(AllocasToPromote, DT);
2637 }
2638}
2639
2640/// retcon and retcon.once conventions assume that all spill uses can be sunk
2641/// after the coro.begin intrinsic.
2642static void sinkSpillUsesAfterCoroBegin(Function &F,
2643 const FrameDataInfo &FrameData,
2644 CoroBeginInst *CoroBegin) {
2645 DominatorTree Dom(F);
2646
2647 SmallSetVector<Instruction *, 32> ToMove;
2648 SmallVector<Instruction *, 32> Worklist;
2649
2650 // Collect all users that precede coro.begin.
2651 for (auto *Def : FrameData.getAllDefs()) {
2652 for (User *U : Def->users()) {
2653 auto Inst = cast<Instruction>(U);
2654 if (Inst->getParent() != CoroBegin->getParent() ||
2655 Dom.dominates(CoroBegin, Inst))
2656 continue;
2657 if (ToMove.insert(Inst))
2658 Worklist.push_back(Inst);
2659 }
2660 }
2661 // Recursively collect users before coro.begin.
2662 while (!Worklist.empty()) {
2663 auto *Def = Worklist.pop_back_val();
2664 for (User *U : Def->users()) {
2665 auto Inst = cast<Instruction>(U);
2666 if (Dom.dominates(CoroBegin, Inst))
2667 continue;
2668 if (ToMove.insert(Inst))
2669 Worklist.push_back(Inst);
2670 }
2671 }
2672
2673 // Sort by dominance.
2674 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2675 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2676 // If a dominates b it should precede (<) b.
2677 return Dom.dominates(A, B);
2678 });
2679
2680 Instruction *InsertPt = CoroBegin->getNextNode();
2681 for (Instruction *Inst : InsertionList)
2682 Inst->moveBefore(InsertPt);
2683}
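// For example (illustrative only), if both of these precede coro.begin and
// feed a value that must be spilled:
//
//   %a = add i32 %x, 1
//   %b = mul i32 %a, %a
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
//
// both are collected into ToMove; sorting by dominance keeps %a ahead of %b,
// so after moving them past coro.begin the def still precedes its use.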
2684
2685/// For each local variable whose users all live inside a single suspended
2686/// region, sink its lifetime.start markers to right after the suspend block.
2687/// Doing so minimizes the lifetime of each variable, hence minimizing the
2688/// amount of data we end up putting on the frame.
2689static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2690 SuspendCrossingInfo &Checker) {
2691 if (F.hasOptNone())
2692 return;
2693
2694 DominatorTree DT(F);
2695
2696 // Collect all possible basic blocks which may dominate all uses of allocas.
2697 SmallPtrSet<BasicBlock *, 4> DomSet;
2698 DomSet.insert(&F.getEntryBlock());
2699 for (auto *CSI : Shape.CoroSuspends) {
2700 BasicBlock *SuspendBlock = CSI->getParent();
2701 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2702 "should have split coro.suspend into its own block");
2703 DomSet.insert(SuspendBlock->getSingleSuccessor());
2704 }
2705
2706 for (Instruction &I : instructions(F)) {
2707 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2708 if (!AI)
2709 continue;
2710
2711 for (BasicBlock *DomBB : DomSet) {
2712 bool Valid = true;
2713 SmallVector<Instruction *, 1> Lifetimes;
2714
2715 auto isLifetimeStart = [](Instruction* I) {
2716 if (auto* II = dyn_cast<IntrinsicInst>(I))
2717 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2718 return false;
2719 };
2720
2721 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2722 if (isLifetimeStart(U)) {
2723 Lifetimes.push_back(U);
2724 return true;
2725 }
2726 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2727 return false;
2728 if (isLifetimeStart(U->user_back())) {
2729 Lifetimes.push_back(U->user_back());
2730 return true;
2731 }
2732 return false;
2733 };
2734
2735 for (User *U : AI->users()) {
2736 Instruction *UI = cast<Instruction>(U);
2737 // For all users except lifetime.start markers, if they are all
2738 // dominated by one of the basic blocks and do not cross
2739 // suspend points as well, then there is no need to spill the
2740 // instruction.
2741 if (!DT.dominates(DomBB, UI->getParent()) ||
2742 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2743 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2744 // markers.
2745 if (collectLifetimeStart(UI, AI))
2746 continue;
2747 Valid = false;
2748 break;
2749 }
2750 }
2751 // Sink lifetime.start markers to the dominating block when they are
2752 // only used outside the region.
2753 if (Valid && Lifetimes.size() != 0) {
2754 auto *NewLifetime = Lifetimes[0]->clone();
2755 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2756 NewLifetime->insertBefore(DomBB->getTerminator());
2757
2758 // All the lifetime.start markers outside the region are no longer necessary.
2759 for (Instruction *S : Lifetimes)
2760 S->eraseFromParent();
2761
2762 break;
2763 }
2764 }
2765 }
2766}
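// Illustrative example (names made up): if every real use of %tmp sits after
// the suspend,
//
//   entry:
//     %tmp = alloca i32
//     call void @llvm.lifetime.start.p0(i64 4, ptr %tmp)
//     ...
//   await.ready:                       ; single successor of the suspend block
//     store i32 1, ptr %tmp
//     %v = load i32, ptr %tmp
//
// the lifetime.start is cloned in front of the terminator of %await.ready and
// the original marker erased, so %tmp no longer appears live across the
// suspend and can stay on the stack instead of the frame.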
2767
2768static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape,
2769 const SuspendCrossingInfo &Checker,
2770 SmallVectorImpl<AllocaInfo> &Allocas,
2771 const DominatorTree &DT) {
2772 if (Shape.CoroSuspends.empty())
2773 return;
2774
2775 // The PromiseAlloca will be specially handled since it needs to be in a
2776 // fixed position in the frame.
2777 if (AI == Shape.SwitchLowering.PromiseAlloca)
2778 return;
2779
2780 // The __coro_gro alloca should outlive the promise, make sure we
2781 // keep it outside the frame.
2782 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
2783 return;
2784
2785 // The code that uses lifetime.start intrinsic does not work for functions
2786 // with loops without exit. Disable it on ABIs we know to generate such
2787 // code.
2788 bool ShouldUseLifetimeStartInfo =
2789 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2790 Shape.ABI != coro::ABI::RetconOnce);
2791 AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT,
2792 *Shape.CoroBegin, Checker,
2793 ShouldUseLifetimeStartInfo};
2794 Visitor.visitPtr(*AI);
2795 if (!Visitor.getShouldLiveOnFrame())
2796 return;
2797 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2798 Visitor.getMayWriteBeforeCoroBegin());
2799}
2800
2801void coro::salvageDebugInfo(
2802 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
2803 DbgVariableIntrinsic *DVI, bool OptimizeFrame, bool UseEntryValue) {
2804 Function *F = DVI->getFunction();
2805 IRBuilder<> Builder(F->getContext());
2806 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2807 while (isa<IntrinsicInst>(InsertPt))
2808 ++InsertPt;
2809 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2810 DIExpression *Expr = DVI->getExpression();
2811 // Follow the pointer arithmetic all the way to the incoming
2812 // function argument and convert into a DIExpression.
2813 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2814 Value *Storage = DVI->getVariableLocationOp(0);
2815 Value *OriginalStorage = Storage;
2816
2817 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2818 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2819 Storage = LdInst->getPointerOperand();
2820 // FIXME: This is a heuristic that works around the fact that
2821 // LLVM IR debug intrinsics cannot yet distinguish between
2822 // memory and value locations: Because a dbg.declare(alloca) is
2823 // implicitly a memory location no DW_OP_deref operation for the
2824 // last direct load from an alloca is necessary. This condition
2825 // effectively drops the *last* DW_OP_deref in the expression.
2826 if (!SkipOutermostLoad)
2828 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2829 Storage = StInst->getValueOperand();
2830 } else {
2831 SmallVector<uint64_t, 16> Ops;
2832 SmallVector<Value *, 0> AdditionalValues;
2833 Value *Op = llvm::salvageDebugInfoImpl(
2834 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2835 AdditionalValues);
2836 if (!Op || !AdditionalValues.empty()) {
2837 // If salvaging failed or salvaging produced more than one location
2838 // operand, give up.
2839 break;
2840 }
2841 Storage = Op;
2842 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2843 }
2844 SkipOutermostLoad = false;
2845 }
2846 if (!Storage)
2847 return;
2848
2849 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2850 const bool IsSwiftAsyncArg =
2851 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2852
2853 // Swift async arguments are described by an entry value of the ABI-defined
2854 // register containing the coroutine context.
2855 // Entry values in variadic expressions are not supported.
2856 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
2857 Expr->isSingleLocationExpression())
2858 Expr = DIExpression::prepend(Expr, DIExpression::EntryValue);
2859
2860 // If the coroutine frame is an Argument, store it in an alloca to improve
2861 // its availability (e.g. registers may be clobbered).
2862 // Avoid this if optimizations are enabled (they would remove the alloca) or
2863 // if the value is guaranteed to be available through other means (e.g. swift
2864 // ABI guarantees).
2865 if (StorageAsArg && !OptimizeFrame && !IsSwiftAsyncArg) {
2866 auto &Cached = ArgToAllocaMap[StorageAsArg];
2867 if (!Cached) {
2868 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2869 Storage->getName() + ".debug");
2870 Builder.CreateStore(Storage, Cached);
2871 }
2872 Storage = Cached;
2873 // FIXME: LLVM lacks nuanced semantics to differentiate between
2874 // memory and direct locations at the IR level. The backend will
2875 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2876 // location. Thus, if there are deref and offset operations in the
2877 // expression, we need to add a DW_OP_deref at the *start* of the
2878 // expression to first load the contents of the alloca before
2879 // adjusting it with the expression.
2880 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2881 }
2882
2883 DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2884 DVI->setExpression(Expr);
2885 // We only hoist dbg.declare today since it doesn't make sense to hoist
2886 // dbg.value since it does not have the same function wide guarantees that
2887 // dbg.declare does.
2888 if (isa<DbgDeclareInst>(DVI)) {
2889 std::optional<BasicBlock::iterator> InsertPt;
2890 if (auto *I = dyn_cast<Instruction>(Storage))
2891 InsertPt = I->getInsertionPointAfterDef();
2892 else if (isa<Argument>(Storage))
2893 InsertPt = F->getEntryBlock().begin();
2894 if (InsertPt)
2895 DVI->moveBefore(*(*InsertPt)->getParent(), *InsertPt);
2896 }
2897}
2898
2899static void doRematerializations(
2900 Function &F, SuspendCrossingInfo &Checker,
2901 const std::function<bool(Instruction &)> &MaterializableCallback) {
2902 if (F.hasOptNone())
2903 return;
2904
2905 SpillInfo Spills;
2906
2907 // See if there are materializable instructions across suspend points
2908 // We record these as the starting point to also identify materializable
2909 // defs of uses in these operations
2910 for (Instruction &I : instructions(F)) {
2911 if (!MaterializableCallback(I))
2912 continue;
2913 for (User *U : I.users())
2914 if (Checker.isDefinitionAcrossSuspend(I, U))
2915 Spills[&I].push_back(cast<Instruction>(U));
2916 }
2917
2918 // Process each of the identified rematerializable instructions
2919 // and add predecessor instructions that can also be rematerialized.
2920 // This is actually a graph of instructions since we could potentially
2921 // have multiple uses of a def in the set of predecessor instructions.
2922 // The approach here is to maintain a graph of instructions for each bottom
2923 // level instruction - where we have a unique set of instructions (nodes)
2924 // and edges between them. We then walk the graph in reverse post-dominator
2925 // order to insert them past the suspend point, but ensure that ordering is
2926 // correct. We also rely on CSE removing duplicate defs for remats of
2927 // different instructions with a def in common (rather than maintaining more
2928 // complex graphs for each suspend point)
2929
2930 // We can do this by adding new nodes to the list for each suspend
2931 // point. Then using standard GraphTraits to give a reverse post-order
2932 // traversal when we insert the nodes after the suspend.
2933 SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8> AllRemats;
2934 for (auto &E : Spills) {
2935 for (Instruction *U : E.second) {
2936 // Don't process a user twice (this can happen if the instruction uses
2937 // more than one rematerializable def)
2938 if (AllRemats.count(U))
2939 continue;
2940
2941 // Constructor creates the whole RematGraph for the given Use
2942 auto RematUPtr =
2943 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
2944
2945 LLVM_DEBUG(dbgs() << "***** Next remat group *****\n";
2946 ReversePostOrderTraversal<RematGraph *> RPOT(RematUPtr.get());
2947 for (auto I = RPOT.begin(); I != RPOT.end();
2948 ++I) { (*I)->Node->dump(); } dbgs()
2949 << "\n";);
2950
2951 AllRemats[U] = std::move(RematUPtr);
2952 }
2953 }
2954
2955 // Rewrite materializable instructions to be materialized at the use
2956 // point.
2957 LLVM_DEBUG(dumpRemats("Materializations", AllRemats));
2958 rewriteMaterializableInstructions(AllRemats);
2959}
2960
2961void coro::buildCoroutineFrame(
2962 Function &F, Shape &Shape,
2963 const std::function<bool(Instruction &)> &MaterializableCallback) {
2964 // Don't eliminate swifterror in async functions that won't be split.
2965 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2966 eliminateSwiftError(F, Shape);
2967
2968 if (Shape.ABI == coro::ABI::Switch &&
2969 Shape.SwitchLowering.PromiseAlloca) {
2970 Shape.getSwitchCoroId()->clearPromise();
2971 }
2972
2973 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2974 // intrinsics are in their own blocks to simplify the logic of building up
2975 // SuspendCrossing data.
2976 for (auto *CSI : Shape.CoroSuspends) {
2977 if (auto *Save = CSI->getCoroSave())
2978 splitAround(Save, "CoroSave");
2979 splitAround(CSI, "CoroSuspend");
2980 }
2981
2982 // Put CoroEnds into their own blocks.
2983 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2984 splitAround(CE, "CoroEnd");
2985
2986 // Emit the musttail call function in a new block before the CoroEnd.
2987 // We do this here so that the right suspend crossing info is computed for
2988 // the uses of the musttail call function call. (Arguments to the coro.end
2989 // instructions would be ignored)
2990 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2991 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2992 if (!MustTailCallFn)
2993 continue;
2994 IRBuilder<> Builder(AsyncEnd);
2995 SmallVector<Value *, 8> Args(AsyncEnd->args());
2996 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2997 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2998 Arguments, Builder);
2999 splitAround(Call, "MustTailCall.Before.CoroEnd");
3000 }
3001 }
3002
3003 // Later code makes structural assumptions about single-predecessor PHIs,
3004 // e.g. that they are not live across a suspend point.
3005 cleanupSinglePredPHIs(F);
3006
3007 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
3008 // never have its definition separated from the PHI by the suspend point.
3009 rewritePHIs(F);
3010
3011 // Build suspend crossing info.
3012 SuspendCrossingInfo Checker(F, Shape);
3013
3014 doRematerializations(F, Checker, MaterializableCallback);
3015
3016 FrameDataInfo FrameData;
3017 SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
3018 SmallVector<Instruction*, 4> DeadInstructions;
3019 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
3020 Shape.ABI != coro::ABI::RetconOnce)
3021 sinkLifetimeStartMarkers(F, Shape, Checker);
3022
3023 // Collect the spills for arguments and other not-materializable values.
3024 for (Argument &A : F.args())
3025 for (User *U : A.users())
3026 if (Checker.isDefinitionAcrossSuspend(A, U))
3027 FrameData.Spills[&A].push_back(cast<Instruction>(U));
3028
3029 const DominatorTree DT(F);
3030 for (Instruction &I : instructions(F)) {
3031 // Values returned from coroutine structure intrinsics should not be part
3032 // of the Coroutine Frame.
3033 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
3034 continue;
3035
3036 // Handle alloca.alloc specially here.
3037 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
3038 // Check whether the alloca's lifetime is bounded by suspend points.
3039 if (isLocalAlloca(AI)) {
3040 LocalAllocas.push_back(AI);
3041 continue;
3042 }
3043
3044 // If not, do a quick rewrite of the alloca and then add spills of
3045 // the rewritten value. The rewrite doesn't invalidate anything in
3046 // Spills because the other alloca intrinsics have no other operands
3047 // besides AI, and it doesn't invalidate the iteration because we delay
3048 // erasing AI.
3049 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
3050
3051 for (User *U : Alloc->users()) {
3052 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3053 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3054 }
3055 continue;
3056 }
3057
3058 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
3059 if (isa<CoroAllocaGetInst>(I))
3060 continue;
3061
3062 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
3063 collectFrameAlloca(AI, Shape, Checker, FrameData.Allocas, DT);
3064 continue;
3065 }
3066
3067 for (User *U : I.users())
3068 if (Checker.isDefinitionAcrossSuspend(I, U)) {
3069 // We cannot spill a token.
3070 if (I.getType()->isTokenTy())
3071 report_fatal_error(
3072 "token definition is separated from the use by a suspend point");
3073 FrameData.Spills[&I].push_back(cast<Instruction>(U));
3074 }
3075 }
3076
3077 LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
3078
3079 // We don't want the layout of the coroutine frame to be affected by debug
3080 // information, so we only choose to salvage dbg.value instructions whose
3081 // value is already going to be put in the frame.
3082 // The dbg.values for allocas are handled specially.
3083 for (auto &Iter : FrameData.Spills) {
3084 auto *V = Iter.first;
3085 SmallVector<DbgValueInst *, 16> DVIs;
3086 findDbgValues(DVIs, V);
3087 for (DbgValueInst *DVI : DVIs)
3088 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3089 FrameData.Spills[V].push_back(DVI);
3090 }
3091
3092 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
3093 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
3094 Shape.ABI == coro::ABI::Async)
3095 sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
3096 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
3097 Shape.FramePtr = Shape.CoroBegin;
3098 // For now, this works for C++ programs only.
3099 buildFrameDebugInfo(F, Shape, FrameData);
3100 insertSpills(FrameData, Shape);
3101 lowerLocalAllocas(LocalAllocas, DeadInstructions);
3102
3103 for (auto *I : DeadInstructions)
3104 I->eraseFromParent();
3105}
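// For reference, a rough sketch of the resulting frame for the switch ABI
// (field order, names, and the index width are illustrative; see
// buildFrameType() for the real layout):
//
//   %f.Frame = type {
//     ptr,        ; resume function pointer
//     ptr,        ; destroy function pointer
//     %promise,   ; promise alloca, kept at a fixed position
//     i2,         ; suspend index selecting the resume point
//     ...         ; spilled values and allocas collected above
//   }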
static void cleanupSinglePredPHIs(Function &F)
Definition: CoroFrame.cpp:2122
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
Definition: CoroFrame.cpp:2344
static bool isCoroutineStructureIntrinsic(Instruction &I)
Definition: CoroFrame.cpp:2232
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
Definition: CoroFrame.cpp:2340
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
Definition: CoroFrame.cpp:2458
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
Definition: CoroFrame.cpp:1670
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
Definition: CoroFrame.cpp:2609
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
Definition: CoroFrame.cpp:2418
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
Definition: CoroFrame.cpp:2367
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
Definition: CoroFrame.cpp:2498
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
Definition: CoroFrame.cpp:2516
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
Definition: CoroFrame.cpp:960
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
Definition: CoroFrame.cpp:2768
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
Definition: CoroFrame.cpp:2400
static void splitAround(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2331
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
Definition: CoroFrame.cpp:2546
static void rewritePHIs(BasicBlock &BB)
Definition: CoroFrame.cpp:2139
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
Definition: CoroFrame.cpp:2029
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
Definition: CoroFrame.cpp:1015
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
Definition: CoroFrame.cpp:2381
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
Definition: CoroFrame.cpp:2572
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
Definition: CoroFrame.cpp:1102
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Definition: CoroFrame.cpp:1275
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2318
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
Definition: CoroFrame.cpp:2642
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker)
For each local variable that all of its user are only used inside one of suspended region,...
Definition: CoroFrame.cpp:2689
static bool isSuspendBlock(BasicBlock *BB)
Definition: CoroFrame.cpp:2336
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
Definition: CoroFrame.cpp:2047
static void rewriteMaterializableInstructions(const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &AllRemats)
Definition: CoroFrame.cpp:2240
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
Definition: CoroFrame.cpp:569
static StringRef solveTypeName(Type *Ty)
Create name for Type.
Definition: CoroFrame.cpp:977
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
Definition: CoroFrame.cpp:550
static void doRematerializations(Function &F, SuspendCrossingInfo &Checker, const std::function< bool(Instruction &)> &MaterializableCallback)
Definition: CoroFrame.cpp:2899
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
Definition: CoroFrame.cpp:2483
static void dumpRemats(StringRef Title, const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &RM)
Definition: CoroFrame.cpp:559
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Definition: CoroFrame.cpp:1703
@ SmallVectorThreshold
Definition: CoroFrame.cpp:50
Given that RA is a live value
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:478
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:1067
Hexagon Common GEP
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:530
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
Select target instructions out of generic instructions
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
mir Rename Register Operands
LLVMContext & Context
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:58
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:150
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:152
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:125
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:100
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:118
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:129
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:96
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:437
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:506
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:446
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:399
const Instruction & front() const
Definition: BasicBlock.h:460
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:607
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:489
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:519
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:173
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:207
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:228
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1227
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Definition: InstrTypes.h:1730
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1394
unsigned arg_size() const
Definition: InstrTypes.h:1392
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args=std::nullopt, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1691
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
This represents the llvm.coro.alloca.alloc instruction.
Definition: CoroInstr.h:706
Value * getSize() const
Definition: CoroInstr.h:709
This class represents the llvm.coro.begin instruction.
Definition: CoroInstr.h:418
void clearPromise()
Definition: CoroInstr.h:124
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:491
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
Definition: DIBuilder.cpp:493
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
Definition: DIBuilder.cpp:303
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
Definition: DIBuilder.cpp:817
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
Definition: DIBuilder.cpp:686
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
Definition: DIBuilder.cpp:567
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
Definition: DIBuilder.cpp:267
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
Definition: DIBuilder.cpp:666
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
Definition: DIBuilder.cpp:364
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
Definition: DIBuilder.cpp:779
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
Definition: DIBuilder.cpp:1122
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
A scope for locals.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
unsigned getLine() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:720
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:865
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:672
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:874
This represents the llvm.dbg.declare instruction.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
DIExpression * getExpression() const
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:145
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:103
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:164
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:123
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:948
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1772
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Definition: IRBuilder.h:1047
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1806
UnreachableInst * CreateUnreachable()
Definition: IRBuilder.h:1257
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:175
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2095
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1875
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2370
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1748
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
Definition: IRBuilder.h:1137
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1789
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1469
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
Definition: IRBuilder.h:1913
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1802
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1321
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2090
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:563
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1108
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
Definition: IRBuilder.h:1054
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1825
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2385
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Definition: IRBuilder.h:1865
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:510
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2105
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2639
void visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visitBitCastInst(BitCastInst &I)
Definition: InstVisitor.h:187
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
void visitPHINode(PHINode &I)
Definition: InstVisitor.h:175
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
Definition: InstVisitor.h:188
void visitSelectInst(SelectInst &I)
Definition: InstVisitor.h:189
void visitGetElementPtrInst(GetElementPtrInst &I)
Definition: InstVisitor.h:174
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
Definition: Instruction.cpp:79
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:98
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:438
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:71
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:328
const BasicBlock * getParent() const
Definition: Instruction.h:139
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:93
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:75
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:285
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:559
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1461
size_type count(const KeyT &Key) const
Definition: MapVector.h:165
This is the common base class for memset/memcpy/memmove.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:275
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:113
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
iterator find(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:387
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
Definition: SmallString.h:261
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Compute live ranges of allocas.
Definition: StackLifetime.h:37
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:222
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition: DataLayout.h:656
Class to represent struct types.
Definition: DerivedTypes.h:216
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
Definition: Type.cpp:452
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:520
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:342
Multiway switch.
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:333
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:249
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:157
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:879
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:188
self_iterator getIterator()
Definition: ilist_node.h:109
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
A range adaptor for a pair of iterators.
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:672
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ CE
Windows NT (Windows on ARM)
void buildCoroutineFrame(Function &F, Shape &Shape, const std::function< bool(Instruction &)> &MaterializableCallback)
Definition: CoroFrame.cpp:2961
bool defaultMaterializable(Instruction &V)
Default materializable callback.
Definition: CoroFrame.cpp:2225
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic *DVI, bool OptimizeFrame, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
Definition: CoroFrame.cpp:2801
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1742
SourceLanguage
Definition: Dwarf.h:204
bool isCPlusPlus(SourceLanguage S)
Definition: Dwarf.h:212
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< BlockNode * > Block
Definition: RDFGraph.h:392
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
@ Offset
Definition: DWP.cpp:456
TinyPtrVector< DbgDeclareInst * > FindDbgDeclareUses(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1726
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1684
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:332
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:665
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1651
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1740
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2501
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition: Alignment.h:197
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DPValue * > *DPValues=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:125
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1945
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DPValue * > *DPValues=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:120
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
RematGraph::RematNode * NodeRef
Definition: CoroFrame.cpp:436
static ChildIteratorType child_end(NodeRef N)
Definition: CoroFrame.cpp:443
RematGraph::RematNode ** ChildIteratorType
Definition: CoroFrame.cpp:437
static NodeRef getEntryNode(RematGraph *G)
Definition: CoroFrame.cpp:439
static ChildIteratorType child_begin(NodeRef N)
Definition: CoroFrame.cpp:440
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
AsyncLoweringStorage AsyncLowering
Definition: CoroInternal.h:145
StructType * FrameTy
Definition: CoroInternal.h:101
AnyCoroIdRetconInst * getRetconCoroId() const
Definition: CoroInternal.h:153
CoroIdInst * getSwitchCoroId() const
Definition: CoroInternal.h:148
Instruction * getInsertPtAfterFramePtr() const
Definition: CoroInternal.h:243
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroInternal.h:81
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:449
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroInternal.h:82
AllocaInst * getPromiseAlloca() const
Definition: CoroInternal.h:237
bool OptimizeFrame
This would only be true if optimization are enabled.
Definition: CoroInternal.h:108
SwitchLoweringStorage SwitchLowering
Definition: CoroInternal.h:143
CoroBeginInst * CoroBegin
Definition: CoroInternal.h:77
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:472
RetconLoweringStorage RetconLowering
Definition: CoroInternal.h:144
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroInternal.h:78
BasicBlock * AllocaSpillBlock
Definition: CoroInternal.h:105