// LLVM 20.0.0git — CoroFrame.cpp
// (Doxygen page header for this file; retained as a comment.)
1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
10//
11// Using the information discovered we form a Coroutine Frame structure to
12// contain those values. All uses of those values are replaced with appropriate
13// GEP + load from the coroutine frame. At the point of the definition we spill
14// the value into the coroutine frame.
15//===----------------------------------------------------------------------===//
16
#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <deque>
#include <optional>
44
45using namespace llvm;
46
48
49// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
50// "coro-frame", which results in leaner debug spew.
51#define DEBUG_TYPE "coro-suspend-crossing"
52
54
55// Provides two way mapping between the blocks and numbers.
56namespace {
57class BlockToIndexMapping {
59
60public:
61 size_t size() const { return V.size(); }
62
63 BlockToIndexMapping(Function &F) {
64 for (BasicBlock &BB : F)
65 V.push_back(&BB);
66 llvm::sort(V);
67 }
68
69 size_t blockToIndex(BasicBlock const *BB) const {
70 auto *I = llvm::lower_bound(V, BB);
71 assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
72 return I - V.begin();
73 }
74
75 BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
76};
77} // end anonymous namespace
78
79// The SuspendCrossingInfo maintains data that allows to answer a question
80// whether given two BasicBlocks A and B there is a path from A to B that
81// passes through a suspend point.
82//
83// For every basic block 'i' it maintains a BlockData that consists of:
84// Consumes: a bit vector which contains a set of indices of blocks that can
85// reach block 'i'. A block can trivially reach itself.
86// Kills: a bit vector which contains a set of indices of blocks that can
87// reach block 'i' but there is a path crossing a suspend point
88// not repeating 'i' (path to 'i' without cycles containing 'i').
89// Suspend: a boolean indicating whether block 'i' contains a suspend point.
90// End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
91// KillLoop: There is a path from 'i' to 'i' not otherwise repeating 'i' that
92// crosses a suspend point.
93//
94namespace {
95class SuspendCrossingInfo {
96 BlockToIndexMapping Mapping;
97
98 struct BlockData {
99 BitVector Consumes;
100 BitVector Kills;
101 bool Suspend = false;
102 bool End = false;
103 bool KillLoop = false;
104 bool Changed = false;
105 };
107
109 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
110 return llvm::predecessors(BB);
111 }
112
113 BlockData &getBlockData(BasicBlock *BB) {
114 return Block[Mapping.blockToIndex(BB)];
115 }
116
117 /// Compute the BlockData for the current function in one iteration.
118 /// Initialize - Whether this is the first iteration, we can optimize
119 /// the initial case a little bit by manual loop switch.
120 /// Returns whether the BlockData changes in this iteration.
121 template <bool Initialize = false>
122 bool computeBlockData(const ReversePostOrderTraversal<Function *> &RPOT);
123
124public:
125#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
126 void dump() const;
127 void dump(StringRef Label, BitVector const &BV) const;
128#endif
129
130 SuspendCrossingInfo(Function &F, coro::Shape &Shape);
131
132 /// Returns true if there is a path from \p From to \p To crossing a suspend
133 /// point without crossing \p From a 2nd time.
134 bool hasPathCrossingSuspendPoint(BasicBlock *From, BasicBlock *To) const {
135 size_t const FromIndex = Mapping.blockToIndex(From);
136 size_t const ToIndex = Mapping.blockToIndex(To);
137 bool const Result = Block[ToIndex].Kills[FromIndex];
138 LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
139 << " answer is " << Result << "\n");
140 return Result;
141 }
142
143 /// Returns true if there is a path from \p From to \p To crossing a suspend
144 /// point without crossing \p From a 2nd time. If \p From is the same as \p To
145 /// this will also check if there is a looping path crossing a suspend point.
146 bool hasPathOrLoopCrossingSuspendPoint(BasicBlock *From,
147 BasicBlock *To) const {
148 size_t const FromIndex = Mapping.blockToIndex(From);
149 size_t const ToIndex = Mapping.blockToIndex(To);
150 bool Result = Block[ToIndex].Kills[FromIndex] ||
151 (From == To && Block[ToIndex].KillLoop);
152 LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
153 << " answer is " << Result << " (path or loop)\n");
154 return Result;
155 }
156
157 bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
158 auto *I = cast<Instruction>(U);
159
160 // We rewrote PHINodes, so that only the ones with exactly one incoming
161 // value need to be analyzed.
162 if (auto *PN = dyn_cast<PHINode>(I))
163 if (PN->getNumIncomingValues() > 1)
164 return false;
165
166 BasicBlock *UseBB = I->getParent();
167
168 // As a special case, treat uses by an llvm.coro.suspend.retcon or an
169 // llvm.coro.suspend.async as if they were uses in the suspend's single
170 // predecessor: the uses conceptually occur before the suspend.
171 if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
172 UseBB = UseBB->getSinglePredecessor();
173 assert(UseBB && "should have split coro.suspend into its own block");
174 }
175
176 return hasPathCrossingSuspendPoint(DefBB, UseBB);
177 }
178
179 bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
180 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
181 }
182
183 bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
184 auto *DefBB = I.getParent();
185
186 // As a special case, treat values produced by an llvm.coro.suspend.*
187 // as if they were defined in the single successor: the uses
188 // conceptually occur after the suspend.
189 if (isa<AnyCoroSuspendInst>(I)) {
190 DefBB = DefBB->getSingleSuccessor();
191 assert(DefBB && "should have split coro.suspend into its own block");
192 }
193
194 return isDefinitionAcrossSuspend(DefBB, U);
195 }
196
197 bool isDefinitionAcrossSuspend(Value &V, User *U) const {
198 if (auto *Arg = dyn_cast<Argument>(&V))
199 return isDefinitionAcrossSuspend(*Arg, U);
200 if (auto *Inst = dyn_cast<Instruction>(&V))
201 return isDefinitionAcrossSuspend(*Inst, U);
202
204 "Coroutine could only collect Argument and Instruction now.");
205 }
206};
207} // end anonymous namespace
208
209#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
210LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
211 BitVector const &BV) const {
212 dbgs() << Label << ":";
213 for (size_t I = 0, N = BV.size(); I < N; ++I)
214 if (BV[I])
215 dbgs() << " " << Mapping.indexToBlock(I)->getName();
216 dbgs() << "\n";
217}
218
219LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
220 for (size_t I = 0, N = Block.size(); I < N; ++I) {
221 BasicBlock *const B = Mapping.indexToBlock(I);
222 dbgs() << B->getName() << ":\n";
223 dump(" Consumes", Block[I].Consumes);
224 dump(" Kills", Block[I].Kills);
225 }
226 dbgs() << "\n";
227}
228#endif
229
230template <bool Initialize>
231bool SuspendCrossingInfo::computeBlockData(
233 bool Changed = false;
234
235 for (const BasicBlock *BB : RPOT) {
236 auto BBNo = Mapping.blockToIndex(BB);
237 auto &B = Block[BBNo];
238
239 // We don't need to count the predecessors when initialization.
240 if constexpr (!Initialize)
241 // If all the predecessors of the current Block don't change,
242 // the BlockData for the current block must not change too.
243 if (all_of(predecessors(B), [this](BasicBlock *BB) {
244 return !Block[Mapping.blockToIndex(BB)].Changed;
245 })) {
246 B.Changed = false;
247 continue;
248 }
249
250 // Saved Consumes and Kills bitsets so that it is easy to see
251 // if anything changed after propagation.
252 auto SavedConsumes = B.Consumes;
253 auto SavedKills = B.Kills;
254
255 for (BasicBlock *PI : predecessors(B)) {
256 auto PrevNo = Mapping.blockToIndex(PI);
257 auto &P = Block[PrevNo];
258
259 // Propagate Kills and Consumes from predecessors into B.
260 B.Consumes |= P.Consumes;
261 B.Kills |= P.Kills;
262
263 // If block P is a suspend block, it should propagate kills into block
264 // B for every block P consumes.
265 if (P.Suspend)
266 B.Kills |= P.Consumes;
267 }
268
269 if (B.Suspend) {
270 // If block B is a suspend block, it should kill all of the blocks it
271 // consumes.
272 B.Kills |= B.Consumes;
273 } else if (B.End) {
274 // If block B is an end block, it should not propagate kills as the
275 // blocks following coro.end() are reached during initial invocation
276 // of the coroutine while all the data are still available on the
277 // stack or in the registers.
278 B.Kills.reset();
279 } else {
280 // This is reached when B block it not Suspend nor coro.end and it
281 // need to make sure that it is not in the kill set.
282 B.KillLoop |= B.Kills[BBNo];
283 B.Kills.reset(BBNo);
284 }
285
286 if constexpr (!Initialize) {
287 B.Changed = (B.Kills != SavedKills) || (B.Consumes != SavedConsumes);
288 Changed |= B.Changed;
289 }
290 }
291
292 return Changed;
293}
294
295SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
296 : Mapping(F) {
297 const size_t N = Mapping.size();
298 Block.resize(N);
299
300 // Initialize every block so that it consumes itself
301 for (size_t I = 0; I < N; ++I) {
302 auto &B = Block[I];
303 B.Consumes.resize(N);
304 B.Kills.resize(N);
305 B.Consumes.set(I);
306 B.Changed = true;
307 }
308
309 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
310 // the code beyond coro.end is reachable during initial invocation of the
311 // coroutine.
312 for (auto *CE : Shape.CoroEnds)
313 getBlockData(CE->getParent()).End = true;
314
315 // Mark all suspend blocks and indicate that they kill everything they
316 // consume. Note, that crossing coro.save also requires a spill, as any code
317 // between coro.save and coro.suspend may resume the coroutine and all of the
318 // state needs to be saved by that time.
319 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
320 BasicBlock *SuspendBlock = BarrierInst->getParent();
321 auto &B = getBlockData(SuspendBlock);
322 B.Suspend = true;
323 B.Kills |= B.Consumes;
324 };
325 for (auto *CSI : Shape.CoroSuspends) {
326 markSuspendBlock(CSI);
327 if (auto *Save = CSI->getCoroSave())
328 markSuspendBlock(Save);
329 }
330
331 // It is considered to be faster to use RPO traversal for forward-edges
332 // dataflow analysis.
334 computeBlockData</*Initialize=*/true>(RPOT);
335 while (computeBlockData</*Initialize*/ false>(RPOT))
336 ;
337
338 LLVM_DEBUG(dump());
339}
340
341namespace {
342
343// RematGraph is used to construct a DAG for rematerializable instructions
344// When the constructor is invoked with a candidate instruction (which is
345// materializable) it builds a DAG of materializable instructions from that
346// point.
347// Typically, for each instruction identified as re-materializable across a
348// suspend point, a RematGraph will be created.
349struct RematGraph {
350 // Each RematNode in the graph contains the edges to instructions providing
351 // operands in the current node.
352 struct RematNode {
355 RematNode() = default;
356 RematNode(Instruction *V) : Node(V) {}
357 };
358
359 RematNode *EntryNode;
360 using RematNodeMap =
362 RematNodeMap Remats;
363 const std::function<bool(Instruction &)> &MaterializableCallback;
364 SuspendCrossingInfo &Checker;
365
366 RematGraph(const std::function<bool(Instruction &)> &MaterializableCallback,
367 Instruction *I, SuspendCrossingInfo &Checker)
368 : MaterializableCallback(MaterializableCallback), Checker(Checker) {
369 std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(I);
370 EntryNode = FirstNode.get();
371 std::deque<std::unique_ptr<RematNode>> WorkList;
372 addNode(std::move(FirstNode), WorkList, cast<User>(I));
373 while (WorkList.size()) {
374 std::unique_ptr<RematNode> N = std::move(WorkList.front());
375 WorkList.pop_front();
376 addNode(std::move(N), WorkList, cast<User>(I));
377 }
378 }
379
380 void addNode(std::unique_ptr<RematNode> NUPtr,
381 std::deque<std::unique_ptr<RematNode>> &WorkList,
382 User *FirstUse) {
383 RematNode *N = NUPtr.get();
384 if (Remats.count(N->Node))
385 return;
386
387 // We haven't see this node yet - add to the list
388 Remats[N->Node] = std::move(NUPtr);
389 for (auto &Def : N->Node->operands()) {
390 Instruction *D = dyn_cast<Instruction>(Def.get());
391 if (!D || !MaterializableCallback(*D) ||
392 !Checker.isDefinitionAcrossSuspend(*D, FirstUse))
393 continue;
394
395 if (Remats.count(D)) {
396 // Already have this in the graph
397 N->Operands.push_back(Remats[D].get());
398 continue;
399 }
400
401 bool NoMatch = true;
402 for (auto &I : WorkList) {
403 if (I->Node == D) {
404 NoMatch = false;
405 N->Operands.push_back(I.get());
406 break;
407 }
408 }
409 if (NoMatch) {
410 // Create a new node
411 std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(D);
412 N->Operands.push_back(ChildNode.get());
413 WorkList.push_back(std::move(ChildNode));
414 }
415 }
416 }
417
418#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
419 void dump() const {
420 dbgs() << "Entry (";
421 if (EntryNode->Node->getParent()->hasName())
422 dbgs() << EntryNode->Node->getParent()->getName();
423 else
424 EntryNode->Node->getParent()->printAsOperand(dbgs(), false);
425 dbgs() << ") : " << *EntryNode->Node << "\n";
426 for (auto &E : Remats) {
427 dbgs() << *(E.first) << "\n";
428 for (RematNode *U : E.second->Operands)
429 dbgs() << " " << *U->Node << "\n";
430 }
431 }
432#endif
433};
434} // end anonymous namespace
435
436namespace llvm {
437
438template <> struct GraphTraits<RematGraph *> {
439 using NodeRef = RematGraph::RematNode *;
440 using ChildIteratorType = RematGraph::RematNode **;
441
442 static NodeRef getEntryNode(RematGraph *G) { return G->EntryNode; }
444 return N->Operands.begin();
445 }
446 static ChildIteratorType child_end(NodeRef N) { return N->Operands.end(); }
447};
448
449} // end namespace llvm
450
451#undef DEBUG_TYPE // "coro-suspend-crossing"
452#define DEBUG_TYPE "coro-frame"
453
454namespace {
455class FrameTypeBuilder;
456// Mapping from the to-be-spilled value to all the users that need reload.
458struct AllocaInfo {
459 AllocaInst *Alloca;
461 bool MayWriteBeforeCoroBegin;
462 AllocaInfo(AllocaInst *Alloca,
463 DenseMap<Instruction *, std::optional<APInt>> Aliases,
464 bool MayWriteBeforeCoroBegin)
465 : Alloca(Alloca), Aliases(std::move(Aliases)),
466 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
467};
468struct FrameDataInfo {
469 // All the values (that are not allocas) that needs to be spilled to the
470 // frame.
471 SpillInfo Spills;
472 // Allocas contains all values defined as allocas that need to live in the
473 // frame.
475
476 SmallVector<Value *, 8> getAllDefs() const {
478 for (const auto &P : Spills)
479 Defs.push_back(P.first);
480 for (const auto &A : Allocas)
481 Defs.push_back(A.Alloca);
482 return Defs;
483 }
484
485 uint32_t getFieldIndex(Value *V) const {
486 auto Itr = FieldIndexMap.find(V);
487 assert(Itr != FieldIndexMap.end() &&
488 "Value does not have a frame field index");
489 return Itr->second;
490 }
491
492 void setFieldIndex(Value *V, uint32_t Index) {
493 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
494 "Cannot set the index for the same field twice.");
495 FieldIndexMap[V] = Index;
496 }
497
498 Align getAlign(Value *V) const {
499 auto Iter = FieldAlignMap.find(V);
500 assert(Iter != FieldAlignMap.end());
501 return Iter->second;
502 }
503
504 void setAlign(Value *V, Align AL) {
505 assert(FieldAlignMap.count(V) == 0);
506 FieldAlignMap.insert({V, AL});
507 }
508
509 uint64_t getDynamicAlign(Value *V) const {
510 auto Iter = FieldDynamicAlignMap.find(V);
511 assert(Iter != FieldDynamicAlignMap.end());
512 return Iter->second;
513 }
514
515 void setDynamicAlign(Value *V, uint64_t Align) {
516 assert(FieldDynamicAlignMap.count(V) == 0);
517 FieldDynamicAlignMap.insert({V, Align});
518 }
519
520 uint64_t getOffset(Value *V) const {
521 auto Iter = FieldOffsetMap.find(V);
522 assert(Iter != FieldOffsetMap.end());
523 return Iter->second;
524 }
525
526 void setOffset(Value *V, uint64_t Offset) {
527 assert(FieldOffsetMap.count(V) == 0);
528 FieldOffsetMap.insert({V, Offset});
529 }
530
531 // Remap the index of every field in the frame, using the final layout index.
532 void updateLayoutIndex(FrameTypeBuilder &B);
533
534private:
535 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
536 // twice by mistake.
537 bool LayoutIndexUpdateStarted = false;
538 // Map from values to their slot indexes on the frame. They will be first set
539 // with their original insertion field index. After the frame is built, their
540 // indexes will be updated into the final layout index.
541 DenseMap<Value *, uint32_t> FieldIndexMap;
542 // Map from values to their alignment on the frame. They would be set after
543 // the frame is built.
544 DenseMap<Value *, Align> FieldAlignMap;
545 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
546 // Map from values to their offset on the frame. They would be set after
547 // the frame is built.
548 DenseMap<Value *, uint64_t> FieldOffsetMap;
549};
550} // namespace
551
552#ifndef NDEBUG
553static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
554 dbgs() << "------------- " << Title << "--------------\n";
555 for (const auto &E : Spills) {
556 E.first->dump();
557 dbgs() << " user: ";
558 for (auto *I : E.second)
559 I->dump();
560 }
561}
562static void dumpRemats(
563 StringRef Title,
564 const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8> &RM) {
565 dbgs() << "------------- " << Title << "--------------\n";
566 for (const auto &E : RM) {
567 E.second->dump();
568 dbgs() << "--\n";
569 }
570}
571
572static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
573 dbgs() << "------------- Allocas --------------\n";
574 for (const auto &A : Allocas) {
575 A.Alloca->dump();
576 }
577}
578#endif
579
580namespace {
581using FieldIDType = size_t;
582// We cannot rely solely on natural alignment of a type when building a
583// coroutine frame and if the alignment specified on the Alloca instruction
584// differs from the natural alignment of the alloca type we will need to insert
585// padding.
586class FrameTypeBuilder {
587private:
588 struct Field {
591 Type *Ty;
592 FieldIDType LayoutFieldIndex;
593 Align Alignment;
594 Align TyAlignment;
595 uint64_t DynamicAlignBuffer;
596 };
597
598 const DataLayout &DL;
599 LLVMContext &Context;
600 uint64_t StructSize = 0;
601 Align StructAlign;
602 bool IsFinished = false;
603
604 std::optional<Align> MaxFrameAlignment;
605
607 DenseMap<Value*, unsigned> FieldIndexByKey;
608
609public:
610 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
611 std::optional<Align> MaxFrameAlignment)
612 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
613
614 /// Add a field to this structure for the storage of an `alloca`
615 /// instruction.
616 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
617 bool IsHeader = false) {
618 Type *Ty = AI->getAllocatedType();
619
620 // Make an array type if this is a static array allocation.
621 if (AI->isArrayAllocation()) {
622 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
623 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
624 else
625 report_fatal_error("Coroutines cannot handle non static allocas yet");
626 }
627
628 return addField(Ty, AI->getAlign(), IsHeader);
629 }
630
631 /// We want to put the allocas whose lifetime-ranges are not overlapped
632 /// into one slot of coroutine frame.
633 /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
634 ///
635 /// cppcoro::task<void> alternative_paths(bool cond) {
636 /// if (cond) {
637 /// big_structure a;
638 /// process(a);
639 /// co_await something();
640 /// } else {
641 /// big_structure b;
642 /// process2(b);
643 /// co_await something();
644 /// }
645 /// }
646 ///
647 /// We want to put variable a and variable b in the same slot to
648 /// reduce the size of coroutine frame.
649 ///
650 /// This function use StackLifetime algorithm to partition the AllocaInsts in
651 /// Spills to non-overlapped sets in order to put Alloca in the same
652 /// non-overlapped set into the same slot in the Coroutine Frame. Then add
653 /// field for the allocas in the same non-overlapped set by using the largest
654 /// type as the field type.
655 ///
656 /// Side Effects: Because We sort the allocas, the order of allocas in the
657 /// frame may be different with the order in the source code.
658 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
659 coro::Shape &Shape);
660
661 /// Add a field to this structure.
662 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
663 bool IsHeader = false,
664 bool IsSpillOfValue = false) {
665 assert(!IsFinished && "adding fields to a finished builder");
666 assert(Ty && "must provide a type for a field");
667
668 // The field size is always the alloc size of the type.
669 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
670
671 // For an alloca with size=0, we don't need to add a field and they
672 // can just point to any index in the frame. Use index 0.
673 if (FieldSize == 0) {
674 return 0;
675 }
676
677 // The field alignment might not be the type alignment, but we need
678 // to remember the type alignment anyway to build the type.
679 // If we are spilling values we don't need to worry about ABI alignment
680 // concerns.
681 Align ABIAlign = DL.getABITypeAlign(Ty);
682 Align TyAlignment = ABIAlign;
683 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
684 TyAlignment = *MaxFrameAlignment;
685 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
686
687 // The field alignment could be bigger than the max frame case, in that case
688 // we request additional storage to be able to dynamically align the
689 // pointer.
690 uint64_t DynamicAlignBuffer = 0;
691 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
692 DynamicAlignBuffer =
693 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
694 FieldAlignment = *MaxFrameAlignment;
695 FieldSize = FieldSize + DynamicAlignBuffer;
696 }
697
698 // Lay out header fields immediately.
700 if (IsHeader) {
701 Offset = alignTo(StructSize, FieldAlignment);
702 StructSize = Offset + FieldSize;
703
704 // Everything else has a flexible offset.
705 } else {
707 }
708
709 Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
710 DynamicAlignBuffer});
711 return Fields.size() - 1;
712 }
713
714 /// Finish the layout and set the body on the given type.
715 void finish(StructType *Ty);
716
717 uint64_t getStructSize() const {
718 assert(IsFinished && "not yet finished!");
719 return StructSize;
720 }
721
722 Align getStructAlign() const {
723 assert(IsFinished && "not yet finished!");
724 return StructAlign;
725 }
726
727 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
728 assert(IsFinished && "not yet finished!");
729 return Fields[Id].LayoutFieldIndex;
730 }
731
732 Field getLayoutField(FieldIDType Id) const {
733 assert(IsFinished && "not yet finished!");
734 return Fields[Id];
735 }
736};
737} // namespace
738
739void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
740 auto Updater = [&](Value *I) {
741 auto Field = B.getLayoutField(getFieldIndex(I));
742 setFieldIndex(I, Field.LayoutFieldIndex);
743 setAlign(I, Field.Alignment);
744 uint64_t dynamicAlign =
745 Field.DynamicAlignBuffer
746 ? Field.DynamicAlignBuffer + Field.Alignment.value()
747 : 0;
748 setDynamicAlign(I, dynamicAlign);
749 setOffset(I, Field.Offset);
750 };
751 LayoutIndexUpdateStarted = true;
752 for (auto &S : Spills)
753 Updater(S.first);
754 for (const auto &A : Allocas)
755 Updater(A.Alloca);
756 LayoutIndexUpdateStarted = false;
757}
758
// Partition the frame allocas into sets whose stack lifetimes do not overlap,
// then (via the scope_exit below) add one frame field per set, sized by the
// largest alloca in the set. See the declaration comment for the motivation.
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add a field for the allocas at the end of this function no
  // matter which path returns: each set shares one field, keyed by its
  // largest (first) alloca.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  // Without frame optimization, every alloca gets its own singleton set and
  // therefore its own frame field.
  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the life ranges of every alloca overlap
  // in the blocks that contain coro.end and the successor blocks.
  // So we choose to skip these blocks when we calculate the life range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas to live in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        // Temporarily retarget the default destination (the coro.end path)
        // to the resume successor so the lifetime analysis ignores it; the
        // original destination is restored below.
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  // Returns true when the live ranges of the two allocas overlap ("interfere").
  auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();
  };
  // Put larger allocas in the front. So the larger allocas have higher
  // priority to merge, which can save more space potentially. Also each
  // AllocaSet would be ordered. So we can get the largest Alloca in one
  // AllocaSet easily.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find if the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If that is true, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInferenre(Alloca, Iter);
      });
      // If the alignment of A is multiple of the alignment of B, the address
      // of A should satisfy the requirement for aligning for B.
      //
      // There may be other more fine-grained strategies to handle the alignment
      // information during the merging process. But it seems hard to handle
      // these strategies and benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Recover the default target destination for each Switch statement
  // reserved above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This Debug Info could tell us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}
886
// Finalize the coroutine frame layout: feed every collected Field into
// performOptimizedStructLayout, materialize explicit i8-array padding where
// required, and install the resulting body on the frame type Ty. After this
// call the builder is frozen (IsFinished) and Field offsets/indices are final.
887void FrameTypeBuilder::finish(StructType *Ty) {
888 assert(!IsFinished && "already finished!");
889
890 // Prepare the optimal-layout field array.
891 // The Id in the layout field is a pointer to our Field for it.
893 LayoutFields.reserve(Fields.size());
894 for (auto &Field : Fields) {
895 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
896 Field.Offset);
897 }
898
899 // Perform layout.
900 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
901 StructSize = SizeAndAlign.first;
902 StructAlign = SizeAndAlign.second;
903
// Recover our Field from the opaque Id slot of a layout entry (stored as a
// const void* by the layout algorithm).
904 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
905 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
906 };
907
908 // We need to produce a packed struct type if there's a field whose
909 // assigned offset isn't a multiple of its natural type alignment.
910 bool Packed = [&] {
911 for (auto &LayoutField : LayoutFields) {
912 auto &F = getField(LayoutField);
913 if (!isAligned(F.TyAlignment, LayoutField.Offset))
914 return true;
915 }
916 return false;
917 }();
918
919 // Build the struct body.
// Reserve extra slots up front since padding arrays may be interleaved
// between the real fields.
920 SmallVector<Type*, 16> FieldTypes;
921 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
922 uint64_t LastOffset = 0;
923 for (auto &LayoutField : LayoutFields) {
924 auto &F = getField(LayoutField);
925
926 auto Offset = LayoutField.Offset;
927
928 // Add a padding field if there's a padding gap and we're either
929 // building a packed struct or the padding gap is more than we'd
930 // get from aligning to the field type's natural alignment.
931 assert(Offset >= LastOffset);
932 if (Offset != LastOffset) {
933 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
934 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
935 Offset - LastOffset));
936 }
937
// Record the final offset and the element index in the struct body so
// later consumers (e.g. index/offset queries on the builder) can find it.
938 F.Offset = Offset;
939 F.LayoutFieldIndex = FieldTypes.size();
940
941 FieldTypes.push_back(F.Ty);
942 if (F.DynamicAlignBuffer) {
// Append DynamicAlignBuffer extra i8 scratch bytes directly after the
// field so it can be re-aligned dynamically at runtime.
943 FieldTypes.push_back(
944 ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
945 }
946 LastOffset = Offset + F.Size;
947 }
948
949 Ty->setBody(FieldTypes, Packed);
950
951#ifndef NDEBUG
952 // Check that the IR layout matches the offsets we expect.
953 auto Layout = DL.getStructLayout(Ty);
954 for (auto &F : Fields) {
955 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
956 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
957 }
958#endif
959
960 IsFinished = true;
961}
962
/// Cache one DILocalVariable per value that will live on the frame.
///
/// For each frame value, scan both debug-info representations of its
/// declarations (dbg.declare intrinsics and DbgVariableRecord declares) and
/// remember the first variable whose DIExpression is empty — i.e. one that
/// describes the whole value rather than an offset/fragment of it. Values
/// already present in DIVarCache are left untouched.
963static void cacheDIVar(FrameDataInfo &FrameData,
965 for (auto *V : FrameData.getAllDefs()) {
966 if (DIVarCache.contains(V))
967 continue;
968
// Prefer a declare with an empty expression: it maps the variable
// directly onto V with no fragment arithmetic.
969 auto CacheIt = [&DIVarCache, V](const auto &Container) {
970 auto *I = llvm::find_if(Container, [](auto *DDI) {
971 return DDI->getExpression()->getNumElements() == 0;
972 });
973 if (I != Container.end())
974 DIVarCache.insert({V, (*I)->getVariable()});
975 };
// Check the intrinsic-based form first, then the record-based form.
976 CacheIt(findDbgDeclares(V));
977 CacheIt(findDVRDeclares(V));
978 }
979}
980
981/// Create name for Type. It uses MDString to store new created string to
982/// avoid memory leak.
///
/// Returns a short human-readable tag used when synthesizing debug info for
/// frame fields: "__int_<N>" for integers, fixed tags for float/double/other
/// floating-point types, "PointerType" for pointers, a sanitized struct name
/// (or "__LiteralStructType_") for structs, and "UnknownType" otherwise.
/// Dynamically-built names are interned as MDString in the LLVMContext so the
/// returned StringRef stays valid after the local buffer dies.
984 if (Ty->isIntegerTy()) {
985 // The longest name in common may be '__int_128', which is 9 characters,
// so the 16-byte inline buffer of the SmallString never allocates.
986 SmallString<16> Buffer;
987 raw_svector_ostream OS(Buffer);
988 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
989 auto *MDName = MDString::get(Ty->getContext(), OS.str());
990 return MDName->getString();
991 }
992
993 if (Ty->isFloatingPointTy()) {
994 if (Ty->isFloatTy())
995 return "__float_";
996 if (Ty->isDoubleTy())
997 return "__double_";
998 return "__floating_type_";
999 }
1000
1001 if (Ty->isPointerTy())
1002 return "PointerType";
1003
1004 if (Ty->isStructTy()) {
1005 if (!cast<StructType>(Ty)->hasName())
1006 return "__LiteralStructType_";
1007
1008 auto Name = Ty->getStructName();
1009
// Replace characters that are awkward in debug-info identifiers ('.' and
// ':', common in C++ mangled/struct names) with underscores.
1010 SmallString<16> Buffer(Name);
1011 for (auto &Iter : Buffer)
1012 if (Iter == '.' || Iter == ':')
1013 Iter = '_';
1014 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
1015 return MDName->getString();
1016 }
1017
1018 return "UnknownType";
1019}
1020
/// Build (or fetch from DITypeCache) an artificial DIType describing Ty.
///
/// Integers become signed basic types, floating-point types become
/// DW_ATE_float basic types, pointers become void*-like pointer types
/// (deliberately not chasing the pointee; see the note below), and structs
/// are expanded member-by-member recursively. Any other sized type is
/// modeled as an array of unsigned chars large enough to cover it. The
/// result is memoized in DITypeCache keyed by the IR type.
1021static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
1022 const DataLayout &Layout, DIScope *Scope,
1023 unsigned LineNum,
1024 DenseMap<Type *, DIType *> &DITypeCache) {
1025 if (DIType *DT = DITypeCache.lookup(Ty))
1026 return DT;
1027
1029
1030 DIType *RetType = nullptr;
1031
1032 if (Ty->isIntegerTy()) {
1033 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1034 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
1035 llvm::DINode::FlagArtificial);
1036 } else if (Ty->isFloatingPointTy()) {
1037 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
1038 dwarf::DW_ATE_float,
1039 llvm::DINode::FlagArtificial);
1040 } else if (Ty->isPointerTy()) {
1041 // Construct PointerType points to null (aka void *) instead of exploring
1042 // pointee type to avoid infinite search problem. For example, we would be
1043 // in trouble if we traverse recursively:
1044 //
1045 // struct Node {
1046 // Node* ptr;
1047 // };
1048 RetType =
1049 Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
1050 Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
1051 /*DWARFAddressSpace=*/std::nullopt, Name);
1052 } else if (Ty->isStructTy()) {
// Create the composite first with an empty member list, then recurse for
// each element and patch the members in via replaceArrays — this keeps
// the parent node available as the Scope for its members.
1053 auto *DIStruct = Builder.createStructType(
1054 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
1055 Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
1056 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
1057
1058 auto *StructTy = cast<StructType>(Ty);
1060 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
1061 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
1062 Scope, LineNum, DITypeCache);
1063 assert(DITy);
1064 Elements.push_back(Builder.createMemberType(
1065 Scope, DITy->getName(), Scope->getFile(), LineNum,
1066 DITy->getSizeInBits(), DITy->getAlignInBits(),
1067 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
1068 llvm::DINode::FlagArtificial, DITy));
1069 }
1070
1071 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
1072
1073 RetType = DIStruct;
1074 } else {
// Fallback: model the unknown type as an unsigned-char array covering
// its storage, rounding the bit size up to a whole number of bytes.
1075 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
1076 TypeSize Size = Layout.getTypeSizeInBits(Ty);
1077 auto *CharSizeType = Builder.createBasicType(
1078 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
1079
1080 if (Size <= 8)
1081 RetType = CharSizeType;
1082 else {
1083 if (Size % 8 != 0)
1084 Size = TypeSize::getFixed(Size + 8 - (Size % 8));
1085
1086 RetType = Builder.createArrayType(
1087 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
1088 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
1089 }
1090 }
1091
1092 DITypeCache.insert({Ty, RetType});
1093 return RetType;
1094}
1095
1096/// Build artificial debug info for C++ coroutine frames to allow users to
1097/// inspect the contents of the frame directly
1098///
1099/// Create Debug information for coroutine frame with debug name "__coro_frame".
1100/// The debug information for the fields of coroutine frame is constructed from
1101/// the following way:
1102/// 1. For all the value in the Frame, we search the use of dbg.declare to find
1103/// the corresponding debug variables for the value. If we can find the
1104/// debug variable, we can get full and accurate debug information.
1105/// 2. If we can't get debug information in step 1, we could only try to
1106/// build the DIType by Type. We did this in solveDIType. We only handle
1107/// integer, float, double, integer type and struct type for now.
1109 FrameDataInfo &FrameData) {
1110 DISubprogram *DIS = F.getSubprogram();
1111 // If there is no DISubprogram for F, it implies the Function are not compiled
1112 // with debug info. So we also don't need to generate debug info for the frame
1113 // neither.
1114 if (!DIS || !DIS->getUnit() ||
1116 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
1117 return;
1118
1119 assert(Shape.ABI == coro::ABI::Switch &&
1120 "We could only build debug infomation for C++ coroutine now.\n");
1121
1122 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
1123
1124 assert(Shape.getPromiseAlloca() &&
1125 "Coroutine with switch ABI should own Promise alloca");
1126
1127 DIFile *DFile = DIS->getFile();
1128 unsigned LineNum = DIS->getLine();
1129
// The composite type for the whole frame; members are filled in below and
// patched in with replaceArrays once they are all built.
1130 DICompositeType *FrameDITy = DBuilder.createStructType(
1131 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
1132 DFile, LineNum, Shape.FrameSize * 8,
1133 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
1134 llvm::DINodeArray());
1135 StructType *FrameTy = Shape.FrameTy;
1137 DataLayout Layout = F.getDataLayout();
1138
1140 cacheDIVar(FrameData, DIVarCache);
1141
1142 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
1143 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
1144 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
1145
// Well-known header fields always get fixed synthetic names.
1147 NameCache.insert({ResumeIndex, "__resume_fn"});
1148 NameCache.insert({DestroyIndex, "__destroy_fn"});
1149 NameCache.insert({IndexIndex, "__coro_index"});
1150
1151 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
1152 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
1153 *IndexTy = FrameTy->getElementType(IndexIndex);
1154
// Model the resume/destroy slots as opaque (void*-like) pointer types of
// the right size rather than real function-pointer types.
1156 TyCache.insert(
1157 {ResumeIndex, DBuilder.createPointerType(
1158 nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
1159 TyCache.insert(
1160 {DestroyIndex, DBuilder.createPointerType(
1161 nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});
1162
1163 /// FIXME: If we fill the field `SizeInBits` with the actual size of
1164 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
1165 TyCache.insert({IndexIndex, DBuilder.createBasicType(
1166 "__coro_index",
1167 (Layout.getTypeSizeInBits(IndexTy) < 8)
1168 ? 8
1169 : Layout.getTypeSizeInBits(IndexTy),
1170 dwarf::DW_ATE_unsigned_char)});
1171
// Step 1: for every spilled value with a cached DILocalVariable, reuse its
// real name and type for the corresponding frame field.
1172 for (auto *V : FrameData.getAllDefs()) {
1173 if (!DIVarCache.contains(V))
1174 continue;
1175
1176 auto Index = FrameData.getFieldIndex(V);
1177
1178 NameCache.insert({Index, DIVarCache[V]->getName()});
1179 TyCache.insert({Index, DIVarCache[V]->getType()});
1180 }
1181
1182 // Cache from index to (Align, Offset Pair)
1184 // The Align and Offset of Resume function and Destroy function are fixed.
1185 OffsetCache.insert({ResumeIndex, {8, 0}});
1186 OffsetCache.insert({DestroyIndex, {8, 8}});
1187 OffsetCache.insert(
1188 {IndexIndex,
1190
1191 for (auto *V : FrameData.getAllDefs()) {
1192 auto Index = FrameData.getFieldIndex(V);
1193
1194 OffsetCache.insert(
1195 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
1196 }
1197
1198 DenseMap<Type *, DIType *> DITypeCache;
1199 // This counter is used to avoid same type names. e.g., there would be
1200 // many i32 and i64 types in one coroutine. And we would use i32_0 and
1201 // i32_1 to avoid the same type. Since it makes no sense the name of the
1202 // fields conflicts with each other.
1203 unsigned UnknownTypeNum = 0;
1204 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
// Fields without an OffsetCache entry are layout padding — skip them.
1205 if (!OffsetCache.contains(Index))
1206 continue;
1207
1208 std::string Name;
1209 uint64_t SizeInBits;
1210 uint32_t AlignInBits;
1211 uint64_t OffsetInBits;
1212 DIType *DITy = nullptr;
1213
1214 Type *Ty = FrameTy->getElementType(Index);
1215 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1216 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
1217 AlignInBits = OffsetCache[Index].first * 8;
1218 OffsetInBits = OffsetCache[Index].second * 8;
1219
1220 if (NameCache.contains(Index)) {
1221 Name = NameCache[Index].str();
1222 DITy = TyCache[Index];
1223 } else {
// Step 2: no real debug variable — synthesize a DIType from the IR type
// and make the member name unique with a running counter suffix.
1224 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1225 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1226 Name = DITy->getName().str();
1227 Name += "_" + std::to_string(UnknownTypeNum);
1228 UnknownTypeNum++;
1229 }
1230
1231 Elements.push_back(DBuilder.createMemberType(
1232 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1233 llvm::DINode::FlagArtificial, DITy));
1234 }
1235
1236 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1237
1238 auto *FrameDIVar =
1239 DBuilder.createAutoVariable(DIS, "__coro_frame", DFile, LineNum,
1240 FrameDITy, true, DINode::FlagArtificial);
1241
1242 // Subprogram would have ContainedNodes field which records the debug
1243 // variables it contained. So we need to add __coro_frame to the
1244 // ContainedNodes of it.
1245 //
1246 // If we don't add __coro_frame to the RetainedNodes, user may get
1247 // `no symbol __coro_frame in context` rather than `__coro_frame`
1248 // is optimized out, which is more precise.
1249 auto RetainedNodes = DIS->getRetainedNodes();
1250 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1251 RetainedNodes.end());
1252 RetainedNodesVec.push_back(FrameDIVar);
// NOTE(review): operand index 7 is assumed to be DISubprogram's
// retainedNodes operand — confirm against DISubprogram's operand layout if
// its schema ever changes.
1253 DIS->replaceOperandWith(7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1254
1255 // Construct the location for the frame debug variable. The column number
1256 // is fake but it should be fine.
1257 DILocation *DILoc =
1258 DILocation::get(DIS->getContext(), LineNum, /*Column=*/1, DIS);
1259 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
1260
// Emit the declare in whichever debug-info representation the module uses:
// a DbgVariableRecord attached to the instruction stream, or a classic
// dbg.declare intrinsic inserted after the frame pointer is established.
1261 if (UseNewDbgInfoFormat) {
1262 DbgVariableRecord *NewDVR =
1263 new DbgVariableRecord(ValueAsMetadata::get(Shape.FramePtr), FrameDIVar,
1264 DBuilder.createExpression(), DILoc,
1265 DbgVariableRecord::LocationType::Declare);
1267 It->getParent()->insertDbgRecordBefore(NewDVR, It);
1268 } else {
1269 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1270 DBuilder.createExpression(), DILoc,
1271 &*Shape.getInsertPtAfterFramePtr());
1272 }
1273}
1274
1275// Build a struct that will keep state for an active coroutine.
1276// struct f.frame {
1277// ResumeFnTy ResumeFnAddr;
1278// ResumeFnTy DestroyFnAddr;
1279// ... promise (if present) ...
1280// int ResumeIndex;
1281// ... spills ...
1282// };
//
// Creates the named StructType "<fn>.Frame", populates it via
// FrameTypeBuilder from the allocas and spills recorded in FrameData, and
// records the resulting size/alignment (plus ABI-specific bookkeeping such
// as the switch-index field location) on Shape. Returns the frame type.
1284 FrameDataInfo &FrameData) {
1285 LLVMContext &C = F.getContext();
1286 const DataLayout &DL = F.getDataLayout();
1287 StructType *FrameTy = [&] {
1288 SmallString<32> Name(F.getName());
1289 Name.append(".Frame");
1290 return StructType::create(C, Name);
1291 }();
1292
1293 // We will use this value to cap the alignment of spilled values.
1294 std::optional<Align> MaxFrameAlignment;
1295 if (Shape.ABI == coro::ABI::Async)
1296 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1297 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1298
1299 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1300 std::optional<FieldIDType> SwitchIndexFieldId;
1301
1302 if (Shape.ABI == coro::ABI::Switch) {
1303 auto *FnPtrTy = PointerType::getUnqual(C);
1304
1305 // Add header fields for the resume and destroy functions.
1306 // We can rely on these being perfectly packed.
1307 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
1308 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
1309
1310 // PromiseAlloca field needs to be explicitly added here because it's
1311 // a header field with a fixed offset based on its alignment. Hence it
1312 // needs special handling and cannot be added to FrameData.Allocas.
1313 if (PromiseAlloca)
1314 FrameData.setFieldIndex(
1315 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1316
1317 // Add a field to store the suspend index. This doesn't need to
1318 // be in the header.
// ceil(log2(#suspends)) bits suffice to distinguish every suspend point;
// at least one bit even for a single suspend.
1319 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1320 Type *IndexType = Type::getIntNTy(C, IndexBits);
1321
1322 SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
1323 } else {
1324 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1325 }
1326
1327 // Because multiple allocas may own the same field slot,
1328 // we add allocas to field here.
1329 B.addFieldForAllocas(F, FrameData, Shape);
1330 // Add PromiseAlloca to Allocas list so that
1331 // 1. updateLayoutIndex could update its index after
1332 // `performOptimizedStructLayout`
1333 // 2. it is processed in insertSpills.
1334 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
1335 // We assume that the promise alloca won't be modified before
1336 // CoroBegin and no alias will be created before CoroBegin.
1337 FrameData.Allocas.emplace_back(
1338 PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
1339 // Create an entry for every spilled value.
1340 for (auto &S : FrameData.Spills) {
1341 Type *FieldType = S.first->getType();
1342 // For byval arguments, we need to store the pointed value in the frame,
1343 // instead of the pointer itself.
1344 if (const Argument *A = dyn_cast<Argument>(S.first))
1345 if (A->hasByValAttr())
1346 FieldType = A->getParamByValType();
1347 FieldIDType Id = B.addField(FieldType, std::nullopt, false /*header*/,
1348 true /*IsSpillOfValue*/);
1349 FrameData.setFieldIndex(S.first, Id);
1350 }
1351
// Run the layout and publish the final geometry on Shape.
1352 B.finish(FrameTy);
1353 FrameData.updateLayoutIndex(B);
1354 Shape.FrameAlign = B.getStructAlign();
1355 Shape.FrameSize = B.getStructSize();
1356
1357 switch (Shape.ABI) {
1358 case coro::ABI::Switch: {
1359 // In the switch ABI, remember the switch-index field.
1360 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1361 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1362 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1363 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1364
1365 // Also round the frame size up to a multiple of its alignment, as is
1366 // generally expected in C/C++.
1367 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1368 break;
1369 }
1370
1371 // In the retcon ABI, remember whether the frame is inline in the storage.
1372 case coro::ABI::Retcon:
1373 case coro::ABI::RetconOnce: {
1374 auto Id = Shape.getRetconCoroId();
1376 = (B.getStructSize() <= Id->getStorageSize() &&
1377 B.getStructAlign() <= Id->getStorageAlignment());
1378 break;
1379 }
1380 case coro::ABI::Async: {
1383 // Also make the final context size a multiple of the context alignment to
1384 // make allocation easier for allocators.
1388 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1390 "The alignment requirment of frame variables cannot be higher than "
1391 "the alignment of the async function context");
1392 }
1393 break;
1394 }
1395 }
1396
1397 return FrameTy;
1398}
1399
1400// We use a pointer use visitor to track how an alloca is being used.
1401// The goal is to be able to answer the following three questions:
1402// 1. Should this alloca be allocated on the frame instead.
1403// 2. Could the content of the alloca be modified prior to CoroBegin, which would
1404// require copying the data from alloca to the frame after CoroBegin.
1405// 3. Is there any alias created for this alloca prior to CoroBegin, but used
1406// after CoroBegin. In that case, we will need to recreate the alias after
1407// CoroBegin based off the frame. To answer question 1, we track two things:
1408// a. List of all BasicBlocks that use this alloca or any of the aliases of
1409// the alloca. In the end, we check if there exists any two basic blocks that
1410// cross suspension points. If so, this alloca must be put on the frame. b.
1411// Whether the alloca or any alias of the alloca is escaped at some point,
1412// either by storing the address somewhere, or the address is used in a
1413// function call that might capture. If it's ever escaped, this alloca must be
1414// put on the frame conservatively.
1415// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
1416// Whenever a potential write happens, either through a store instruction, a
1417// function call or any of the memory intrinsics, we check whether this
1418// instruction is prior to CoroBegin. To answer question 3, we track the offsets
1419// of all aliases created for the alloca prior to CoroBegin but used after
1420// CoroBegin. std::optional is used to be able to represent the case when the
1421// offset is unknown (e.g. when you have a PHINode that takes in different
1422// offset values). We cannot handle unknown offsets and will assert. This is the
1423// potential issue left out. An ideal solution would likely require a
1424// significant redesign.
1425namespace {
// Visits every use of an alloca (and of its aliases) to decide whether it
// must live on the coroutine frame, whether it may be written before
// CoroBegin, and which aliases created before CoroBegin are used after it.
// See the large comment above this namespace for the full design rationale.
1426struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1428 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1429 const coro::Shape &CoroShape,
1430 const SuspendCrossingInfo &Checker,
1431 bool ShouldUseLifetimeStartInfo)
1432 : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
1433 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
// Pre-collect the blocks containing suspend instructions; they are the
// reachability targets used by computeShouldLiveOnFrame().
1434 for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
1435 CoroSuspendBBs.insert(SuspendInst->getParent());
1436 }
1437
1438 void visit(Instruction &I) {
1439 Users.insert(&I);
1440 Base::visit(I);
1441 // If the pointer is escaped prior to CoroBegin, we have to assume it would
1442 // be written into before CoroBegin as well.
1443 if (PI.isEscaped() &&
1444 !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
1445 MayWriteBeforeCoroBegin = true;
1446 }
1447 }
1448 // We need to provide this overload as PtrUseVisitor uses a pointer based
1449 // visiting function.
1450 void visit(Instruction *I) { return visit(*I); }
1451
// PHIs (and the select below) produce new names for the same memory, so
// follow their users and record them as aliases.
1452 void visitPHINode(PHINode &I) {
1453 enqueueUsers(I);
1454 handleAlias(I);
1455 }
1456
1458 enqueueUsers(I);
1459 handleAlias(I);
1460 }
1461
1462 void visitStoreInst(StoreInst &SI) {
1463 // Regardless whether the alias of the alloca is the value operand or the
1464 // pointer operand, we need to assume the alloca is being written.
1465 handleMayWrite(SI);
1466
1467 if (SI.getValueOperand() != U->get())
1468 return;
1469
1470 // We are storing the pointer into a memory location, potentially escaping.
1471 // As an optimization, we try to detect simple cases where it doesn't
1472 // actually escape, for example:
1473 // %ptr = alloca ..
1474 // %addr = alloca ..
1475 // store %ptr, %addr
1476 // %x = load %addr
1477 // ..
1478 // If %addr is only used by loading from it, we could simply treat %x as
1479 // another alias of %ptr, and not considering %ptr being escaped.
1480 auto IsSimpleStoreThenLoad = [&]() {
1481 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1482 // If the memory location we are storing to is not an alloca, it
1483 // could be an alias of some other memory locations, which is difficult
1484 // to analyze.
1485 if (!AI)
1486 return false;
1487 // StoreAliases contains aliases of the memory location stored into.
1488 SmallVector<Instruction *, 4> StoreAliases = {AI};
1489 while (!StoreAliases.empty()) {
1490 Instruction *I = StoreAliases.pop_back_val();
1491 for (User *U : I->users()) {
1492 // If we are loading from the memory location, we are creating an
1493 // alias of the original pointer.
1494 if (auto *LI = dyn_cast<LoadInst>(U)) {
1495 enqueueUsers(*LI);
1496 handleAlias(*LI);
1497 continue;
1498 }
1499 // If we are overriding the memory location, the pointer certainly
1500 // won't escape.
1501 if (auto *S = dyn_cast<StoreInst>(U))
1502 if (S->getPointerOperand() == I)
1503 continue;
1504 if (auto *II = dyn_cast<IntrinsicInst>(U))
1505 if (II->isLifetimeStartOrEnd())
1506 continue;
1507 // BitCastInst creates aliases of the memory location being stored
1508 // into.
1509 if (auto *BI = dyn_cast<BitCastInst>(U)) {
1510 StoreAliases.push_back(BI);
1511 continue;
1512 }
// Any other kind of user means the stored pointer may escape.
1513 return false;
1514 }
1515 }
1516
1517 return true;
1518 };
1519
1520 if (!IsSimpleStoreThenLoad())
1521 PI.setEscaped(&SI);
1522 }
1523
1524 // All mem intrinsics modify the data.
1525 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1526
// Casts keep pointing at the same storage: treat the result as an alias.
1527 void visitBitCastInst(BitCastInst &BC) {
1529 handleAlias(BC);
1530 }
1531
1534 handleAlias(ASC);
1535 }
1536
1538 // The base visitor will adjust Offset accordingly.
1540 handleAlias(GEPI);
1541 }
1542
1544 // When we found the lifetime markers refers to a
1545 // subrange of the original alloca, ignore the lifetime
1546 // markers to avoid misleading the analysis.
1547 if (!IsOffsetKnown || !Offset.isZero())
1549 switch (II.getIntrinsicID()) {
1550 default:
1552 case Intrinsic::lifetime_start:
1553 LifetimeStarts.insert(&II);
1554 LifetimeStartBBs.push_back(II.getParent());
1555 break;
1556 case Intrinsic::lifetime_end:
1557 LifetimeEndBBs.insert(II.getParent());
1558 break;
1559 }
1560 }
1561
// A call escapes the pointer through any argument not marked nocapture,
// and is conservatively treated as a potential write.
1562 void visitCallBase(CallBase &CB) {
1563 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1564 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1565 PI.setEscaped(&CB);
1566 handleMayWrite(CB);
1567 }
1568
// Lazily computed and memoized in the mutable ShouldLiveOnFrame.
1569 bool getShouldLiveOnFrame() const {
1570 if (!ShouldLiveOnFrame)
1571 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1572 return *ShouldLiveOnFrame;
1573 }
1574
1575 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1576
1577 DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
1578 assert(getShouldLiveOnFrame() && "This method should only be called if the "
1579 "alloca needs to live on the frame.");
1580 for (const auto &P : AliasOffetMap)
1581 if (!P.second)
1582 report_fatal_error("Unable to handle an alias with unknown offset "
1583 "created before CoroBegin.");
1584 return AliasOffetMap;
1585 }
1586
1587private:
1588 const DominatorTree &DT;
1589 const coro::Shape &CoroShape;
1590 const SuspendCrossingInfo &Checker;
1591 // All alias to the original AllocaInst, created before CoroBegin and used
1592 // after CoroBegin. Each entry contains the instruction and the offset in the
1593 // original Alloca. They need to be recreated after CoroBegin off the frame.
1596 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1597 SmallVector<BasicBlock *> LifetimeStartBBs{};
1598 SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
1599 SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
1600 bool MayWriteBeforeCoroBegin{false};
1601 bool ShouldUseLifetimeStartInfo{true};
1602
1603 mutable std::optional<bool> ShouldLiveOnFrame{};
1604
1605 bool computeShouldLiveOnFrame() const {
1606 // If lifetime information is available, we check it first since it's
1607 // more precise. We look at every pair of lifetime.start intrinsic and
1608 // every basic block that uses the pointer to see if they cross suspension
1609 // points. The uses cover both direct uses as well as indirect uses.
1610 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1611 // If there is no explicit lifetime.end, then assume the address can
1612 // cross suspension points.
1613 if (LifetimeEndBBs.empty())
1614 return true;
1615
1616 // If there is a path from a lifetime.start to a suspend without a
1617 // corresponding lifetime.end, then the alloca's lifetime persists
1618 // beyond that suspension point and the alloca must go on the frame.
1619 llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
1620 if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
1621 &LifetimeEndBBs, &DT))
1622 return true;
1623
1624 // Addresses are guaranteed to be identical after every lifetime.start so
1625 // we cannot use the local stack if the address escaped and there is a
1626 // suspend point between lifetime markers. This should also cover the
1627 // case of a single lifetime.start intrinsic in a loop with suspend point.
1628 if (PI.isEscaped()) {
1629 for (auto *A : LifetimeStarts) {
1630 for (auto *B : LifetimeStarts) {
1631 if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
1632 B->getParent()))
1633 return true;
1634 }
1635 }
1636 }
1637 return false;
1638 }
1639 // FIXME: Ideally the isEscaped check should come at the beginning.
1640 // However there are a few loose ends that need to be fixed first before
1641 // we can do that. We need to make sure we are not over-conservative, so
1642 // that the data accessed in-between await_suspend and symmetric transfer
1643 // is always put on the stack, and also data accessed after coro.end is
1644 // always put on the stack (esp the return object). To fix that, we need
1645 // to:
1646 // 1) Potentially treat sret as nocapture in calls
1647 // 2) Special handle the return object and put it on the stack
1648 // 3) Utilize lifetime.end intrinsic
1649 if (PI.isEscaped())
1650 return true;
1651
// Without lifetime info, fall back to checking whether any use-def pair
// of the recorded users crosses a suspend point.
1652 for (auto *U1 : Users)
1653 for (auto *U2 : Users)
1654 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1655 return true;
1656
1657 return false;
1658 }
1659
// Record a write unless the writing instruction is dominated by CoroBegin
// (i.e. unless it provably happens after the frame exists).
1660 void handleMayWrite(const Instruction &I) {
1661 if (!DT.dominates(CoroShape.CoroBegin, &I))
1662 MayWriteBeforeCoroBegin = true;
1663 }
1664
1665 bool usedAfterCoroBegin(Instruction &I) {
1666 for (auto &U : I.uses())
1667 if (DT.dominates(CoroShape.CoroBegin, U))
1668 return true;
1669 return false;
1670 }
1671
1672 void handleAlias(Instruction &I) {
1673 // We track all aliases created prior to CoroBegin but used after.
1674 // These aliases may need to be recreated after CoroBegin if the alloca
1675 // need to live on the frame.
1676 if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
1677 return;
1678
// An empty optional in the map means "offset unknown"; getAliasesCopy()
// turns that into a fatal error if the alloca ends up on the frame.
1679 if (!IsOffsetKnown) {
1680 AliasOffetMap[&I].reset();
1681 } else {
1682 auto Itr = AliasOffetMap.find(&I);
1683 if (Itr == AliasOffetMap.end()) {
1684 AliasOffetMap[&I] = Offset;
1685 } else if (Itr->second && *Itr->second != Offset) {
1686 // If we have seen two different possible values for this alias, we set
1687 // it to empty.
1688 AliasOffetMap[&I].reset();
1689 }
1690 }
1691 }
1692};
1693} // namespace
1694
1695// We need to make room to insert a spill after initial PHIs, but before
1696// catchswitch instruction. Placing it before violates the requirement that
1697// catchswitch, like all other EHPads must be the first nonPHI in a block.
1698//
1699// Split away catchswitch into a separate block and insert in its place:
1700//
1701// cleanuppad <InsertPt> cleanupret.
1702//
1703// cleanupret instruction will act as an insert point for the spill.
// splitBasicBlock moves the catchswitch into NewBlock and leaves an
// unconditional branch behind; that branch is erased and replaced by the
// cleanuppad/cleanupret pair. Returns the cleanupret to use as insert point.
1705 BasicBlock *CurrentBlock = CatchSwitch->getParent();
1706 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1707 CurrentBlock->getTerminator()->eraseFromParent();
1708
1709 auto *CleanupPad =
1710 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1711 auto *CleanupRet =
1712 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1713 return CleanupRet;
1714}
1715
1716// Replace all alloca and SSA values that are accessed across suspend points
1717// with GetElementPointer from coroutine frame + loads and stores. Create an
1718// AllocaSpillBB that will become the new entry block for the resume parts of
1719// the coroutine:
1720//
1721// %hdl = coro.begin(...)
1722// whatever
1723//
1724// becomes:
1725//
1726// %hdl = coro.begin(...)
1727// br label %AllocaSpillBB
1728//
1729// AllocaSpillBB:
1730// ; geps corresponding to allocas that were moved to coroutine frame
1731// br label PostSpill
1732//
1733// PostSpill:
1734// whatever
1735//
1736//
1737static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1738 auto *CB = Shape.CoroBegin;
1739 LLVMContext &C = CB->getContext();
1740 Function *F = CB->getFunction();
1741 IRBuilder<> Builder(C);
1742 StructType *FrameTy = Shape.FrameTy;
1743 Value *FramePtr = Shape.FramePtr;
1744 DominatorTree DT(*F);
1746
1747 // Create a GEP with the given index into the coroutine frame for the original
1748 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1749 // original type.
1750 auto GetFramePointer = [&](Value *Orig) -> Value * {
1751 FieldIDType Index = FrameData.getFieldIndex(Orig);
1752 SmallVector<Value *, 3> Indices = {
1753 ConstantInt::get(Type::getInt32Ty(C), 0),
1754 ConstantInt::get(Type::getInt32Ty(C), Index),
1755 };
1756
1757 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1758 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1759 auto Count = CI->getValue().getZExtValue();
1760 if (Count > 1) {
1761 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1762 }
1763 } else {
1764 report_fatal_error("Coroutines cannot handle non static allocas yet");
1765 }
1766 }
1767
1768 auto GEP = cast<GetElementPtrInst>(
1769 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1770 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1771 if (FrameData.getDynamicAlign(Orig) != 0) {
1772 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1773 auto *M = AI->getModule();
1774 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1775 auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
1776 auto *AlignMask =
1777 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1778 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1779 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1780 return Builder.CreateIntToPtr(PtrValue, AI->getType());
1781 }
1782 // If the type of GEP is not equal to the type of AllocaInst, it implies
1783 // that the AllocaInst may be reused in the Frame slot of other
1784 // AllocaInst. So We cast GEP to the AllocaInst here to re-use
1785 // the Frame storage.
1786 //
1787 // Note: If we change the strategy dealing with alignment, we need to refine
1788 // this casting.
1789 if (GEP->getType() != Orig->getType())
1790 return Builder.CreateAddrSpaceCast(GEP, Orig->getType(),
1791 Orig->getName() + Twine(".cast"));
1792 }
1793 return GEP;
1794 };
1795
1796 for (auto const &E : FrameData.Spills) {
1797 Value *Def = E.first;
1798 auto SpillAlignment = Align(FrameData.getAlign(Def));
1799 // Create a store instruction storing the value into the
1800 // coroutine frame.
1801 BasicBlock::iterator InsertPt;
1802 Type *ByValTy = nullptr;
1803 if (auto *Arg = dyn_cast<Argument>(Def)) {
1804 // For arguments, we will place the store instruction right after
1805 // the coroutine frame pointer instruction, i.e. coro.begin.
1806 InsertPt = Shape.getInsertPtAfterFramePtr();
1807
1808 // If we're spilling an Argument, make sure we clear 'nocapture'
1809 // from the coroutine function.
1810 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1811
1812 if (Arg->hasByValAttr())
1813 ByValTy = Arg->getParamByValType();
1814 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1815 // Don't spill immediately after a suspend; splitting assumes
1816 // that the suspend will be followed by a branch.
1817 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1818 } else {
1819 auto *I = cast<Instruction>(Def);
1820 if (!DT.dominates(CB, I)) {
1821 // If it is not dominated by CoroBegin, then spill should be
1822 // inserted immediately after CoroFrame is computed.
1823 InsertPt = Shape.getInsertPtAfterFramePtr();
1824 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1825 // If we are spilling the result of the invoke instruction, split
1826 // the normal edge and insert the spill in the new block.
1827 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1828 InsertPt = NewBB->getTerminator()->getIterator();
1829 } else if (isa<PHINode>(I)) {
1830 // Skip the PHINodes and EH pads instructions.
1831 BasicBlock *DefBlock = I->getParent();
1832 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1833 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
1834 else
1835 InsertPt = DefBlock->getFirstInsertionPt();
1836 } else {
1837 assert(!I->isTerminator() && "unexpected terminator");
1838 // For all other values, the spill is placed immediately after
1839 // the definition.
1840 InsertPt = I->getNextNode()->getIterator();
1841 }
1842 }
1843
1844 auto Index = FrameData.getFieldIndex(Def);
1845 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1846 auto *G = Builder.CreateConstInBoundsGEP2_32(
1847 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1848 if (ByValTy) {
1849 // For byval arguments, we need to store the pointed value in the frame,
1850 // instead of the pointer itself.
1851 auto *Value = Builder.CreateLoad(ByValTy, Def);
1852 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1853 } else {
1854 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1855 }
1856
1857 BasicBlock *CurrentBlock = nullptr;
1858 Value *CurrentReload = nullptr;
1859 for (auto *U : E.second) {
1860 // If we have not seen the use block, create a load instruction to reload
1861 // the spilled value from the coroutine frame. Populates the Value pointer
1862 // reference provided with the frame GEP.
1863 if (CurrentBlock != U->getParent()) {
1864 CurrentBlock = U->getParent();
1865 Builder.SetInsertPoint(CurrentBlock,
1866 CurrentBlock->getFirstInsertionPt());
1867
1868 auto *GEP = GetFramePointer(E.first);
1869 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1870 if (ByValTy)
1871 CurrentReload = GEP;
1872 else
1873 CurrentReload = Builder.CreateAlignedLoad(
1874 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1875 SpillAlignment, E.first->getName() + Twine(".reload"));
1876
1879 // Try best to find dbg.declare. If the spill is a temp, there may not
1880 // be a direct dbg.declare. Walk up the load chain to find one from an
1881 // alias.
1882 if (F->getSubprogram()) {
1883 auto *CurDef = Def;
1884 while (DIs.empty() && DVRs.empty() && isa<LoadInst>(CurDef)) {
1885 auto *LdInst = cast<LoadInst>(CurDef);
1886 // Only consider ptr to ptr same type load.
1887 if (LdInst->getPointerOperandType() != LdInst->getType())
1888 break;
1889 CurDef = LdInst->getPointerOperand();
1890 if (!isa<AllocaInst, LoadInst>(CurDef))
1891 break;
1892 DIs = findDbgDeclares(CurDef);
1893 DVRs = findDVRDeclares(CurDef);
1894 }
1895 }
1896
1897 auto SalvageOne = [&](auto *DDI) {
1898 bool AllowUnresolved = false;
1899 // This dbg.declare is preserved for all coro-split function
1900 // fragments. It will be unreachable in the main function, and
1901 // processed by coro::salvageDebugInfo() by CoroCloner.
1902 if (UseNewDbgInfoFormat) {
1904 ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
1905 DDI->getExpression(), DDI->getDebugLoc(),
1906 DbgVariableRecord::LocationType::Declare);
1907 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1908 NewDVR, Builder.GetInsertPoint());
1909 } else {
1910 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1911 .insertDeclare(CurrentReload, DDI->getVariable(),
1912 DDI->getExpression(), DDI->getDebugLoc(),
1913 &*Builder.GetInsertPoint());
1914 }
1915 // This dbg.declare is for the main function entry point. It
1916 // will be deleted in all coro-split functions.
1917 coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
1918 };
1919 for_each(DIs, SalvageOne);
1920 for_each(DVRs, SalvageOne);
1921 }
1922
1923 // If we have a single edge PHINode, remove it and replace it with a
1924 // reload from the coroutine frame. (We already took care of multi edge
1925 // PHINodes by rewriting them in the rewritePHIs function).
1926 if (auto *PN = dyn_cast<PHINode>(U)) {
1927 assert(PN->getNumIncomingValues() == 1 &&
1928 "unexpected number of incoming "
1929 "values in the PHINode");
1930 PN->replaceAllUsesWith(CurrentReload);
1931 PN->eraseFromParent();
1932 continue;
1933 }
1934
1935 // Replace all uses of CurrentValue in the current instruction with
1936 // reload.
1937 U->replaceUsesOfWith(Def, CurrentReload);
1938 // Instructions are added to Def's user list if the attached
1939 // debug records use Def. Update those now.
1940 for (DbgVariableRecord &DVR : filterDbgVars(U->getDbgRecordRange()))
1941 DVR.replaceVariableLocationOp(Def, CurrentReload, true);
1942 }
1943 }
1944
1945 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1946
1947 auto SpillBlock = FramePtrBB->splitBasicBlock(
1948 Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1949 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1950 Shape.AllocaSpillBlock = SpillBlock;
1951
1952 // retcon and retcon.once lowering assumes all uses have been sunk.
1953 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1954 Shape.ABI == coro::ABI::Async) {
1955 // If we found any allocas, replace all of their remaining uses with Geps.
1956 Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
1957 for (const auto &P : FrameData.Allocas) {
1958 AllocaInst *Alloca = P.Alloca;
1959 auto *G = GetFramePointer(Alloca);
1960
1961 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1962 // here, as we are changing location of the instruction.
1963 G->takeName(Alloca);
1964 Alloca->replaceAllUsesWith(G);
1965 Alloca->eraseFromParent();
1966 }
1967 return;
1968 }
1969
1970 // If we found any alloca, replace all of their remaining uses with GEP
1971 // instructions. To preserve debuggability, we replace the uses of allocas for
1972 // dbg.declares and dbg.values with the reload from the frame.
1973 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1974 // as some of the uses may not be dominated by CoroBegin.
1975 Builder.SetInsertPoint(Shape.AllocaSpillBlock,
1976 Shape.AllocaSpillBlock->begin());
1977 SmallVector<Instruction *, 4> UsersToUpdate;
1978 for (const auto &A : FrameData.Allocas) {
1979 AllocaInst *Alloca = A.Alloca;
1980 UsersToUpdate.clear();
1981 for (User *U : Alloca->users()) {
1982 auto *I = cast<Instruction>(U);
1983 if (DT.dominates(CB, I))
1984 UsersToUpdate.push_back(I);
1985 }
1986 if (UsersToUpdate.empty())
1987 continue;
1988 auto *G = GetFramePointer(Alloca);
1989 G->setName(Alloca->getName() + Twine(".reload.addr"));
1990
1992 SmallVector<DbgVariableRecord *> DbgVariableRecords;
1993 findDbgUsers(DIs, Alloca, &DbgVariableRecords);
1994 for (auto *DVI : DIs)
1995 DVI->replaceUsesOfWith(Alloca, G);
1996 for (auto *DVR : DbgVariableRecords)
1997 DVR->replaceVariableLocationOp(Alloca, G);
1998
1999 for (Instruction *I : UsersToUpdate) {
2000 // It is meaningless to retain the lifetime intrinsics refer for the
2001 // member of coroutine frames and the meaningless lifetime intrinsics
2002 // are possible to block further optimizations.
2003 if (I->isLifetimeStartOrEnd()) {
2004 I->eraseFromParent();
2005 continue;
2006 }
2007
2008 I->replaceUsesOfWith(Alloca, G);
2009 }
2010 }
2011 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
2012 for (const auto &A : FrameData.Allocas) {
2013 AllocaInst *Alloca = A.Alloca;
2014 if (A.MayWriteBeforeCoroBegin) {
2015 // isEscaped really means potentially modified before CoroBegin.
2016 if (Alloca->isArrayAllocation())
2018 "Coroutines cannot handle copying of array allocas yet");
2019
2020 auto *G = GetFramePointer(Alloca);
2021 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
2022 Builder.CreateStore(Value, G);
2023 }
2024 // For each alias to Alloca created before CoroBegin but used after
2025 // CoroBegin, we recreate them after CoroBegin by applying the offset
2026 // to the pointer in the frame.
2027 for (const auto &Alias : A.Aliases) {
2028 auto *FramePtr = GetFramePointer(Alloca);
2029 auto &Value = *Alias.second;
2030 auto ITy = IntegerType::get(C, Value.getBitWidth());
2031 auto *AliasPtr =
2032 Builder.CreatePtrAdd(FramePtr, ConstantInt::get(ITy, Value));
2033 Alias.first->replaceUsesWithIf(
2034 AliasPtr, [&](Use &U) { return DT.dominates(CB, U); });
2035 }
2036 }
2037
2038 // PromiseAlloca is not collected in FrameData.Allocas. So we don't handle
2039 // the case that the PromiseAlloca may have writes before CoroBegin in the
2040 // above codes. And it may be problematic in edge cases. See
2041 // https://github.com/llvm/llvm-project/issues/57861 for an example.
2042 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
2044 // If there is memory accessing to promise alloca before CoroBegin;
2045 bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
2046 auto *Inst = dyn_cast<Instruction>(U.getUser());
2047 if (!Inst || DT.dominates(CB, Inst))
2048 return false;
2049
2050 if (auto *CI = dyn_cast<CallInst>(Inst)) {
2051 // It is fine if the call wouldn't write to the Promise.
2052 // This is possible for @llvm.coro.id intrinsics, which
2053 // would take the promise as the second argument as a
2054 // marker.
2055 if (CI->onlyReadsMemory() ||
2056 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2057 return false;
2058 return true;
2059 }
2060
2061 return isa<StoreInst>(Inst) ||
2062 // It may take too much time to track the uses.
2063 // Be conservative about the case the use may escape.
2064 isa<GetElementPtrInst>(Inst) ||
2065 // There would always be a bitcast for the promise alloca
2066 // before we enabled Opaque pointers. And now given
2067 // opaque pointers are enabled by default. This should be
2068 // fine.
2069 isa<BitCastInst>(Inst);
2070 });
2071 if (HasAccessingPromiseBeforeCB) {
2072 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
2073 auto *G = GetFramePointer(PA);
2074 auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
2075 Builder.CreateStore(Value, G);
2076 }
2077 }
2078}
2079
2080// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
2081// PHI in InsertedBB.
// NOTE(review): the opening line of this definition (the function name and the
// leading SuccBB parameter) is not visible in this listing; the parameters
// below belong to movePHIValuesToInsertedBlock.
2083 BasicBlock *InsertedBB,
2084 BasicBlock *PredBB,
2085 PHINode *UntilPHI = nullptr) {
// Walk the leading PHIs of SuccBB (stopping at UntilPHI; when UntilPHI is
// null, the loop ends at the first non-PHI, since dyn_cast then yields null).
// For each PHI, hoist the value arriving over the InsertedBB edge into a
// fresh single-incoming PHI placed at the top of InsertedBB, and make the
// original PHI read that new node instead. The cast below asserts that
// SuccBB's first instruction is a PHI.
2086 auto *PN = cast<PHINode>(&SuccBB->front());
2087 do {
2088 int Index = PN->getBasicBlockIndex(InsertedBB);
2089 Value *V = PN->getIncomingValue(Index);
2090 PHINode *InputV = PHINode::Create(
2091 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
2092 InputV->insertBefore(InsertedBB->begin());
2093 InputV->addIncoming(V, PredBB);
2094 PN->setIncomingValue(Index, InputV);
2095 PN = dyn_cast<PHINode>(PN->getNextNode());
2096 } while (PN != UntilPHI);
2097}
2098
2099// Rewrites the PHI Nodes in a cleanuppad.
2100static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
2101 CleanupPadInst *CleanupPad) {
2102 // For every incoming edge to a CleanupPad we will create a new block holding
2103 // all incoming values in single-value PHI nodes. We will then create another
2104 // block to act as a dispatcher (as all unwind edges for related EH blocks
2105 // must be the same).
2106 //
2107 // cleanuppad:
2108 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
2109 // %3 = cleanuppad within none []
2110 //
2111 // It will create:
2112 //
2113 // cleanuppad.corodispatch
2114 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
2115 // %3 = cleanuppad within none []
2116 // switch i8 % 2, label %unreachable
2117 // [i8 0, label %cleanuppad.from.catchswitch
2118 // i8 1, label %cleanuppad.from.catch.1]
2119 // cleanuppad.from.catchswitch:
2120 // %4 = phi i32 [%0, %catchswitch]
2121 // br %label cleanuppad
2122 // cleanuppad.from.catch.1:
2123 // %6 = phi i32 [%1, %catch.1]
2124 // br %label cleanuppad
2125 // cleanuppad:
2126 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
2127 // [%6, %cleanuppad.from.catch.1]
2128
2129 // Unreachable BB, in case switching on an invalid value in the dispatcher.
2130 auto *UnreachBB = BasicBlock::Create(
2131 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
2132 IRBuilder<> Builder(UnreachBB);
2133 Builder.CreateUnreachable();
2134
2135 // Create a new cleanuppad which will be the dispatcher.
2136 auto *NewCleanupPadBB =
2137 BasicBlock::Create(CleanupPadBB->getContext(),
2138 CleanupPadBB->getName() + Twine(".corodispatch"),
2139 CleanupPadBB->getParent(), CleanupPadBB);
2140 Builder.SetInsertPoint(NewCleanupPadBB);
2141 auto *SwitchType = Builder.getInt8Ty();
2142 auto *SetDispatchValuePN =
2143 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
 // Move the original cleanuppad instruction into the dispatcher block, right
 // after the dispatch PHI, so the dispatcher becomes the funnel for all
 // unwind edges.
2144 CleanupPad->removeFromParent();
2145 CleanupPad->insertAfter(SetDispatchValuePN);
2146 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
2147 pred_size(CleanupPadBB));
2148
 // Each predecessor is tagged with a distinct i8 constant; the dispatcher
 // switches on that tag to branch to the per-predecessor block.
2149 int SwitchIndex = 0;
2150 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
2151 for (BasicBlock *Pred : Preds) {
2152 // Create a new cleanuppad and move the PHI values to there.
2153 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
2154 CleanupPadBB->getName() +
2155 Twine(".from.") + Pred->getName(),
2156 CleanupPadBB->getParent(), CleanupPadBB);
2157 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
 // NOTE(review): CaseBB was already created with this exact name above; this
 // setName appears redundant — confirm before removing.
2158 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
2159 Pred->getName());
2160 Builder.SetInsertPoint(CaseBB);
2161 Builder.CreateBr(CleanupPadBB);
2162 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
2163
2164 // Update this Pred to the new unwind point.
2165 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
2166
2167 // Setup the switch in the dispatcher.
2168 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2169 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2170 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2171 SwitchIndex++;
2172 }
2173}
2174
 // Collect every PHI that has exactly one incoming value. The inner scan
 // stops at the first PHI with more than one incoming value — this assumes
 // single-incoming PHIs are grouped at the head of the PHI list (TODO
 // confirm that invariant holds for the callers of this cleanup).
2177 for (auto &BB : F) {
2178 for (auto &Phi : BB.phis()) {
2179 if (Phi.getNumIncomingValues() == 1) {
2180 Worklist.push_back(&Phi);
2181 } else
2182 break;
2183 }
2184 }
 // Forward all uses of each trivial PHI to its sole incoming value. The PHI
 // itself is only de-used here, not erased.
2185 while (!Worklist.empty()) {
2186 auto *Phi = Worklist.pop_back_val();
2187 auto *OriginalValue = Phi->getIncomingValue(0);
2188 Phi->replaceAllUsesWith(OriginalValue);
2189 }
2190}
2191
2192static void rewritePHIs(BasicBlock &BB) {
2193 // For every incoming edge we will create a block holding all
2194 // incoming values in a single PHI nodes.
2195 //
2196 // loop:
2197 // %n.val = phi i32[%n, %entry], [%inc, %loop]
2198 //
2199 // It will create:
2200 //
2201 // loop.from.entry:
2202 // %n.loop.pre = phi i32 [%n, %entry]
2203 // br %label loop
2204 // loop.from.loop:
2205 // %inc.loop.pre = phi i32 [%inc, %loop]
2206 // br %label loop
2207 //
2208 // After this rewrite, further analysis will ignore any phi nodes with more
2209 // than one incoming edge.
2210
2211 // TODO: Simplify PHINodes in the basic block to remove duplicate
2212 // predecessors.
2213
2214 // Special case for CleanupPad: all EH blocks must have the same unwind edge
2215 // so we need to create an additional "dispatcher" block.
2216 if (auto *CleanupPad =
2217 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
2219 for (BasicBlock *Pred : Preds) {
2220 if (CatchSwitchInst *CS =
2221 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2222 // CleanupPad with a CatchSwitch predecessor: therefore this is an
2223 // unwind destination that needs to be handled specially.
2224 assert(CS->getUnwindDest() == &BB);
2225 (void)CS;
2226 rewritePHIsForCleanupPad(&BB, CleanupPad);
2227 return;
2228 }
2229 }
2230 }
2231
2232 LandingPadInst *LandingPad = nullptr;
2233 PHINode *ReplPHI = nullptr;
2234 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
2235 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
2236 // We replace the original landing pad with a PHINode that will collect the
2237 // results from all of them.
2238 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
2239 ReplPHI->insertBefore(LandingPad->getIterator());
2240 ReplPHI->takeName(LandingPad);
2241 LandingPad->replaceAllUsesWith(ReplPHI);
2242 // We will erase the original landing pad at the end of this function after
2243 // ehAwareSplitEdge cloned it in the transition blocks.
2244 }
2245
 // Split every incoming edge and migrate this block's PHI inputs into the
 // newly inserted per-predecessor blocks.
2247 for (BasicBlock *Pred : Preds) {
2248 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
2249 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
2250
2251 // Stop the moving of values at ReplPHI, as this is either null or the PHI
2252 // that replaced the landing pad.
2253 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
2254 }
2255
2256 if (LandingPad) {
2257 // Calls to ehAwareSplitEdge function cloned the original landing pad.
2258 // No longer need it.
2259 LandingPad->eraseFromParent();
2260 }
2261}
2262
2263static void rewritePHIs(Function &F) {
 // Queue every block whose leading PHI has multiple incoming edges first,
 // then rewrite; deferring the rewrites keeps the block iteration stable
 // while rewritePHIs(BasicBlock&) mutates the CFG.
2265
2266 for (BasicBlock &BB : F)
2267 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
2268 if (PN->getNumIncomingValues() > 1)
2269 WorkList.push_back(&BB);
2270
2271 for (BasicBlock *BB : WorkList)
2272 rewritePHIs(*BB);
2273}
2274
2275/// Default materializable callback
2276// Check for instructions that we can recreate on resume as opposed to spill
2277// the result into a coroutine frame.
 // These instruction kinds are side-effect free and cheap to recompute, so
 // rematerializing them after a suspend is preferable to widening the frame.
2279 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2280 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2281}
2282
2283// Check for structural coroutine intrinsics that should not be spilled into
2284// the coroutine frame.
 // coro.id / coro.save / coro.suspend define the coroutine's structure and
 // must never be treated as spillable values.
2286 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
2287 isa<CoroSuspendInst>(&I);
2288}
2289
2290// For each instruction identified as materializable across the suspend point,
2291// and its associated DAG of other rematerializable instructions,
2292// recreate the DAG of instructions after the suspend point.
2294 const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8>
2295 &AllRemats) {
2296 // This has to be done in 2 phases
2297 // Do the remats and record the required defs to be replaced in the
2298 // original use instructions
2299 // Once all the remats are complete, replace the uses in the final
2300 // instructions with the new defs
 // ProcessNode records: the final use instruction, the old definition it
 // reads, and the rematerialized clone that should replace that definition.
2301 typedef struct {
2303 Instruction *Def;
2304 Instruction *Remat;
2305 } ProcessNode;
2306
2307 SmallVector<ProcessNode> FinalInstructionsToProcess;
2308
2309 for (const auto &E : AllRemats) {
2310 Instruction *Use = E.first;
2311 Instruction *CurrentMaterialization = nullptr;
2312 RematGraph *RG = E.second.get();
2314 SmallVector<Instruction *> InstructionsToProcess;
2315
2316 // If the target use is actually a suspend instruction then we have to
2317 // insert the remats into the end of the predecessor (there should only be
2318 // one). This is so that suspend blocks always have the suspend instruction
2319 // as the first instruction.
2320 auto InsertPoint = &*Use->getParent()->getFirstInsertionPt();
2321 if (isa<AnyCoroSuspendInst>(Use)) {
2322 BasicBlock *SuspendPredecessorBlock =
2323 Use->getParent()->getSinglePredecessor();
2324 assert(SuspendPredecessorBlock && "malformed coro suspend instruction");
2325 InsertPoint = SuspendPredecessorBlock->getTerminator();
2326 }
2327
2328 // Note: skip the first instruction as this is the actual use that we're
2329 // rematerializing everything for.
2330 auto I = RPOT.begin();
2331 ++I;
2332 for (; I != RPOT.end(); ++I) {
2333 Instruction *D = (*I)->Node;
2334 CurrentMaterialization = D->clone();
2335 CurrentMaterialization->setName(D->getName());
2336 CurrentMaterialization->insertBefore(InsertPoint);
2337 InsertPoint = CurrentMaterialization;
2338
2339 // Replace all uses of Def in the instructions being added as part of this
2340 // rematerialization group
 // NOTE(review): the loop variable below shadows the iterator I of the
 // enclosing RPOT loop; likewise E shadows the outer map element. Renaming
 // would aid readability.
2341 for (auto &I : InstructionsToProcess)
2342 I->replaceUsesOfWith(D, CurrentMaterialization);
2343
2344 // Don't replace the final use at this point as this can cause problems
2345 // for other materializations. Instead, for any final use that uses a
2346 // define that's being rematerialized, record the replace values
2347 for (unsigned i = 0, E = Use->getNumOperands(); i != E; ++i)
2348 if (Use->getOperand(i) == D) // Is this operand pointing to oldval?
2349 FinalInstructionsToProcess.push_back(
2350 {Use, D, CurrentMaterialization});
2351
2352 InstructionsToProcess.push_back(CurrentMaterialization);
2353 }
2354 }
2355
2356 // Finally, replace the uses with the defines that we've just rematerialized
2357 for (auto &R : FinalInstructionsToProcess) {
2358 if (auto *PN = dyn_cast<PHINode>(R.Use)) {
2359 assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
2360 "values in the PHINode");
2361 PN->replaceAllUsesWith(R.Remat);
2362 PN->eraseFromParent();
2363 continue;
2364 }
2365 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2366 }
2367}
2368
2369// Splits the block at a particular instruction unless it is the first
2370// instruction in the block with a single predecessor.
 // Returns the block that begins with I: either the existing block (renamed,
 // when I is already first and the block has one predecessor) or the newly
 // split-off block.
2372 auto *BB = I->getParent();
2373 if (&BB->front() == I) {
2374 if (BB->getSinglePredecessor()) {
2375 BB->setName(Name);
2376 return BB;
2377 }
2378 }
2379 return BB->splitBasicBlock(I, Name);
2380}
2381
2382// Split above and below a particular instruction so that it
2383// will be all alone by itself in a block.
2384static void splitAround(Instruction *I, const Twine &Name) {
 // The trailing split isolates I by cutting immediately after it; the new
 // successor block is tagged "After<Name>".
2386 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2387}
2388
2389static bool isSuspendBlock(BasicBlock *BB) {
2390 return isa<AnyCoroSuspendInst>(BB->front());
2391}
2392
2394
2395/// Does control flow starting at the given block ever reach a suspend
2396/// instruction before reaching a block in VisitedOrFreeBBs?
 /// The set doubles as the recursion guard: blocks are added as they are
 /// visited, so each block is explored at most once and cycles terminate.
2398 VisitedBlocksSet &VisitedOrFreeBBs) {
2399 // Eagerly try to add this block to the visited set. If it's already
2400 // there, stop recursing; this path doesn't reach a suspend before
2401 // either looping or reaching a freeing block.
2402 if (!VisitedOrFreeBBs.insert(From).second)
2403 return false;
2404
2405 // We assume that we'll already have split suspends into their own blocks.
2406 if (isSuspendBlock(From))
2407 return true;
2408
2409 // Recurse on the successors.
2410 for (auto *Succ : successors(From)) {
2411 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2412 return true;
2413 }
2414
2415 return false;
2416}
2417
2418/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2419/// suspend point?
 /// Blocks containing a coro.alloca.free for this alloca act as barriers:
 /// the reachability walk stops there, so a suspend beyond a free does not
 /// disqualify the alloca.
2421 // Seed the visited set with all the basic blocks containing a free
2422 // so that we won't pass them up.
2423 VisitedBlocksSet VisitedOrFreeBBs;
2424 for (auto *User : AI->users()) {
2425 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2426 VisitedOrFreeBBs.insert(FI->getParent());
2427 }
2428
2429 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2430}
2431
2432/// After we split the coroutine, will the given basic block be along
2433/// an obvious exit path for the resumption function?
 /// The search is depth-bounded (default 3 levels of successors), so it is a
 /// conservative heuristic: false means "might loop back", not "will not exit".
2435 unsigned depth = 3) {
2436 // If we've bottomed out our depth count, stop searching and assume
2437 // that the path might loop back.
2438 if (depth == 0) return false;
2439
2440 // If this is a suspend block, we're about to exit the resumption function.
2441 if (isSuspendBlock(BB)) return true;
2442
2443 // Recurse into the successors.
2444 for (auto *Succ : successors(BB)) {
2445 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2446 return false;
2447 }
2448
2449 // If none of the successors leads back in a loop, we're on an exit/abort.
2450 return true;
2451}
2452
 // Returns true when any coro.alloca.free of this alloca is not obviously
 // followed by a suspend/termination, in which case the caller must emit a
 // stacksave/stackrestore pair to rebalance the stack.
2454 // Look for a free that isn't sufficiently obviously followed by
2455 // either a suspend or a termination, i.e. something that will leave
2456 // the coro resumption frame.
2457 for (auto *U : AI->users()) {
2458 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2459 if (!FI) continue;
2460
2461 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2462 return true;
2463 }
2464
2465 // If we never found one, we don't need a stack save.
2466 return false;
2467}
2468
2469/// Turn each of the given local allocas into a normal (dynamic) alloca
2470/// instruction.
 /// Replaced coro.alloca.get/free intrinsics are queued on DeadInsts for the
 /// caller to erase; nothing is deleted here.
2472 SmallVectorImpl<Instruction*> &DeadInsts) {
2473 for (auto *AI : LocalAllocas) {
2474 IRBuilder<> Builder(AI);
2475
2476 // Save the stack depth. Try to avoid doing this if the stackrestore
2477 // is going to immediately precede a return or something.
2478 Value *StackSave = nullptr;
2480 StackSave = Builder.CreateStackSave();
2481
2482 // Allocate memory.
2483 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2484 Alloca->setAlignment(AI->getAlignment());
2485
2486 for (auto *U : AI->users()) {
2487 // Replace gets with the allocation.
2488 if (isa<CoroAllocaGetInst>(U)) {
2489 U->replaceAllUsesWith(Alloca);
2490
2491 // Replace frees with stackrestores. This is safe because
2492 // alloca.alloc is required to obey a stack discipline, although we
2493 // don't enforce that structurally.
2494 } else {
2495 auto FI = cast<CoroAllocaFreeInst>(U);
2496 if (StackSave) {
2497 Builder.SetInsertPoint(FI);
2498 Builder.CreateStackRestore(StackSave);
2499 }
2500 }
2501 DeadInsts.push_back(cast<Instruction>(U));
2502 }
2503
 // The coro.alloca.alloc itself is queued last so its users die first.
2504 DeadInsts.push_back(AI);
2505 }
2506}
2507
2508/// Turn the given coro.alloca.alloc call into a dynamic allocation.
2509/// This happens during the all-instructions iteration, so it must not
2510/// delete the call.
 /// Gets become the heap allocation; frees become the ABI's dealloc call.
 /// All replaced intrinsics (and finally the alloc itself) are queued on
 /// DeadInsts for the caller to erase.
2512 coro::Shape &Shape,
2513 SmallVectorImpl<Instruction*> &DeadInsts) {
2514 IRBuilder<> Builder(AI);
2515 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2516
2517 for (User *U : AI->users()) {
2518 if (isa<CoroAllocaGetInst>(U)) {
2519 U->replaceAllUsesWith(Alloc);
2520 } else {
2521 auto FI = cast<CoroAllocaFreeInst>(U);
2522 Builder.SetInsertPoint(FI);
2523 Shape.emitDealloc(Builder, Alloc, nullptr);
2524 }
2525 DeadInsts.push_back(cast<Instruction>(U));
2526 }
2527
2528 // Push this on last so that it gets deleted after all the others.
2529 DeadInsts.push_back(AI);
2530
2531 // Return the new allocation value so that we can check for needed spills.
2532 return cast<Instruction>(Alloc);
2533}
2534
2535/// Get the current swifterror value.
 /// Emits a call through a null function pointer of the right type; this is a
 /// placeholder "intrinsic" recorded in Shape.SwiftErrorOps so that coroutine
 /// splitting can lower it into real swifterror handling later.
2537 coro::Shape &Shape) {
2538 // Make a fake function pointer as a sort of intrinsic.
2539 auto FnTy = FunctionType::get(ValueTy, {}, false);
2540 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2541
2542 auto Call = Builder.CreateCall(FnTy, Fn, {});
2543 Shape.SwiftErrorOps.push_back(Call);
2544
2545 return Call;
2546}
2547
2548/// Set the given value as the current swifterror value.
2549///
2550/// Returns a slot that can be used as a swifterror slot.
 /// Like emitGetSwiftErrorValue, this emits a placeholder call through a null
 /// function pointer and records it in Shape.SwiftErrorOps for later lowering.
2552 coro::Shape &Shape) {
2553 // Make a fake function pointer as a sort of intrinsic.
2554 auto FnTy = FunctionType::get(Builder.getPtrTy(),
2555 {V->getType()}, false);
2556 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2557
2558 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2559 Shape.SwiftErrorOps.push_back(Call);
2560
2561 return Call;
2562}
2563
2564/// Set the swifterror value from the given alloca before a call,
2565/// then put in back in the alloca afterwards.
2566///
2567/// Returns an address that will stand in for the swifterror slot
2568/// until splitting.
 /// Pattern: load alloca -> "set" placeholder -> call -> "get" placeholder ->
 /// store back to alloca, bracketing the call with the swifterror protocol.
2570 AllocaInst *Alloca,
2571 coro::Shape &Shape) {
2572 auto ValueTy = Alloca->getAllocatedType();
2573 IRBuilder<> Builder(Call);
2574
2575 // Load the current value from the alloca and set it as the
2576 // swifterror value.
2577 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2578 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2579
2580 // Move to after the call. Since swifterror only has a guaranteed
2581 // value on normal exits, we can ignore implicit and explicit unwind
2582 // edges.
2583 if (isa<CallInst>(Call)) {
2584 Builder.SetInsertPoint(Call->getNextNode());
2585 } else {
2586 auto Invoke = cast<InvokeInst>(Call);
2587 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2588 }
2589
2590 // Get the current swifterror value and store it to the alloca.
2591 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2592 Builder.CreateStore(ValueAfterCall, Alloca);
2593
2594 return Addr;
2595}
2596
2597/// Eliminate a formerly-swifterror alloca by inserting the get/set
2598/// intrinsics and attempting to MemToReg the alloca away.
 /// Loads and stores of the alloca are left intact (mem2reg handles them);
 /// every call/invoke use is rewritten to take the placeholder slot address.
2600 coro::Shape &Shape) {
2601 for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2602 // swifterror values can only be used in very specific ways.
2603 // We take advantage of that here.
2604 auto User = Use.getUser();
2605 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2606 continue;
2607
2608 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2609 auto Call = cast<Instruction>(User);
2610
2611 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2612
2613 // Use the returned slot address as the call argument.
2614 Use.set(Addr);
2615 }
2616
2617 // All the uses should be loads and stores now.
2618 assert(isAllocaPromotable(Alloca));
2619}
2620
2621/// "Eliminate" a swifterror argument by reducing it to the alloca case
2622/// and then loading and storing in the prologue and epilog.
2623///
2624/// The argument keeps the swifterror flag.
 /// The created alloca is appended to AllocasToPromote so the caller can
 /// run mem2reg over it together with the other swifterror allocas.
2626 coro::Shape &Shape,
2627 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2628 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2629
2630 auto ArgTy = cast<PointerType>(Arg.getType());
2631 auto ValueTy = PointerType::getUnqual(F.getContext());
2632
2633 // Reduce to the alloca case:
2634
2635 // Create an alloca and replace all uses of the arg with it.
2636 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2637 Arg.replaceAllUsesWith(Alloca);
2638
2639 // Set an initial value in the alloca. swifterror is always null on entry.
2640 auto InitialValue = Constant::getNullValue(ValueTy);
2641 Builder.CreateStore(InitialValue, Alloca);
2642
2643 // Find all the suspends in the function and save and restore around them.
2644 for (auto *Suspend : Shape.CoroSuspends) {
2645 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2646 }
2647
2648 // Find all the coro.ends in the function and restore the error value.
2649 for (auto *End : Shape.CoroEnds) {
2650 Builder.SetInsertPoint(End);
2651 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2652 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2653 }
2654
2655 // Now we can use the alloca logic.
2656 AllocasToPromote.push_back(Alloca);
2657 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2658}
2659
2660/// Eliminate all problematic uses of swifterror arguments and allocas
2661/// from the function. We'll fix them up later when splitting the function.
 /// Handles the (at most one) swifterror argument first, then any swifterror
 /// allocas in the entry block, and finally promotes all created/cleared
 /// allocas to registers in one mem2reg pass.
2663 SmallVector<AllocaInst*, 4> AllocasToPromote;
2664
2665 // Look for a swifterror argument.
2666 for (auto &Arg : F.args()) {
2667 if (!Arg.hasSwiftErrorAttr()) continue;
2668
2669 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2670 break;
2671 }
2672
2673 // Look for swifterror allocas.
2674 for (auto &Inst : F.getEntryBlock()) {
2675 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2676 if (!Alloca || !Alloca->isSwiftError()) continue;
2677
2678 // Clear the swifterror flag.
2679 Alloca->setSwiftError(false);
2680
2681 AllocasToPromote.push_back(Alloca);
2682 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2683 }
2684
2685 // If we have any allocas to promote, compute a dominator tree and
2686 // promote them en masse.
2687 if (!AllocasToPromote.empty()) {
2688 DominatorTree DT(F);
2689 PromoteMemToReg(AllocasToPromote, DT);
2690 }
2691}
2692
2693/// retcon and retcon.once conventions assume that all spill uses can be sunk
2694/// after the coro.begin intrinsic.
2696 const FrameDataInfo &FrameData,
2697 CoroBeginInst *CoroBegin) {
2698 DominatorTree Dom(F);
2699
2702
2703 // Collect all users that precede coro.begin.
2704 for (auto *Def : FrameData.getAllDefs()) {
2705 for (User *U : Def->users()) {
2706 auto Inst = cast<Instruction>(U);
2707 if (Inst->getParent() != CoroBegin->getParent() ||
2708 Dom.dominates(CoroBegin, Inst))
2709 continue;
2710 if (ToMove.insert(Inst))
2711 Worklist.push_back(Inst);
2712 }
2713 }
2714 // Recursively collect users before coro.begin.
2715 while (!Worklist.empty()) {
2716 auto *Def = Worklist.pop_back_val();
2717 for (User *U : Def->users()) {
2718 auto Inst = cast<Instruction>(U);
2719 if (Dom.dominates(CoroBegin, Inst))
2720 continue;
2721 if (ToMove.insert(Inst))
2722 Worklist.push_back(Inst);
2723 }
2724 }
2725
2726 // Sort by dominance.
2727 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2728 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2729 // If a dominates b it should preceed (<) b.
2730 return Dom.dominates(A, B);
2731 });
2732
2733 Instruction *InsertPt = CoroBegin->getNextNode();
2734 for (Instruction *Inst : InsertionList)
2735 Inst->moveBefore(InsertPt);
2736}
2737
2738/// For each local variable that all of its user are only used inside one of
2739/// suspended region, we sink their lifetime.start markers to the place where
2740/// after the suspend block. Doing so minimizes the lifetime of each variable,
2741/// hence minimizing the amount of data we end up putting on the frame.
2743 SuspendCrossingInfo &Checker,
2744 const DominatorTree &DT) {
2745 if (F.hasOptNone())
2746 return;
2747
2748 // Collect all possible basic blocks which may dominate all uses of allocas.
2750 DomSet.insert(&F.getEntryBlock());
2751 for (auto *CSI : Shape.CoroSuspends) {
2752 BasicBlock *SuspendBlock = CSI->getParent();
2753 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2754 "should have split coro.suspend into its own block");
2755 DomSet.insert(SuspendBlock->getSingleSuccessor());
2756 }
2757
2758 for (Instruction &I : instructions(F)) {
2759 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2760 if (!AI)
2761 continue;
2762
2763 for (BasicBlock *DomBB : DomSet) {
2764 bool Valid = true;
2766
2767 auto isLifetimeStart = [](Instruction* I) {
2768 if (auto* II = dyn_cast<IntrinsicInst>(I))
2769 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2770 return false;
2771 };
2772
2773 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2774 if (isLifetimeStart(U)) {
2775 Lifetimes.push_back(U);
2776 return true;
2777 }
2778 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2779 return false;
2780 if (isLifetimeStart(U->user_back())) {
2781 Lifetimes.push_back(U->user_back());
2782 return true;
2783 }
2784 return false;
2785 };
2786
2787 for (User *U : AI->users()) {
2788 Instruction *UI = cast<Instruction>(U);
2789 // For all users except lifetime.start markers, if they are all
2790 // dominated by one of the basic blocks and do not cross
2791 // suspend points as well, then there is no need to spill the
2792 // instruction.
2793 if (!DT.dominates(DomBB, UI->getParent()) ||
2794 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2795 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2796 // markers.
2797 if (collectLifetimeStart(UI, AI))
2798 continue;
2799 Valid = false;
2800 break;
2801 }
2802 }
2803 // Sink lifetime.start markers to dominate block when they are
2804 // only used outside the region.
2805 if (Valid && Lifetimes.size() != 0) {
2806 auto *NewLifetime = Lifetimes[0]->clone();
2807 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2808 NewLifetime->insertBefore(DomBB->getTerminator());
2809
2810 // All the outsided lifetime.start markers are no longer necessary.
2811 for (Instruction *S : Lifetimes)
2812 S->eraseFromParent();
2813
2814 break;
2815 }
2816 }
2817 }
2818}
2819
2821 const SuspendCrossingInfo &Checker,
2823 const DominatorTree &DT) {
2824 if (Shape.CoroSuspends.empty())
2825 return;
2826
2827 // The PromiseAlloca will be specially handled since it needs to be in a
2828 // fixed position in the frame.
2829 if (AI == Shape.SwitchLowering.PromiseAlloca)
2830 return;
2831
2832 // The __coro_gro alloca should outlive the promise, make sure we
2833 // keep it outside the frame.
2834 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
2835 return;
2836
2837 // The code that uses lifetime.start intrinsic does not work for functions
2838 // with loops without exit. Disable it on ABIs we know to generate such
2839 // code.
2840 bool ShouldUseLifetimeStartInfo =
2841 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2842 Shape.ABI != coro::ABI::RetconOnce);
2843 AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
2844 ShouldUseLifetimeStartInfo};
2845 Visitor.visitPtr(*AI);
2846 if (!Visitor.getShouldLiveOnFrame())
2847 return;
2848 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2849 Visitor.getMayWriteBeforeCoroBegin());
2850}
2851
2852static std::optional<std::pair<Value &, DIExpression &>>
2854 bool UseEntryValue, Function *F, Value *Storage,
2855 DIExpression *Expr, bool SkipOutermostLoad) {
2856 IRBuilder<> Builder(F->getContext());
2857 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2858 while (isa<IntrinsicInst>(InsertPt))
2859 ++InsertPt;
2860 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2861
2862 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2863 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2864 Storage = LdInst->getPointerOperand();
2865 // FIXME: This is a heuristic that works around the fact that
2866 // LLVM IR debug intrinsics cannot yet distinguish between
2867 // memory and value locations: Because a dbg.declare(alloca) is
2868 // implicitly a memory location no DW_OP_deref operation for the
2869 // last direct load from an alloca is necessary. This condition
2870 // effectively drops the *last* DW_OP_deref in the expression.
2871 if (!SkipOutermostLoad)
2873 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2874 Storage = StInst->getValueOperand();
2875 } else {
2877 SmallVector<Value *, 0> AdditionalValues;
2879 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2880 AdditionalValues);
2881 if (!Op || !AdditionalValues.empty()) {
2882 // If salvaging failed or salvaging produced more than one location
2883 // operand, give up.
2884 break;
2885 }
2886 Storage = Op;
2887 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2888 }
2889 SkipOutermostLoad = false;
2890 }
2891 if (!Storage)
2892 return std::nullopt;
2893
2894 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2895 const bool IsSwiftAsyncArg =
2896 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2897
2898 // Swift async arguments are described by an entry value of the ABI-defined
2899 // register containing the coroutine context.
2900 // Entry values in variadic expressions are not supported.
2901 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
2904
2905 // If the coroutine frame is an Argument, store it in an alloca to improve
2906 // its availability (e.g. registers may be clobbered).
2907 // Avoid this if the value is guaranteed to be available through other means
2908 // (e.g. swift ABI guarantees).
2909 if (StorageAsArg && !IsSwiftAsyncArg) {
2910 auto &Cached = ArgToAllocaMap[StorageAsArg];
2911 if (!Cached) {
2912 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2913 Storage->getName() + ".debug");
2914 Builder.CreateStore(Storage, Cached);
2915 }
2916 Storage = Cached;
2917 // FIXME: LLVM lacks nuanced semantics to differentiate between
2918 // memory and direct locations at the IR level. The backend will
2919 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2920 // location. Thus, if there are deref and offset operations in the
2921 // expression, we need to add a DW_OP_deref at the *start* of the
2922 // expression to first load the contents of the alloca before
2923 // adjusting it with the expression.
2925 }
2926
2927 return {{*Storage, *Expr}};
2928}
2929
2932 DbgVariableIntrinsic &DVI, bool UseEntryValue) {
2933
2934 Function *F = DVI.getFunction();
2935 // Follow the pointer arithmetic all the way to the incoming
2936 // function argument and convert into a DIExpression.
2937 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2938 Value *OriginalStorage = DVI.getVariableLocationOp(0);
2939
2940 auto SalvagedInfo =
2941 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
2942 DVI.getExpression(), SkipOutermostLoad);
2943 if (!SalvagedInfo)
2944 return;
2945
2946 Value *Storage = &SalvagedInfo->first;
2947 DIExpression *Expr = &SalvagedInfo->second;
2948
2949 DVI.replaceVariableLocationOp(OriginalStorage, Storage);
2950 DVI.setExpression(Expr);
2951 // We only hoist dbg.declare today since it doesn't make sense to hoist
2952 // dbg.value since it does not have the same function wide guarantees that
2953 // dbg.declare does.
2954 if (isa<DbgDeclareInst>(DVI)) {
2955 std::optional<BasicBlock::iterator> InsertPt;
2956 if (auto *I = dyn_cast<Instruction>(Storage)) {
2957 InsertPt = I->getInsertionPointAfterDef();
2958 // Update DILocation only if variable was not inlined.
2959 DebugLoc ILoc = I->getDebugLoc();
2960 DebugLoc DVILoc = DVI.getDebugLoc();
2961 if (ILoc && DVILoc &&
2962 DVILoc->getScope()->getSubprogram() ==
2963 ILoc->getScope()->getSubprogram())
2964 DVI.setDebugLoc(I->getDebugLoc());
2965 } else if (isa<Argument>(Storage))
2966 InsertPt = F->getEntryBlock().begin();
2967 if (InsertPt)
2968 DVI.moveBefore(*(*InsertPt)->getParent(), *InsertPt);
2969 }
2970}
2971
2974 DbgVariableRecord &DVR, bool UseEntryValue) {
2975
2976 Function *F = DVR.getFunction();
2977 // Follow the pointer arithmetic all the way to the incoming
2978 // function argument and convert into a DIExpression.
2979 bool SkipOutermostLoad = DVR.isDbgDeclare();
2980 Value *OriginalStorage = DVR.getVariableLocationOp(0);
2981
2982 auto SalvagedInfo =
2983 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
2984 DVR.getExpression(), SkipOutermostLoad);
2985 if (!SalvagedInfo)
2986 return;
2987
2988 Value *Storage = &SalvagedInfo->first;
2989 DIExpression *Expr = &SalvagedInfo->second;
2990
2991 DVR.replaceVariableLocationOp(OriginalStorage, Storage);
2992 DVR.setExpression(Expr);
2993 // We only hoist dbg.declare today since it doesn't make sense to hoist
2994 // dbg.value since it does not have the same function wide guarantees that
2995 // dbg.declare does.
2996 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
2997 std::optional<BasicBlock::iterator> InsertPt;
2998 if (auto *I = dyn_cast<Instruction>(Storage)) {
2999 InsertPt = I->getInsertionPointAfterDef();
3000 // Update DILocation only if variable was not inlined.
3001 DebugLoc ILoc = I->getDebugLoc();
3002 DebugLoc DVRLoc = DVR.getDebugLoc();
3003 if (ILoc && DVRLoc &&
3004 DVRLoc->getScope()->getSubprogram() ==
3005 ILoc->getScope()->getSubprogram())
3006 DVR.setDebugLoc(ILoc);
3007 } else if (isa<Argument>(Storage))
3008 InsertPt = F->getEntryBlock().begin();
3009 if (InsertPt) {
3010 DVR.removeFromParent();
3011 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
3012 }
3013 }
3014}
3015
3017 Function &F, SuspendCrossingInfo &Checker,
3018 const std::function<bool(Instruction &)> &MaterializableCallback) {
3019 if (F.hasOptNone())
3020 return;
3021
3022 SpillInfo Spills;
3023
3024 // See if there are materializable instructions across suspend points
3025 // We record these as the starting point to also identify materializable
3026 // defs of uses in these operations
3027 for (Instruction &I : instructions(F)) {
3028 if (!MaterializableCallback(I))
3029 continue;
3030 for (User *U : I.users())
3031 if (Checker.isDefinitionAcrossSuspend(I, U))
3032 Spills[&I].push_back(cast<Instruction>(U));
3033 }
3034
3035 // Process each of the identified rematerializable instructions
3036 // and add predecessor instructions that can also be rematerialized.
3037 // This is actually a graph of instructions since we could potentially
3038 // have multiple uses of a def in the set of predecessor instructions.
3039 // The approach here is to maintain a graph of instructions for each bottom
3040 // level instruction - where we have a unique set of instructions (nodes)
3041 // and edges between them. We then walk the graph in reverse post-dominator
3042 // order to insert them past the suspend point, but ensure that ordering is
3043 // correct. We also rely on CSE removing duplicate defs for remats of
3044 // different instructions with a def in common (rather than maintaining more
3045 // complex graphs for each suspend point)
3046
3047 // We can do this by adding new nodes to the list for each suspend
3048 // point. Then using standard GraphTraits to give a reverse post-order
3049 // traversal when we insert the nodes after the suspend
3051 for (auto &E : Spills) {
3052 for (Instruction *U : E.second) {
3053 // Don't process a user twice (this can happen if the instruction uses
3054 // more than one rematerializable def)
3055 if (AllRemats.count(U))
3056 continue;
3057
3058 // Constructor creates the whole RematGraph for the given Use
3059 auto RematUPtr =
3060 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
3061
3062 LLVM_DEBUG(dbgs() << "***** Next remat group *****\n";
3063 ReversePostOrderTraversal<RematGraph *> RPOT(RematUPtr.get());
3064 for (auto I = RPOT.begin(); I != RPOT.end();
3065 ++I) { (*I)->Node->dump(); } dbgs()
3066 << "\n";);
3067
3068 AllRemats[U] = std::move(RematUPtr);
3069 }
3070 }
3071
3072 // Rewrite materializable instructions to be materialized at the use
3073 // point.
3074 LLVM_DEBUG(dumpRemats("Materializations", AllRemats));
3076}
3077
3080 const std::function<bool(Instruction &)> &MaterializableCallback) {
3081 // Don't eliminate swifterror in async functions that won't be split.
3082 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
3084
3085 if (Shape.ABI == coro::ABI::Switch &&
3088 }
3089
3090 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
3091 // intrinsics are in their own blocks to simplify the logic of building up
3092 // SuspendCrossing data.
3093 for (auto *CSI : Shape.CoroSuspends) {
3094 if (auto *Save = CSI->getCoroSave())
3095 splitAround(Save, "CoroSave");
3096 splitAround(CSI, "CoroSuspend");
3097 }
3098
3099 // Put CoroEnds into their own blocks.
3100 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
3101 splitAround(CE, "CoroEnd");
3102
3103 // Emit the musttail call function in a new block before the CoroEnd.
3104 // We do this here so that the right suspend crossing info is computed for
3105 // the uses of the musttail call function call. (Arguments to the coro.end
3106 // instructions would be ignored)
3107 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
3108 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
3109 if (!MustTailCallFn)
3110 continue;
3111 IRBuilder<> Builder(AsyncEnd);
3112 SmallVector<Value *, 8> Args(AsyncEnd->args());
3113 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
3114 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
3115 TTI, Arguments, Builder);
3116 splitAround(Call, "MustTailCall.Before.CoroEnd");
3117 }
3118 }
3119
3120 // Later code makes structural assumptions about single predecessors phis e.g
3121 // that they are not live across a suspend point.
3123
3124 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
3125 // never has its definition separated from the PHI by the suspend point.
3126 rewritePHIs(F);
3127
3128 // Build suspend crossing info.
3129 SuspendCrossingInfo Checker(F, Shape);
3130
3131 doRematerializations(F, Checker, MaterializableCallback);
3132
3133 const DominatorTree DT(F);
3134 FrameDataInfo FrameData;
3136 SmallVector<Instruction*, 4> DeadInstructions;
3137 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
3138 Shape.ABI != coro::ABI::RetconOnce)
3139 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
3140
3141 // Collect the spills for arguments and other not-materializable values.
3142 for (Argument &A : F.args())
3143 for (User *U : A.users())
3144 if (Checker.isDefinitionAcrossSuspend(A, U))
3145 FrameData.Spills[&A].push_back(cast<Instruction>(U));
3146
3147 for (Instruction &I : instructions(F)) {
3148 // Values returned from coroutine structure intrinsics should not be part
3149 // of the Coroutine Frame.
3151 continue;
3152
3153 // Handle alloca.alloc specially here.
3154 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
3155 // Check whether the alloca's lifetime is bounded by suspend points.
3156 if (isLocalAlloca(AI)) {
3157 LocalAllocas.push_back(AI);
3158 continue;
3159 }
3160
3161 // If not, do a quick rewrite of the alloca and then add spills of
3162 // the rewritten value. The rewrite doesn't invalidate anything in
3163 // Spills because the other alloca intrinsics have no other operands
3164 // besides AI, and it doesn't invalidate the iteration because we delay
3165 // erasing AI.
3166 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
3167
3168 for (User *U : Alloc->users()) {
3169 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3170 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3171 }
3172 continue;
3173 }
3174
3175 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
3176 if (isa<CoroAllocaGetInst>(I))
3177 continue;
3178
3179 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
3180 collectFrameAlloca(AI, Shape, Checker, FrameData.Allocas, DT);
3181 continue;
3182 }
3183
3184 for (User *U : I.users())
3185 if (Checker.isDefinitionAcrossSuspend(I, U)) {
3186 // We cannot spill a token.
3187 if (I.getType()->isTokenTy())
3189 "token definition is separated from the use by a suspend point");
3190 FrameData.Spills[&I].push_back(cast<Instruction>(U));
3191 }
3192 }
3193
3194 LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
3195
3196 // We don't want the layout of coroutine frame to be affected
3197 // by debug information. So we only choose to salvage DbgValueInst for
3198 // whose value is already in the frame.
3199 // We would handle the dbg.values for allocas specially
3200 for (auto &Iter : FrameData.Spills) {
3201 auto *V = Iter.first;
3204 findDbgValues(DVIs, V, &DVRs);
3205 for (DbgValueInst *DVI : DVIs)
3206 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3207 FrameData.Spills[V].push_back(DVI);
3208 // Add the instructions which carry debug info that is in the frame.
3209 for (DbgVariableRecord *DVR : DVRs)
3210 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
3211 FrameData.Spills[V].push_back(DVR->Marker->MarkedInstr);
3212 }
3213
3214 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
3215 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
3216 Shape.ABI == coro::ABI::Async)
3218 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
3220 // For now, this works for C++ programs only.
3221 buildFrameDebugInfo(F, Shape, FrameData);
3222 insertSpills(FrameData, Shape);
3223 lowerLocalAllocas(LocalAllocas, DeadInstructions);
3224
3225 for (auto *I : DeadInstructions)
3226 I->eraseFromParent();
3227}
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:533
static void cleanupSinglePredPHIs(Function &F)
Definition: CoroFrame.cpp:2175
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
Definition: CoroFrame.cpp:2397
static bool isCoroutineStructureIntrinsic(Instruction &I)
Definition: CoroFrame.cpp:2285
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
Definition: CoroFrame.cpp:2393
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
Definition: CoroFrame.cpp:2511
static std::optional< std::pair< Value &, DIExpression & > > salvageDebugInfoImpl(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, bool UseEntryValue, Function *F, Value *Storage, DIExpression *Expr, bool SkipOutermostLoad)
Definition: CoroFrame.cpp:2853
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
Definition: CoroFrame.cpp:1704
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
Definition: CoroFrame.cpp:2662
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
Definition: CoroFrame.cpp:2471
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
Definition: CoroFrame.cpp:2420
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
Definition: CoroFrame.cpp:2551
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
Definition: CoroFrame.cpp:2569
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
Definition: CoroFrame.cpp:963
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
Definition: CoroFrame.cpp:2820
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
Definition: CoroFrame.cpp:2453
static void splitAround(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2384
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
Definition: CoroFrame.cpp:2599
static void rewritePHIs(BasicBlock &BB)
Definition: CoroFrame.cpp:2192
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
Definition: CoroFrame.cpp:2082
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
Definition: CoroFrame.cpp:1021
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
Definition: CoroFrame.cpp:2434
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
Definition: CoroFrame.cpp:2625
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
Definition: CoroFrame.cpp:1108
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Definition: CoroFrame.cpp:1283
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2371
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
Definition: CoroFrame.cpp:2695
static bool isSuspendBlock(BasicBlock *BB)
Definition: CoroFrame.cpp:2389
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
Definition: CoroFrame.cpp:2100
static void rewriteMaterializableInstructions(const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &AllRemats)
Definition: CoroFrame.cpp:2293
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker, const DominatorTree &DT)
For each local variable that all of its user are only used inside one of suspended region,...
Definition: CoroFrame.cpp:2742
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
Definition: CoroFrame.cpp:572
@ SmallVectorThreshold
Definition: CoroFrame.cpp:53
static StringRef solveTypeName(Type *Ty)
Create name for Type.
Definition: CoroFrame.cpp:983
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
Definition: CoroFrame.cpp:553
static void doRematerializations(Function &F, SuspendCrossingInfo &Checker, const std::function< bool(Instruction &)> &MaterializableCallback)
Definition: CoroFrame.cpp:3016
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
Definition: CoroFrame.cpp:2536
static void dumpRemats(StringRef Title, const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &RM)
Definition: CoroFrame.cpp:562
cl::opt< bool > UseNewDbgInfoFormat
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Definition: CoroFrame.cpp:1737
Given that RA is a live value
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:1132
Hexagon Common GEP
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:531
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
mir Rename Register Operands
uint64_t IntrinsicInst * II
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
raw_pwrite_stream & OS
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:149
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:126
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:517
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
const Instruction & front() const
Definition: BasicBlock.h:471
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:577
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:489
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Definition: InstrTypes.h:1769
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
unsigned arg_size() const
Definition: InstrTypes.h:1408
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args=std::nullopt, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1800
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This represents the llvm.coro.alloca.alloc instruction.
Definition: CoroInstr.h:739
Value * getSize() const
Definition: CoroInstr.h:742
This class represents the llvm.coro.begin instruction.
Definition: CoroInstr.h:451
void clearPromise()
Definition: CoroInstr.h:157
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:524
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
Definition: DIBuilder.cpp:520
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
Definition: DIBuilder.cpp:316
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
Definition: DIBuilder.cpp:844
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
Definition: DIBuilder.cpp:713
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
Definition: DIBuilder.cpp:594
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
Definition: DIBuilder.cpp:267
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
Definition: DIBuilder.cpp:693
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
Definition: DIBuilder.cpp:390
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
Definition: DIBuilder.cpp:806
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
Definition: DIBuilder.cpp:1206
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
Debug location.
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:695
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:838
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:621
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:842
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
A debug info location.
Definition: DebugLoc.h:33
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:146
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:103
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1790
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Definition: IRBuilder.h:1070
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
UnreachableInst * CreateUnreachable()
Definition: IRBuilder.h:1280
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1891
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2417
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
Definition: IRBuilder.h:1160
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1807
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
Definition: IRBuilder.h:1930
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1820
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:566
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1131
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
Definition: IRBuilder.h:1077
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1843
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2152
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
void visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visitBitCastInst(BitCastInst &I)
Definition: InstVisitor.h:187
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
void visitPHINode(PHINode &I)
Definition: InstVisitor.h:175
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
Definition: InstVisitor.h:188
void visitSelectInst(SelectInst &I)
Definition: InstVisitor.h:189
void visitGetElementPtrInst(GetElementPtrInst &I)
Definition: InstVisitor.h:174
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
Definition: Instruction.cpp:78
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:363
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
Definition: Metadata.cpp:1077
LLVMContext & getContext() const
Definition: Metadata.h:1233
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:606
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1499
size_type count(const KeyT &Key) const
Definition: MapVector.h:165
This is the common base class for memset/memcpy/memmove.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:113
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
Definition: SmallString.h:254
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Compute live ranges of allocas.
Definition: StackLifetime.h:37
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:215
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition: DataLayout.h:605
Class to represent struct types.
Definition: DerivedTypes.h:216
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
Definition: Type.cpp:433
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:501
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:342
Multiway switch.
void setDefaultDest(BasicBlock *DefaultCase)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
bool empty() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:153
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:245
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:156
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:882
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
SmallVector< UseToVisit, 8 > Worklist
The worklist of to-visit uses.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
A range adaptor for a pair of iterators.
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ CE
Windows NT (Windows on ARM)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
Definition: CoroFrame.cpp:2930
bool defaultMaterializable(Instruction &V)
Default materializable callback.
Definition: CoroFrame.cpp:2278
void buildCoroutineFrame(Function &F, Shape &Shape, TargetTransformInfo &TTI, const std::function< bool(Instruction &)> &MaterializableCallback)
Definition: CoroFrame.cpp:3078
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1656
SourceLanguage
Definition: Dwarf.h:207
bool isCPlusPlus(SourceLanguage S)
Definition: Dwarf.h:493
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< BlockNode * > Block
Definition: RDFGraph.h:392
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
@ Offset
Definition: DWP.cpp:480
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:359
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:145
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:138
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
Definition: CFG.cpp:248
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2545
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition: Alignment.h:197
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1961
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
Definition: DebugInfo.cpp:66
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
RematGraph::RematNode * NodeRef
Definition: CoroFrame.cpp:439
static ChildIteratorType child_end(NodeRef N)
Definition: CoroFrame.cpp:446
RematGraph::RematNode ** ChildIteratorType
Definition: CoroFrame.cpp:440
static NodeRef getEntryNode(RematGraph *G)
Definition: CoroFrame.cpp:442
static ChildIteratorType child_begin(NodeRef N)
Definition: CoroFrame.cpp:443
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
AsyncLoweringStorage AsyncLowering
Definition: CoroInternal.h:151
StructType * FrameTy
Definition: CoroInternal.h:107
AnyCoroIdRetconInst * getRetconCoroId() const
Definition: CoroInternal.h:159
CoroIdInst * getSwitchCoroId() const
Definition: CoroInternal.h:154
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroInternal.h:85
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:456
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroInternal.h:86
AllocaInst * getPromiseAlloca() const
Definition: CoroInternal.h:243
bool OptimizeFrame
This would only be true if optimization are enabled.
Definition: CoroInternal.h:114
SwitchLoweringStorage SwitchLowering
Definition: CoroInternal.h:149
CoroBeginInst * CoroBegin
Definition: CoroInternal.h:81
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition: CoroInternal.h:249
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:479
RetconLoweringStorage RetconLowering
Definition: CoroInternal.h:150
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroInternal.h:82
BasicBlock * AllocaSpillBlock
Definition: CoroInternal.h:111