//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include <algorithm>
#include <deque>
#include <optional>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and their index numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock const *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer whether,
// given two BasicBlocks A and B, there is a path from A to B that passes
// through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'. A block can trivially reach itself.
//   Kills:     a bit vector which contains a set of indices of blocks that can
//              reach block 'i' via a path crossing a suspend point and not
//              repeating 'i' (a path to 'i' without cycles containing 'i').
//   Suspend:   a boolean indicating whether block 'i' contains a suspend
//              point.
//   End:       a boolean indicating whether block 'i' contains a coro.end
//              intrinsic.
//   KillLoop:  there is a path from 'i' to 'i' that does not otherwise repeat
//              'i' and crosses a suspend point.
//
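// For illustration (a sketch, not from the original comment): given the CFG
//
//   entry -> S -> use        (S contains a suspend point)
//
// after the analysis converges, Consumes[use] = {entry, S, use}, and because
// S is a suspend block, Kills[use] contains every block that reaches S. So
// hasPathCrossingSuspendPoint(entry, use) is true, and a value defined in
// 'entry' and used in 'use' must be spilled to the coroutine frame.
//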
namespace {
class SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
    bool KillLoop = false;
    bool Changed = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<pred_iterator> predecessors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::predecessors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  /// Compute the BlockData for the current function in one iteration.
  /// Initialize - Whether this is the first iteration; if so, we can optimize
  /// the initial case a little bit with a manual loop switch.
  /// Returns whether the BlockData changed in this iteration.
  template <bool Initialize = false>
  bool computeBlockData(const ReversePostOrderTraversal<Function *> &RPOT);

public:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;
#endif

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  /// Returns true if there is a path from \p From to \p To crossing a suspend
  /// point without crossing \p From a 2nd time.
  bool hasPathCrossingSuspendPoint(BasicBlock *From, BasicBlock *To) const {
    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
    bool const Result = Block[ToIndex].Kills[FromIndex];
    LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  /// Returns true if there is a path from \p From to \p To crossing a suspend
  /// point without crossing \p From a 2nd time. If \p From is the same as
  /// \p To, this will also check if there is a looping path crossing a
  /// suspend point.
  bool hasPathOrLoopCrossingSuspendPoint(BasicBlock *From,
                                         BasicBlock *To) const {
    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
    bool Result = Block[ToIndex].Kills[FromIndex] ||
                  (From == To && Block[ToIndex].KillLoop);
    LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
                      << " answer is " << Result << " (path or loop)\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes so that only those with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutine could only collect Argument and Instruction now.");
  }
};
} // end anonymous namespace
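
// Typical usage (an illustrative sketch, not from the original file): the
// pass constructs one SuspendCrossingInfo per coroutine and then queries
//   Checker.isDefinitionAcrossSuspend(*Def, U)
// for each definition/use pair; any pair that crosses a suspend point makes
// the definition a candidate for spilling or rematerialization.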

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

template <bool Initialize>
bool SuspendCrossingInfo::computeBlockData(
    const ReversePostOrderTraversal<Function *> &RPOT) {
  bool Changed = false;

  for (const BasicBlock *BB : RPOT) {
    auto BBNo = Mapping.blockToIndex(BB);
    auto &B = Block[BBNo];

    // We don't need to check the predecessors on the initializing iteration.
    if constexpr (!Initialize)
      // If none of the predecessors of the current block changed,
      // the BlockData for the current block cannot change either.
      if (all_of(predecessors(B), [this](BasicBlock *BB) {
            return !Block[Mapping.blockToIndex(BB)].Changed;
          })) {
        B.Changed = false;
        continue;
      }

    // Save the Consumes and Kills bitsets so that it is easy to see
    // if anything changed after propagation.
    auto SavedConsumes = B.Consumes;
    auto SavedKills = B.Kills;

    for (BasicBlock *PI : predecessors(B)) {
      auto PrevNo = Mapping.blockToIndex(PI);
      auto &P = Block[PrevNo];

      // Propagate Kills and Consumes from predecessors into B.
      B.Consumes |= P.Consumes;
      B.Kills |= P.Kills;

      // If block P is a suspend block, it should propagate kills into block
      // B for every block P consumes.
      if (P.Suspend)
        B.Kills |= P.Consumes;
    }

    if (B.Suspend) {
      // If block B is a suspend block, it should kill all of the blocks it
      // consumes.
      B.Kills |= B.Consumes;
    } else if (B.End) {
      // If block B is an end block, it should not propagate kills as the
      // blocks following coro.end() are reached during initial invocation
      // of the coroutine while all the data are still available on the
      // stack or in the registers.
      B.Kills.reset();
    } else {
      // This is reached when block B is neither a suspend nor a coro.end
      // block; we need to make sure it is not in its own kill set.
      B.KillLoop |= B.Kills[BBNo];
      B.Kills.reset(BBNo);
    }

    if constexpr (!Initialize) {
      B.Changed = (B.Kills != SavedKills) || (B.Consumes != SavedConsumes);
      Changed |= B.Changed;
    }
  }

  return Changed;
}
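
// In dataflow terms (an illustrative summary, not from the original file):
//   Consumes[B] = {B} | U_{P in preds(B)} Consumes[P]
//   Kills[B]    = U_{P in preds(B)} (Kills[P] | (P.Suspend ? Consumes[P] : {}))
// then B.Suspend adds all of Consumes[B] to Kills[B], B.End clears Kills[B],
// and otherwise B removes itself from Kills[B] (recording KillLoop if it was
// present). The iteration repeats until a fixed point is reached.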

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
    B.Changed = true;
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // It is considered to be faster to use RPO traversal for forward-edges
  // dataflow analysis.
  ReversePostOrderTraversal<Function *> RPOT(&F);
  computeBlockData</*Initialize=*/true>(RPOT);
  while (computeBlockData</*Initialize=*/false>(RPOT))
    ;

  LLVM_DEBUG(dump());
}

namespace {

// RematGraph is used to construct a DAG for rematerializable instructions.
// When the constructor is invoked with a candidate instruction (which is
// materializable), it builds a DAG of materializable instructions from that
// point.
// Typically, for each instruction identified as re-materializable across a
// suspend point, a RematGraph will be created.
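//
// For example (an illustrative sketch, not from the original comment), given
//   %a = add i32 %x, 1
//   %b = mul i32 %a, 2    ; %b is used across a suspend point
// a RematGraph rooted at %b contains the edge %b -> %a, so both instructions
// can be re-emitted after the suspend instead of spilling %b to the frame.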
struct RematGraph {
  // Each RematNode in the graph contains the edges to instructions providing
  // operands for the current node.
  struct RematNode {
    Instruction *Node;
    SmallVector<RematNode *> Operands;
    RematNode() = default;
    RematNode(Instruction *V) : Node(V) {}
  };

  RematNode *EntryNode;
  using RematNodeMap =
      SmallMapVector<Instruction *, std::unique_ptr<RematNode>, 8>;
  RematNodeMap Remats;
  const std::function<bool(Instruction &)> &MaterializableCallback;
  SuspendCrossingInfo &Checker;

  RematGraph(const std::function<bool(Instruction &)> &MaterializableCallback,
             Instruction *I, SuspendCrossingInfo &Checker)
      : MaterializableCallback(MaterializableCallback), Checker(Checker) {
    std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(I);
    EntryNode = FirstNode.get();
    std::deque<std::unique_ptr<RematNode>> WorkList;
    addNode(std::move(FirstNode), WorkList, cast<User>(I));
    while (WorkList.size()) {
      std::unique_ptr<RematNode> N = std::move(WorkList.front());
      WorkList.pop_front();
      addNode(std::move(N), WorkList, cast<User>(I));
    }
  }

  void addNode(std::unique_ptr<RematNode> NUPtr,
               std::deque<std::unique_ptr<RematNode>> &WorkList,
               User *FirstUse) {
    RematNode *N = NUPtr.get();
    if (Remats.count(N->Node))
      return;

    // We haven't seen this node yet - add it to the map.
    Remats[N->Node] = std::move(NUPtr);
    for (auto &Def : N->Node->operands()) {
      Instruction *D = dyn_cast<Instruction>(Def.get());
      if (!D || !MaterializableCallback(*D) ||
          !Checker.isDefinitionAcrossSuspend(*D, FirstUse))
        continue;

      if (Remats.count(D)) {
        // Already have this in the graph.
        N->Operands.push_back(Remats[D].get());
        continue;
      }

      bool NoMatch = true;
      for (auto &I : WorkList) {
        if (I->Node == D) {
          NoMatch = false;
          N->Operands.push_back(I.get());
          break;
        }
      }
      if (NoMatch) {
        // Create a new node.
        std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(D);
        N->Operands.push_back(ChildNode.get());
        WorkList.push_back(std::move(ChildNode));
      }
    }
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const {
    dbgs() << "Entry (";
    if (EntryNode->Node->getParent()->hasName())
      dbgs() << EntryNode->Node->getParent()->getName();
    else
      EntryNode->Node->getParent()->printAsOperand(dbgs(), false);
    dbgs() << ") : " << *EntryNode->Node << "\n";
    for (auto &E : Remats) {
      dbgs() << *(E.first) << "\n";
      for (RematNode *U : E.second->Operands)
        dbgs() << "  " << *U->Node << "\n";
    }
  }
#endif
};
} // end anonymous namespace

namespace llvm {

template <> struct GraphTraits<RematGraph *> {
  using NodeRef = RematGraph::RematNode *;
  using ChildIteratorType = RematGraph::RematNode **;

  static NodeRef getEntryNode(RematGraph *G) { return G->EntryNode; }
  static ChildIteratorType child_begin(NodeRef N) {
    return N->Operands.begin();
  }
  static ChildIteratorType child_end(NodeRef N) { return N->Operands.end(); }
};

} // end namespace llvm

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, std::optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, std::optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  Align getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, Align AL) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, AL});
  }

  uint64_t getDynamicAlign(Value *V) const {
    auto Iter = FieldDynamicAlignMap.find(V);
    assert(Iter != FieldDynamicAlignMap.end());
    return Iter->second;
  }

  void setDynamicAlign(Value *V, uint64_t Align) {
    assert(FieldDynamicAlignMap.count(V) == 0);
    FieldDynamicAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will first be set
  // to their original insertion field index. After the frame is built, their
  // indexes will be updated to the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They will be set after
  // the frame is built.
  DenseMap<Value *, Align> FieldAlignMap;
  DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
  // Map from values to their offset on the frame. They will be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}
static void dumpRemats(
    StringRef Title,
    const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8> &RM) {
  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : RM) {
    E.second->dump();
    dbgs() << "--\n";
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame; if the alignment specified on the alloca instruction
// differs from the natural alignment of the allocated type, we will need to
// insert padding.
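//
// For example (an illustrative case, not from the original comment): an
// `alloca i32, align 8` placed after a field that ends at offset 4 needs 4
// bytes of padding so that it lands on an 8-byte boundary, even though the
// natural alignment of i32 is only 4.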
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
    uint64_t DynamicAlignBuffer;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  std::optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   std::optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
                                              bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap into one
  /// slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, in order to put allocas
  /// in the same non-overlapping set into the same slot in the coroutine
  /// frame. It then adds a field for each set, using the largest type in the
  /// set as the field type.
  ///
  /// Side effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
                                     bool IsHeader = false,
                                     bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size = 0, we don't need to add a field; it can just
    // point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment = ABIAlign;
    if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
      TyAlignment = *MaxFrameAlignment;
    Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);

    // The field alignment could be bigger than the max frame alignment; in
    // that case we request additional storage to be able to dynamically
    // align the pointer.
    uint64_t DynamicAlignBuffer = 0;
    if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
      DynamicAlignBuffer =
          offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
      FieldAlignment = *MaxFrameAlignment;
      FieldSize = FieldSize + DynamicAlignBuffer;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
    return Fields.size() - 1;
  }
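
  // Worked example (illustrative, not from the original file): with
  // MaxFrameAlignment = 8 and a spilled value that wants align 32, the field
  // is stored at align 8 plus offsetToAlignment(8, Align(32)) == 24 extra
  // bytes, so the runtime can always round the field address up to a 32-byte
  // boundary inside the buffer.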

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment);
    uint64_t dynamicAlign =
        Field.DynamicAlignBuffer
            ? Field.DynamicAlignBuffer + Field.Alignment.value()
            : 0;
    setDynamicAlign(I, dynamicAlign);
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it merely prevents putting the allocas in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();
  };
  // Put larger allocas in the front, so that they have higher priority to
  // merge, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so we can easily find the largest alloca in a set.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapping AllocaSet that this alloca
    // does not interfere with. If there is one, insert the alloca into it.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() &&
             "AllocaSet being processed should not be empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement for B.
      //
      // There may be other, more fine-grained strategies for handling the
      // alignment information during the merging process, but they seem hard
      // to implement for little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement
  // we modified above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas were merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    if (F.DynamicAlignBuffer) {
      FieldTypes.push_back(
          ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
    }
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.contains(V))
      continue;

    auto CacheIt = [&DIVarCache, V](const auto &Container) {
      auto *I = llvm::find_if(Container, [](auto *DDI) {
        return DDI->getExpression()->getNumElements() == 0;
      });
      if (I != Container.end())
        DIVarCache.insert({V, (*I)->getVariable()});
    };
    CacheIt(findDbgDeclares(V));
    CacheIt(findDVRDeclares(V));
  }
}

/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid a memory leak.
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common use may be '__int_128', which has 9
    // characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (Ty->isPointerTy())
    return "PointerType";

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for (auto &Iter : Buffer)
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct the PointerType as pointing to null (aka void *) instead of
    // exploring the pointee type, to avoid an infinite search problem. For
    // example, we would be in trouble if we traversed recursively:
    //
    //  struct Node {
    //      Node* ptr;
    //  };
    RetType =
        Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
                                  Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
                                  /*DWARFAddressSpace=*/std::nullopt, Name);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
        llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
    TypeSize Size = Layout.getTypeSizeInBits(Ty);
    auto *CharSizeType = Builder.createBasicType(
        Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);

    if (Size <= 8)
      RetType = CharSizeType;
    else {
      if (Size % 8 != 0)
        Size = TypeSize::getFixed(Size + 8 - (Size % 8));

      RetType = Builder.createArrayType(
          Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
          Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
    }
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For each value in the frame, we search its uses for a dbg.declare to
///    find the corresponding debug variable for the value. If we can find
///    the debug variable, we can get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we can only try to build
///    the DIType from the Type. We do this in solveDIType. We only handle
///    integer, float, double, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info. So we also don't need to generate debug info
  // for the frame.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = findDbgDeclares(PromiseAlloca);
  TinyPtrVector<DbgVariableRecord *> DVRs = findDVRDeclares(PromiseAlloca);

  DILocalVariable *PromiseDIVariable = nullptr;
  DILocation *DILoc = nullptr;
  if (!DIs.empty()) {
    DbgDeclareInst *PromiseDDI = DIs.front();
    PromiseDIVariable = PromiseDDI->getVariable();
    DILoc = PromiseDDI->getDebugLoc().get();
  } else if (!DVRs.empty()) {
    DbgVariableRecord *PromiseDVR = DVRs.front();
    PromiseDIVariable = PromiseDVR->getVariable();
    DILoc = PromiseDVR->getDebugLoc().get();
  } else {
    return;
  }

  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
      DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert(
      {ResumeIndex, DBuilder.createPointerType(
                        nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
  TyCache.insert(
      {DestroyIndex, DBuilder.createPointerType(
                         nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (!DIVarCache.contains(V))
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of the Resume function and Destroy function are
  // fixed.
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g. there may be
  // many i32 and i64 types in one coroutine, so we use names like i32_0 and
  // i32_1 to distinguish them, since conflicting field names make no sense.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (!OffsetCache.contains(Index))
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.contains(Index)) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));

  // The subprogram has a RetainedNodes field that records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame`
  // is optimized out, which is more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  if (UseNewDbgInfoFormat) {
    DbgVariableRecord *NewDVR =
        new DbgVariableRecord(ValueAsMetadata::get(Shape.FramePtr), FrameDIVar,
                              DBuilder.createExpression(), DILoc,
                              DbgVariableRecord::LocationType::Declare);
    BasicBlock::iterator It = Shape.getInsertPtAfterFramePtr();
    It->getParent()->insertDbgRecordBefore(NewDVR, It);
  } else {
    DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                           DBuilder.createExpression(), DILoc,
                           &*Shape.getInsertPtAfterFramePtr());
  }
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     ... promise (if present) ...
//     int ResumeIndex;
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  std::optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  std::optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FnPtrTy = PointerType::getUnqual(C);

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
    (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may own the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //  1. updateLayoutIndex can update its index after
  //     `performOptimizedStructLayout`, and
  //  2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id = B.addField(FieldType, std::nullopt, false /*header*/,
                                true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
        = (B.getStructSize() <= Id->getStorageSize() &&
           B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//  1. Should this alloca be allocated on the frame instead?
//  2. Could the content of the alloca be modified prior to CoroBegin, which
//     would require copying the data from the alloca to the frame after
//     CoroBegin?
//  3. Are there any aliases created for this alloca prior to CoroBegin, but
//     used after CoroBegin? In that case, we will need to recreate the alias
//     after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//  a. The list of all BasicBlocks that use this alloca or any of its aliases.
//     In the end, we check if there exist any two basic blocks that cross
//     suspension points. If so, this alloca must be put on the frame.
//  b. Whether the alloca or any alias of the alloca is escaped at some point,
//     either by storing the address somewhere, or by passing the address to a
//     function call that might capture it. If it's ever escaped, this alloca
//     must be put on the frame conservatively.
//
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
// be able to represent the case when the offset is unknown (e.g. when you have
// a PHINode that takes in different offset values). We cannot handle unknown
// offsets and will assert. This is the potential issue left out. An ideal
// solution would likely require a significant redesign.
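//
// For example (an illustrative sketch, not from the original comment):
//   %a = alloca i64
//   %p = getelementptr i8, ptr %a, i64 4   ; alias of %a with offset 4
//   ...
//   %hdl = call ptr @llvm.coro.begin(...)
//   ... uses of %p and %a after coro.begin ...
// Here %p is an alias created before CoroBegin but used after it, with known
// offset 4; if %a is moved onto the frame, %p must be recreated from the
// frame address after CoroBegin.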
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find lifetime markers that refer to a subrange of the original
    // alloca, ignore them to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }
1587private:
1588 const DominatorTree &DT;
1589 const CoroBeginInst &CoroBegin;
1590 const SuspendCrossingInfo &Checker;
1591 // All alias to the original AllocaInst, created before CoroBegin and used
1592 // after CoroBegin. Each entry contains the instruction and the offset in the
1593 // original Alloca. They need to be recreated after CoroBegin off the frame.
1596 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1597 bool MayWriteBeforeCoroBegin{false};
1598 bool ShouldUseLifetimeStartInfo{true};
1599
1600 mutable std::optional<bool> ShouldLiveOnFrame{};
1601
1602 bool computeShouldLiveOnFrame() const {
1603 // If lifetime information is available, we check it first since it's
1604 // more precise. We look at every pair of lifetime.start intrinsic and
1605 // every basic block that uses the pointer to see if they cross suspension
1606 // points. The uses cover both direct uses as well as indirect uses.
1607 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1608 for (auto *I : Users)
1609 for (auto *S : LifetimeStarts)
1610 if (Checker.isDefinitionAcrossSuspend(*S, I))
1611 return true;
1612 // Addresses are guaranteed to be identical after every lifetime.start so
1613 // we cannot use the local stack if the address escaped and there is a
1614 // suspend point between lifetime markers. This should also cover the
1615 // case of a single lifetime.start intrinsic in a loop with a suspend point.
1616 if (PI.isEscaped()) {
1617 for (auto *A : LifetimeStarts) {
1618 for (auto *B : LifetimeStarts) {
1619 if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
1620 B->getParent()))
1621 return true;
1622 }
1623 }
1624 }
1625 return false;
1626 }
1627 // FIXME: Ideally the isEscaped check should come at the beginning.
1628 // However there are a few loose ends that need to be fixed first before
1629 // we can do that. We need to make sure we are not over-conservative, so
1630 // that the data accessed in-between await_suspend and symmetric transfer
1631 // is always put on the stack, and also data accessed after coro.end is
1632 // always put on the stack (esp the return object). To fix that, we need
1633 // to:
1634 // 1) Potentially treat sret as nocapture in calls
1635 // 2) Special handle the return object and put it on the stack
1636 // 3) Utilize lifetime.end intrinsic
1637 if (PI.isEscaped())
1638 return true;
1639
1640 for (auto *U1 : Users)
1641 for (auto *U2 : Users)
1642 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1643 return true;
1644
1645 return false;
1646 }
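// Editorial sketch (illustrative, not part of the original source; names are
// hypothetical): a minimal IR shape for which computeShouldLiveOnFrame
// answers true. The lifetime-bounded use of %x crosses the suspend, so %x
// must be placed on the coroutine frame:
//
//   %x = alloca i32
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   store i32 0, ptr %x
//   %sp = call i8 @llvm.coro.suspend(token none, i1 false)
//   ; ... resume path ...
//   %v = load i32, ptr %x   ; use across the suspend => live on frame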
1647
1648 void handleMayWrite(const Instruction &I) {
1649 if (!DT.dominates(&CoroBegin, &I))
1650 MayWriteBeforeCoroBegin = true;
1651 }
1652
1653 bool usedAfterCoroBegin(Instruction &I) {
1654 for (auto &U : I.uses())
1655 if (DT.dominates(&CoroBegin, U))
1656 return true;
1657 return false;
1658 }
1659
1660 void handleAlias(Instruction &I) {
1661 // We track all aliases created prior to CoroBegin but used after.
1662 // These aliases may need to be recreated after CoroBegin if the alloca
1663 // need to live on the frame.
1664 if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1665 return;
1666
1667 if (!IsOffsetKnown) {
1668 AliasOffetMap[&I].reset();
1669 } else {
1670 auto Itr = AliasOffetMap.find(&I);
1671 if (Itr == AliasOffetMap.end()) {
1672 AliasOffetMap[&I] = Offset;
1673 } else if (Itr->second && *Itr->second != Offset) {
1674 // If we have seen two different possible values for this alias, we set
1675 // it to empty.
1676 AliasOffetMap[&I].reset();
1677 }
1678 }
1679 }
1680};
1681} // namespace
1682
1683// We need to make room to insert a spill after initial PHIs, but before
1684// catchswitch instruction. Placing it before violates the requirement that
1685 // a catchswitch, like all other EH pads, must be the first non-PHI in a block.
1686//
1687// Split away catchswitch into a separate block and insert in its place:
1688//
1689// cleanuppad <InsertPt> cleanupret.
1690//
1691 // The cleanupret instruction will act as an insert point for the spill.
1692 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1693 BasicBlock *CurrentBlock = CatchSwitch->getParent();
1694 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1695 CurrentBlock->getTerminator()->eraseFromParent();
1696
1697 auto *CleanupPad =
1698 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1699 auto *CleanupRet =
1700 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1701 return CleanupRet;
1702}
1703
1704// Replace all alloca and SSA values that are accessed across suspend points
1705// with GetElementPointer from coroutine frame + loads and stores. Create an
1706// AllocaSpillBB that will become the new entry block for the resume parts of
1707// the coroutine:
1708//
1709// %hdl = coro.begin(...)
1710// whatever
1711//
1712// becomes:
1713//
1714// %hdl = coro.begin(...)
1715// br label %AllocaSpillBB
1716//
1717// AllocaSpillBB:
1718// ; geps corresponding to allocas that were moved to coroutine frame
1719// br label PostSpill
1720//
1721// PostSpill:
1722// whatever
1723//
1724//
1725static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1726 auto *CB = Shape.CoroBegin;
1727 LLVMContext &C = CB->getContext();
1728 Function *F = CB->getFunction();
1729 IRBuilder<> Builder(C);
1730 StructType *FrameTy = Shape.FrameTy;
1731 Value *FramePtr = Shape.FramePtr;
1732 DominatorTree DT(*F);
1733 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
1734
1735 // Create a GEP with the given index into the coroutine frame for the original
1736 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1737 // original type.
1738 auto GetFramePointer = [&](Value *Orig) -> Value * {
1739 FieldIDType Index = FrameData.getFieldIndex(Orig);
1740 SmallVector<Value *, 3> Indices = {
1741 ConstantInt::get(Type::getInt32Ty(C), 0),
1742 ConstantInt::get(Type::getInt32Ty(C), Index),
1743 };
1744
1745 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1746 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1747 auto Count = CI->getValue().getZExtValue();
1748 if (Count > 1) {
1749 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1750 }
1751 } else {
1752 report_fatal_error("Coroutines cannot handle non static allocas yet");
1753 }
1754 }
1755
1756 auto GEP = cast<GetElementPtrInst>(
1757 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1758 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1759 if (FrameData.getDynamicAlign(Orig) != 0) {
1760 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1761 auto *M = AI->getModule();
1762 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1763 auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
1764 auto *AlignMask =
1765 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1766 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1767 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1768 return Builder.CreateIntToPtr(PtrValue, AI->getType());
1769 }
1770 // If the type of the GEP is not equal to the type of the AllocaInst, the
1771 // frame slot backing this AllocaInst may be shared with another
1772 // AllocaInst. So we cast the GEP to the AllocaInst's type here to reuse
1773 // the frame storage.
1774 //
1775 // Note: If we change the strategy dealing with alignment, we need to refine
1776 // this casting.
1777 if (GEP->getType() != Orig->getType())
1778 return Builder.CreateAddrSpaceCast(GEP, Orig->getType(),
1779 Orig->getName() + Twine(".cast"));
1780 }
1781 return GEP;
1782 };
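// Editorial sketch (illustrative, not part of the original source; names are
// hypothetical): for a value spilled to frame field 2, GetFramePointer emits
// IR of roughly this shape:
//
//   %x.reload.addr = getelementptr inbounds %f.Frame, ptr %frame, i32 0, i32 2
//
// and, for a dynamically aligned alloca with align 64, additionally rounds
// the address up:
//
//   %p = ptrtoint ptr %x.reload.addr to i64
//   %q = add i64 %p, 63            ; AlignMask = align - 1
//   %r = and i64 %q, -64           ; clear the low bits
//   %addr = inttoptr i64 %r to ptr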
1783
1784 for (auto const &E : FrameData.Spills) {
1785 Value *Def = E.first;
1786 auto SpillAlignment = Align(FrameData.getAlign(Def));
1787 // Create a store instruction storing the value into the
1788 // coroutine frame.
1789 BasicBlock::iterator InsertPt;
1790 Type *ByValTy = nullptr;
1791 if (auto *Arg = dyn_cast<Argument>(Def)) {
1792 // For arguments, we will place the store instruction right after
1793 // the coroutine frame pointer instruction, i.e. coro.begin.
1794 InsertPt = Shape.getInsertPtAfterFramePtr();
1795
1796 // If we're spilling an Argument, make sure we clear 'nocapture'
1797 // from the coroutine function.
1798 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1799
1800 if (Arg->hasByValAttr())
1801 ByValTy = Arg->getParamByValType();
1802 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1803 // Don't spill immediately after a suspend; splitting assumes
1804 // that the suspend will be followed by a branch.
1805 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1806 } else {
1807 auto *I = cast<Instruction>(Def);
1808 if (!DT.dominates(CB, I)) {
1809 // If it is not dominated by CoroBegin, then spill should be
1810 // inserted immediately after CoroFrame is computed.
1811 InsertPt = Shape.getInsertPtAfterFramePtr();
1812 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1813 // If we are spilling the result of the invoke instruction, split
1814 // the normal edge and insert the spill in the new block.
1815 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1816 InsertPt = NewBB->getTerminator()->getIterator();
1817 } else if (isa<PHINode>(I)) {
1818 // Skip the PHINodes and EH pads instructions.
1819 BasicBlock *DefBlock = I->getParent();
1820 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1821 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
1822 else
1823 InsertPt = DefBlock->getFirstInsertionPt();
1824 } else {
1825 assert(!I->isTerminator() && "unexpected terminator");
1826 // For all other values, the spill is placed immediately after
1827 // the definition.
1828 InsertPt = I->getNextNode()->getIterator();
1829 }
1830 }
1831
1832 auto Index = FrameData.getFieldIndex(Def);
1833 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1834 auto *G = Builder.CreateConstInBoundsGEP2_32(
1835 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1836 if (ByValTy) {
1837 // For byval arguments, we need to store the pointed value in the frame,
1838 // instead of the pointer itself.
1839 auto *Value = Builder.CreateLoad(ByValTy, Def);
1840 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1841 } else {
1842 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1843 }
1844
1845 BasicBlock *CurrentBlock = nullptr;
1846 Value *CurrentReload = nullptr;
1847 for (auto *U : E.second) {
1848 // If we have not seen the use block, create a load instruction to reload
1849 // the spilled value from the coroutine frame. Populates the Value pointer
1850 // reference provided with the frame GEP.
1851 if (CurrentBlock != U->getParent()) {
1852 CurrentBlock = U->getParent();
1853 Builder.SetInsertPoint(CurrentBlock,
1854 CurrentBlock->getFirstInsertionPt());
1855
1856 auto *GEP = GetFramePointer(E.first);
1857 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1858 if (ByValTy)
1859 CurrentReload = GEP;
1860 else
1861 CurrentReload = Builder.CreateAlignedLoad(
1862 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1863 SpillAlignment, E.first->getName() + Twine(".reload"));
1864
1865 TinyPtrVector<DbgDeclareInst *> DIs = findDbgDeclares(Def);
1866 TinyPtrVector<DbgVariableRecord *> DVRs = findDVRDeclares(Def);
1867 // Try our best to find the dbg.declare. If the spill is a temp, there may
1868 // not be a direct dbg.declare. Walk up the load chain to find one from an
1869 // alias.
1870 if (F->getSubprogram()) {
1871 auto *CurDef = Def;
1872 while (DIs.empty() && DVRs.empty() && isa<LoadInst>(CurDef)) {
1873 auto *LdInst = cast<LoadInst>(CurDef);
1874 // Only consider ptr to ptr same type load.
1875 if (LdInst->getPointerOperandType() != LdInst->getType())
1876 break;
1877 CurDef = LdInst->getPointerOperand();
1878 if (!isa<AllocaInst, LoadInst>(CurDef))
1879 break;
1880 DIs = findDbgDeclares(CurDef);
1881 DVRs = findDVRDeclares(CurDef);
1882 }
1883 }
1884
1885 auto SalvageOne = [&](auto *DDI) {
1886 bool AllowUnresolved = false;
1887 // This dbg.declare is preserved for all coro-split function
1888 // fragments. It will be unreachable in the main function, and
1889 // processed by coro::salvageDebugInfo() by CoroCloner.
1890 if (UseNewDbgInfoFormat) {
1891 DbgVariableRecord *NewDVR = new DbgVariableRecord(
1892 ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
1893 DDI->getExpression(), DDI->getDebugLoc(),
1894 DbgVariableRecord::LocationType::Declare);
1895 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1896 NewDVR, Builder.GetInsertPoint());
1897 } else {
1898 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1899 .insertDeclare(CurrentReload, DDI->getVariable(),
1900 DDI->getExpression(), DDI->getDebugLoc(),
1901 &*Builder.GetInsertPoint());
1902 }
1903 // This dbg.declare is for the main function entry point. It
1904 // will be deleted in all coro-split functions.
1905 coro::salvageDebugInfo(ArgToAllocaMap, *DDI, Shape.OptimizeFrame,
1906 false /*UseEntryValue*/);
1907 };
1908 for_each(DIs, SalvageOne);
1909 for_each(DVRs, SalvageOne);
1910 }
1911
1912 // If we have a single edge PHINode, remove it and replace it with a
1913 // reload from the coroutine frame. (We already took care of multi edge
1914 // PHINodes by rewriting them in the rewritePHIs function).
1915 if (auto *PN = dyn_cast<PHINode>(U)) {
1916 assert(PN->getNumIncomingValues() == 1 &&
1917 "unexpected number of incoming "
1918 "values in the PHINode");
1919 PN->replaceAllUsesWith(CurrentReload);
1920 PN->eraseFromParent();
1921 continue;
1922 }
1923
1924 // Replace all uses of CurrentValue in the current instruction with
1925 // reload.
1926 U->replaceUsesOfWith(Def, CurrentReload);
1927 // Instructions are added to Def's user list if the attached
1928 // debug records use Def. Update those now.
1929 for (DbgVariableRecord &DVR : filterDbgVars(U->getDbgRecordRange()))
1930 DVR.replaceVariableLocationOp(Def, CurrentReload, true);
1931 }
1932 }
1933
1934 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1935
1936 auto SpillBlock = FramePtrBB->splitBasicBlock(
1937 Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1938 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1939 Shape.AllocaSpillBlock = SpillBlock;
1940
1941 // retcon and retcon.once lowering assumes all uses have been sunk.
1942 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1943 Shape.ABI == coro::ABI::Async) {
1944 // If we found any allocas, replace all of their remaining uses with Geps.
1945 Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
1946 for (const auto &P : FrameData.Allocas) {
1947 AllocaInst *Alloca = P.Alloca;
1948 auto *G = GetFramePointer(Alloca);
1949
1950 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1951 // here, as we are changing location of the instruction.
1952 G->takeName(Alloca);
1953 Alloca->replaceAllUsesWith(G);
1954 Alloca->eraseFromParent();
1955 }
1956 return;
1957 }
1958
1959 // If we found any allocas, replace all of their remaining uses with GEP
1960 // instructions. To retain debuggability, we replace the uses of allocas for
1961 // dbg.declares and dbg.values with the reload from the frame.
1962 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1963 // as some of the uses may not be dominated by CoroBegin.
1964 Builder.SetInsertPoint(Shape.AllocaSpillBlock,
1965 Shape.AllocaSpillBlock->begin());
1966 SmallVector<Instruction *, 4> UsersToUpdate;
1967 for (const auto &A : FrameData.Allocas) {
1968 AllocaInst *Alloca = A.Alloca;
1969 UsersToUpdate.clear();
1970 for (User *U : Alloca->users()) {
1971 auto *I = cast<Instruction>(U);
1972 if (DT.dominates(CB, I))
1973 UsersToUpdate.push_back(I);
1974 }
1975 if (UsersToUpdate.empty())
1976 continue;
1977 auto *G = GetFramePointer(Alloca);
1978 G->setName(Alloca->getName() + Twine(".reload.addr"));
1979
1980 SmallVector<DbgVariableIntrinsic *, 4> DIs;
1981 SmallVector<DbgVariableRecord *> DbgVariableRecords;
1982 findDbgUsers(DIs, Alloca, &DbgVariableRecords);
1983 for (auto *DVI : DIs)
1984 DVI->replaceUsesOfWith(Alloca, G);
1985 for (auto *DVR : DbgVariableRecords)
1986 DVR->replaceVariableLocationOp(Alloca, G);
1987
1988 for (Instruction *I : UsersToUpdate) {
1989 // It is meaningless to retain lifetime intrinsics that refer to
1990 // members of the coroutine frame, and such meaningless intrinsics
1991 // may block further optimizations.
1992 if (I->isLifetimeStartOrEnd()) {
1993 I->eraseFromParent();
1994 continue;
1995 }
1996
1997 I->replaceUsesOfWith(Alloca, G);
1998 }
1999 }
2000 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
2001 for (const auto &A : FrameData.Allocas) {
2002 AllocaInst *Alloca = A.Alloca;
2003 if (A.MayWriteBeforeCoroBegin) {
2004 // isEscaped really means potentially modified before CoroBegin.
2004 if (Alloca->isArrayAllocation())
2006 report_fatal_error(
2007 "Coroutines cannot handle copying of array allocas yet");
2008
2009 auto *G = GetFramePointer(Alloca);
2010 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
2011 Builder.CreateStore(Value, G);
2012 }
2013 // For each alias to Alloca created before CoroBegin but used after
2014 // CoroBegin, we recreate them after CoroBegin by applying the offset
2015 // to the pointer in the frame.
2016 for (const auto &Alias : A.Aliases) {
2017 auto *FramePtr = GetFramePointer(Alloca);
2018 auto &Value = *Alias.second;
2019 auto ITy = IntegerType::get(C, Value.getBitWidth());
2020 auto *AliasPtr =
2021 Builder.CreatePtrAdd(FramePtr, ConstantInt::get(ITy, Value));
2022 Alias.first->replaceUsesWithIf(
2023 AliasPtr, [&](Use &U) { return DT.dominates(CB, U); });
2024 }
2025 }
2026
2027 // PromiseAlloca is not collected in FrameData.Allocas. So we don't handle
2028 // the case that the PromiseAlloca may have writes before CoroBegin in the
2029 // above codes. And it may be problematic in edge cases. See
2030 // https://github.com/llvm/llvm-project/issues/57861 for an example.
2031 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
2032 AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
2033 // Check whether there is any memory access to the promise alloca before CoroBegin.
2034 bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
2035 auto *Inst = dyn_cast<Instruction>(U.getUser());
2036 if (!Inst || DT.dominates(CB, Inst))
2037 return false;
2038
2039 if (auto *CI = dyn_cast<CallInst>(Inst)) {
2040 // It is fine if the call wouldn't write to the Promise.
2041 // This is possible for @llvm.coro.id intrinsics, which
2042 // would take the promise as the second argument as a
2043 // marker.
2044 if (CI->onlyReadsMemory() ||
2045 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2046 return false;
2047 return true;
2048 }
2049
2050 return isa<StoreInst>(Inst) ||
2051 // It may take too much time to track the uses.
2052 // Be conservative about the case the use may escape.
2053 isa<GetElementPtrInst>(Inst) ||
2054 // There would always be a bitcast of the promise alloca
2055 // before opaque pointers were enabled. Now that opaque
2056 // pointers are enabled by default, this should be
2057 // fine.
2058 isa<BitCastInst>(Inst);
2059 });
2060 if (HasAccessingPromiseBeforeCB) {
2061 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
2062 auto *G = GetFramePointer(PA);
2063 auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
2064 Builder.CreateStore(Value, G);
2065 }
2066 }
2067}
2068
2069 // Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
2070 // PHI in InsertedBB.
2071 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
2072 BasicBlock *InsertedBB,
2073 BasicBlock *PredBB,
2074 PHINode *UntilPHI = nullptr) {
2075 auto *PN = cast<PHINode>(&SuccBB->front());
2076 do {
2077 int Index = PN->getBasicBlockIndex(InsertedBB);
2078 Value *V = PN->getIncomingValue(Index);
2079 PHINode *InputV = PHINode::Create(
2080 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
2081 InputV->insertBefore(InsertedBB->begin());
2082 InputV->addIncoming(V, PredBB);
2083 PN->setIncomingValue(Index, InputV);
2084 PN = dyn_cast<PHINode>(PN->getNextNode());
2085 } while (PN != UntilPHI);
2086}
2087
2088// Rewrites the PHI Nodes in a cleanuppad.
2089static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
2090 CleanupPadInst *CleanupPad) {
2091 // For every incoming edge to a CleanupPad we will create a new block holding
2092 // all incoming values in single-value PHI nodes. We will then create another
2093 // block to act as a dispatcher (as all unwind edges for related EH blocks
2094 // must be the same).
2095 //
2096 // cleanuppad:
2097 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
2098 // %3 = cleanuppad within none []
2099 //
2100 // It will create:
2101 //
2102 // cleanuppad.corodispatch
2103 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
2104 // %3 = cleanuppad within none []
2105 // switch i8 % 2, label %unreachable
2106 // [i8 0, label %cleanuppad.from.catchswitch
2107 // i8 1, label %cleanuppad.from.catch.1]
2108 // cleanuppad.from.catchswitch:
2109 // %4 = phi i32 [%0, %catchswitch]
2110 // br %label cleanuppad
2111 // cleanuppad.from.catch.1:
2112 // %6 = phi i32 [%1, %catch.1]
2113 // br %label cleanuppad
2114 // cleanuppad:
2115 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
2116 // [%6, %cleanuppad.from.catch.1]
2117
2118 // Unreachable BB, in case switching on an invalid value in the dispatcher.
2119 auto *UnreachBB = BasicBlock::Create(
2120 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
2121 IRBuilder<> Builder(UnreachBB);
2122 Builder.CreateUnreachable();
2123
2124 // Create a new cleanuppad which will be the dispatcher.
2125 auto *NewCleanupPadBB =
2126 BasicBlock::Create(CleanupPadBB->getContext(),
2127 CleanupPadBB->getName() + Twine(".corodispatch"),
2128 CleanupPadBB->getParent(), CleanupPadBB);
2129 Builder.SetInsertPoint(NewCleanupPadBB);
2130 auto *SwitchType = Builder.getInt8Ty();
2131 auto *SetDispatchValuePN =
2132 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
2133 CleanupPad->removeFromParent();
2134 CleanupPad->insertAfter(SetDispatchValuePN);
2135 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
2136 pred_size(CleanupPadBB));
2137
2138 int SwitchIndex = 0;
2139 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
2140 for (BasicBlock *Pred : Preds) {
2141 // Create a new cleanuppad and move the PHI values to there.
2142 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
2143 CleanupPadBB->getName() +
2144 Twine(".from.") + Pred->getName(),
2145 CleanupPadBB->getParent(), CleanupPadBB);
2146 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
2147 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
2148 Pred->getName());
2149 Builder.SetInsertPoint(CaseBB);
2150 Builder.CreateBr(CleanupPadBB);
2151 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
2152
2153 // Update this Pred to the new unwind point.
2154 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
2155
2156 // Setup the switch in the dispatcher.
2157 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2158 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2159 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2160 SwitchIndex++;
2161 }
2162}
2163
2164 static void cleanupSinglePredPHIs(Function &F) {
2165 SmallVector<PHINode *, 32> Worklist;
2166 for (auto &BB : F) {
2167 for (auto &Phi : BB.phis()) {
2168 if (Phi.getNumIncomingValues() == 1) {
2169 Worklist.push_back(&Phi);
2170 } else
2171 break;
2172 }
2173 }
2174 while (!Worklist.empty()) {
2175 auto *Phi = Worklist.pop_back_val();
2176 auto *OriginalValue = Phi->getIncomingValue(0);
2177 Phi->replaceAllUsesWith(OriginalValue);
2178 }
2179}
2180
2181static void rewritePHIs(BasicBlock &BB) {
2182 // For every incoming edge we will create a block holding all
2183 // incoming values in single-value PHI nodes.
2184 //
2185 // loop:
2186 // %n.val = phi i32[%n, %entry], [%inc, %loop]
2187 //
2188 // It will create:
2189 //
2190 // loop.from.entry:
2191 // %n.loop.pre = phi i32 [%n, %entry]
2192 // br %label loop
2193 // loop.from.loop:
2194 // %inc.loop.pre = phi i32 [%inc, %loop]
2195 // br %label loop
2196 //
2197 // After this rewrite, further analysis will ignore any phi nodes with more
2198 // than one incoming edge.
2199
2200 // TODO: Simplify PHINodes in the basic block to remove duplicate
2201 // predecessors.
2202
2203 // Special case for CleanupPad: all EH blocks must have the same unwind edge
2204 // so we need to create an additional "dispatcher" block.
2205 if (auto *CleanupPad =
2206 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
2207 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2208 for (BasicBlock *Pred : Preds) {
2209 if (CatchSwitchInst *CS =
2210 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2211 // CleanupPad with a CatchSwitch predecessor: therefore this is an
2212 // unwind destination that needs to be handled specially.
2213 assert(CS->getUnwindDest() == &BB);
2214 (void)CS;
2215 rewritePHIsForCleanupPad(&BB, CleanupPad);
2216 return;
2217 }
2218 }
2219 }
2220
2221 LandingPadInst *LandingPad = nullptr;
2222 PHINode *ReplPHI = nullptr;
2223 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
2224 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
2225 // We replace the original landing pad with a PHINode that will collect the
2226 // results from all of them.
2227 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
2228 ReplPHI->insertBefore(LandingPad->getIterator());
2229 ReplPHI->takeName(LandingPad);
2230 LandingPad->replaceAllUsesWith(ReplPHI);
2231 // We will erase the original landing pad at the end of this function after
2232 // ehAwareSplitEdge cloned it in the transition blocks.
2233 }
2234
2235 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2236 for (BasicBlock *Pred : Preds) {
2237 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
2238 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
2239
2240 // Stop the moving of values at ReplPHI, as this is either null or the PHI
2241 // that replaced the landing pad.
2242 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
2243 }
2244
2245 if (LandingPad) {
2246 // Calls to the ehAwareSplitEdge function cloned the original landing pad.
2247 // No longer need it.
2248 LandingPad->eraseFromParent();
2249 }
2250}
2251
2252static void rewritePHIs(Function &F) {
2253 SmallVector<BasicBlock *, 8> WorkList;
2254
2255 for (BasicBlock &BB : F)
2256 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
2257 if (PN->getNumIncomingValues() > 1)
2258 WorkList.push_back(&BB);
2259
2260 for (BasicBlock *BB : WorkList)
2261 rewritePHIs(*BB);
2262}
2263
2264/// Default materializable callback
2265 // Check for instructions that we can recreate on resume as opposed to spill
2266 // the result into a coroutine frame.
2267 bool coro::defaultMaterializable(Instruction &V) {
2268 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2269 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2270}
2271
2272// Check for structural coroutine intrinsics that should not be spilled into
2273 // the coroutine frame.
2274 static bool isCoroutineStructureIntrinsic(Instruction &I) {
2275 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
2276 isa<CoroSuspendInst>(&I);
2277}
2278
2279// For each instruction identified as materializable across the suspend point,
2280// and its associated DAG of other rematerializable instructions,
2281 // recreate the DAG of instructions after the suspend point.
2282 static void rewriteMaterializableInstructions(
2283 const SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8>
2284 &AllRemats) {
2285 // This has to be done in 2 phases
2286 // Do the remats and record the required defs to be replaced in the
2287 // original use instructions
2288 // Once all the remats are complete, replace the uses in the final
2289 // instructions with the new defs
2290 typedef struct {
2291 Instruction *Use;
2292 Instruction *Def;
2293 Instruction *Remat;
2294 } ProcessNode;
2295
2296 SmallVector<ProcessNode> FinalInstructionsToProcess;
2297
2298 for (const auto &E : AllRemats) {
2299 Instruction *Use = E.first;
2300 Instruction *CurrentMaterialization = nullptr;
2301 RematGraph *RG = E.second.get();
2302 ReversePostOrderTraversal<RematGraph *> RPOT(RG);
2303 SmallVector<Instruction *> InstructionsToProcess;
2304
2305 // If the target use is actually a suspend instruction then we have to
2306 // insert the remats into the end of the predecessor (there should only be
2307 // one). This is so that suspend blocks always have the suspend instruction
2308 // as the first instruction.
2309 auto InsertPoint = &*Use->getParent()->getFirstInsertionPt();
2310 if (isa<AnyCoroSuspendInst>(Use)) {
2311 BasicBlock *SuspendPredecessorBlock =
2312 Use->getParent()->getSinglePredecessor();
2313 assert(SuspendPredecessorBlock && "malformed coro suspend instruction");
2314 InsertPoint = SuspendPredecessorBlock->getTerminator();
2315 }
2316
2317 // Note: skip the first instruction as this is the actual use that we're
2318 // rematerializing everything for.
2319 auto I = RPOT.begin();
2320 ++I;
2321 for (; I != RPOT.end(); ++I) {
2322 Instruction *D = (*I)->Node;
2323 CurrentMaterialization = D->clone();
2324 CurrentMaterialization->setName(D->getName());
2325 CurrentMaterialization->insertBefore(InsertPoint);
2326 InsertPoint = CurrentMaterialization;
2327
2328 // Replace all uses of Def in the instructions being added as part of this
2329 // rematerialization group
2330 for (auto &I : InstructionsToProcess)
2331 I->replaceUsesOfWith(D, CurrentMaterialization);
2332
2333 // Don't replace the final use at this point as this can cause problems
2334 // for other materializations. Instead, for any final use that uses a
2335 // define that's being rematerialized, record the replace values
2336 for (unsigned i = 0, E = Use->getNumOperands(); i != E; ++i)
2337 if (Use->getOperand(i) == D) // Is this operand pointing to oldval?
2338 FinalInstructionsToProcess.push_back(
2339 {Use, D, CurrentMaterialization});
2340
2341 InstructionsToProcess.push_back(CurrentMaterialization);
2342 }
2343 }
2344
2345 // Finally, replace the uses with the defines that we've just rematerialized
2346 for (auto &R : FinalInstructionsToProcess) {
2347 if (auto *PN = dyn_cast<PHINode>(R.Use)) {
2348 assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
2349 "values in the PHINode");
2350 PN->replaceAllUsesWith(R.Remat);
2351 PN->eraseFromParent();
2352 continue;
2353 }
2354 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2355 }
2356}
2357
2358// Splits the block at a particular instruction unless it is the first
2359 // instruction in the block with a single predecessor.
2360 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2361 auto *BB = I->getParent();
2362 if (&BB->front() == I) {
2363 if (BB->getSinglePredecessor()) {
2364 BB->setName(Name);
2365 return BB;
2366 }
2367 }
2368 return BB->splitBasicBlock(I, Name);
2369}
2370
2371// Split above and below a particular instruction so that it
2372// will be all alone by itself in a block.
2373 static void splitAround(Instruction *I, const Twine &Name) {
2374 splitBlockIfNotFirst(I, Name);
2375 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2376}
2377
2378static bool isSuspendBlock(BasicBlock *BB) {
2379 return isa<AnyCoroSuspendInst>(BB->front());
2380}
2381
2381
2382 typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;
2383
2384/// Does control flow starting at the given block ever reach a suspend
2385 /// instruction before reaching a block in VisitedOrFreeBBs?
2386 static bool isSuspendReachableFrom(BasicBlock *From,
2387 VisitedBlocksSet &VisitedOrFreeBBs) {
2388 // Eagerly try to add this block to the visited set. If it's already
2389 // there, stop recursing; this path doesn't reach a suspend before
2390 // either looping or reaching a freeing block.
2391 if (!VisitedOrFreeBBs.insert(From).second)
2392 return false;
2393
2394 // We assume that we'll already have split suspends into their own blocks.
2395 if (isSuspendBlock(From))
2396 return true;
2397
2398 // Recurse on the successors.
2399 for (auto *Succ : successors(From)) {
2400 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2401 return true;
2402 }
2403
2404 return false;
2405}
2406
2407/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2408 /// suspend point?
2409 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2410 // Seed the visited set with all the basic blocks containing a free
2411 // so that we won't pass them up.
2412 VisitedBlocksSet VisitedOrFreeBBs;
2413 for (auto *User : AI->users()) {
2414 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2415 VisitedOrFreeBBs.insert(FI->getParent());
2416 }
2417
2418 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2419}
2420
2421/// After we split the coroutine, will the given basic block be along
2422 /// an obvious exit path for the resumption function?
2423 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2424 unsigned depth = 3) {
2425 // If we've bottomed out our depth count, stop searching and assume
2426 // that the path might loop back.
2427 if (depth == 0) return false;
2428
2429 // If this is a suspend block, we're about to exit the resumption function.
2430 if (isSuspendBlock(BB)) return true;
2431
2432 // Recurse into the successors.
2433 for (auto *Succ : successors(BB)) {
2434 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2435 return false;
2436 }
2437
2438 // If none of the successors leads back in a loop, we're on an exit/abort.
2439 return true;
2440}
2441
2442 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2443 // Look for a free that isn't sufficiently obviously followed by
2444 // either a suspend or a termination, i.e. something that will leave
2445 // the coro resumption frame.
2446 for (auto *U : AI->users()) {
2447 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2448 if (!FI) continue;
2449
2450 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2451 return true;
2452 }
2453
2454 // If we never found one, we don't need a stack save.
2455 return false;
2456}
2457
2458/// Turn each of the given local allocas into a normal (dynamic) alloca
2459 /// instruction.
2460 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst *> LocalAllocas,
2461 SmallVectorImpl<Instruction*> &DeadInsts) {
2462 for (auto *AI : LocalAllocas) {
2463 IRBuilder<> Builder(AI);
2464
2465 // Save the stack depth. Try to avoid doing this if the stackrestore
2466 // is going to immediately precede a return or something.
2467 Value *StackSave = nullptr;
2468 if (localAllocaNeedsStackSave(AI))
2469 StackSave = Builder.CreateStackSave();
2470
2471 // Allocate memory.
2472 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2473 Alloca->setAlignment(AI->getAlignment());
2474
2475 for (auto *U : AI->users()) {
2476 // Replace gets with the allocation.
2477 if (isa<CoroAllocaGetInst>(U)) {
2478 U->replaceAllUsesWith(Alloca);
2479
2480 // Replace frees with stackrestores. This is safe because
2481 // alloca.alloc is required to obey a stack discipline, although we
2482 // don't enforce that structurally.
2483 } else {
2484 auto FI = cast<CoroAllocaFreeInst>(U);
2485 if (StackSave) {
2486 Builder.SetInsertPoint(FI);
2487 Builder.CreateStackRestore(StackSave);
2488 }
2489 }
2490 DeadInsts.push_back(cast<Instruction>(U));
2491 }
2492
2493 DeadInsts.push_back(AI);
2494 }
2495}
2496
2497/// Turn the given coro.alloca.alloc call into a dynamic allocation.
2498/// This happens during the all-instructions iteration, so it must not
2499 /// delete the call.
2500 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2501 coro::Shape &Shape,
2502 SmallVectorImpl<Instruction*> &DeadInsts) {
2503 IRBuilder<> Builder(AI);
2504 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2505
2506 for (User *U : AI->users()) {
2507 if (isa<CoroAllocaGetInst>(U)) {
2508 U->replaceAllUsesWith(Alloc);
2509 } else {
2510 auto FI = cast<CoroAllocaFreeInst>(U);
2511 Builder.SetInsertPoint(FI);
2512 Shape.emitDealloc(Builder, Alloc, nullptr);
2513 }
2514 DeadInsts.push_back(cast<Instruction>(U));
2515 }
2516
2517 // Push this on last so that it gets deleted after all the others.
2518 DeadInsts.push_back(AI);
2519
2520 // Return the new allocation value so that we can check for needed spills.
2521 return cast<Instruction>(Alloc);
2522}
2523
2524 /// Get the current swifterror value.
2525 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2526 coro::Shape &Shape) {
2527 // Make a fake function pointer as a sort of intrinsic.
2528 auto FnTy = FunctionType::get(ValueTy, {}, false);
2529 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2530
2531 auto Call = Builder.CreateCall(FnTy, Fn, {});
2532 Shape.SwiftErrorOps.push_back(Call);
2533
2534 return Call;
2535}
2536
2537/// Set the given value as the current swifterror value.
2538///
2539 /// Returns a slot that can be used as a swifterror slot.
2540 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2541 coro::Shape &Shape) {
2542 // Make a fake function pointer as a sort of intrinsic.
2543 auto FnTy = FunctionType::get(Builder.getPtrTy(),
2544 {V->getType()}, false);
2545 auto Fn = ConstantPointerNull::get(Builder.getPtrTy());
2546
2547 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2548 Shape.SwiftErrorOps.push_back(Call);
2549
2550 return Call;
2551}
2552
2553/// Set the swifterror value from the given alloca before a call,
2554/// then put in back in the alloca afterwards.
2555///
2556/// Returns an address that will stand in for the swifterror slot
2557 /// until splitting.
2558 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2559 AllocaInst *Alloca,
2560 coro::Shape &Shape) {
2561 auto ValueTy = Alloca->getAllocatedType();
2562 IRBuilder<> Builder(Call);
2563
2564 // Load the current value from the alloca and set it as the
2565 // swifterror value.
2566 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2567 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2568
2569 // Move to after the call. Since swifterror only has a guaranteed
2570 // value on normal exits, we can ignore implicit and explicit unwind
2571 // edges.
2572 if (isa<CallInst>(Call)) {
2573 Builder.SetInsertPoint(Call->getNextNode());
2574 } else {
2575 auto Invoke = cast<InvokeInst>(Call);
2576 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2577 }
2578
2579 // Get the current swifterror value and store it to the alloca.
2580 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2581 Builder.CreateStore(ValueAfterCall, Alloca);
2582
2583 return Addr;
2584}
2585
2586/// Eliminate a formerly-swifterror alloca by inserting the get/set
2587 /// intrinsics and attempting to MemToReg the alloca away.
2588 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2589 coro::Shape &Shape) {
2590 for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2591 // swifterror values can only be used in very specific ways.
2592 // We take advantage of that here.
2593 auto User = Use.getUser();
2594 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2595 continue;
2596
2597 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2598 auto Call = cast<Instruction>(User);
2599
2600 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2601
2602 // Use the returned slot address as the call argument.
2603 Use.set(Addr);
2604 }
2605
2606 // All the uses should be loads and stores now.
2607 assert(isAllocaPromotable(Alloca));
2608}
2609
2610/// "Eliminate" a swifterror argument by reducing it to the alloca case
2611/// and then loading and storing in the prologue and epilog.
2612///
2613 /// The argument keeps the swifterror flag.
2614 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2615 coro::Shape &Shape,
2616 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2617 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2618
2619 auto ArgTy = cast<PointerType>(Arg.getType());
2620 auto ValueTy = PointerType::getUnqual(F.getContext());
2621
2622 // Reduce to the alloca case:
2623
2624 // Create an alloca and replace all uses of the arg with it.
2625 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2626 Arg.replaceAllUsesWith(Alloca);
2627
2628 // Set an initial value in the alloca. swifterror is always null on entry.
2629 auto InitialValue = Constant::getNullValue(ValueTy);
2630 Builder.CreateStore(InitialValue, Alloca);
2631
2632 // Find all the suspends in the function and save and restore around them.
2633 for (auto *Suspend : Shape.CoroSuspends) {
2634 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2635 }
2636
2637 // Find all the coro.ends in the function and restore the error value.
2638 for (auto *End : Shape.CoroEnds) {
2639 Builder.SetInsertPoint(End);
2640 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2641 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2642 }
2643
2644 // Now we can use the alloca logic.
2645 AllocasToPromote.push_back(Alloca);
2646 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2647}
2648
2649/// Eliminate all problematic uses of swifterror arguments and allocas
2650 /// from the function. We'll fix them up later when splitting the function.
2651 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2652 SmallVector<AllocaInst*, 4> AllocasToPromote;
2653
2654 // Look for a swifterror argument.
2655 for (auto &Arg : F.args()) {
2656 if (!Arg.hasSwiftErrorAttr()) continue;
2657
2658 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2659 break;
2660 }
2661
2662 // Look for swifterror allocas.
2663 for (auto &Inst : F.getEntryBlock()) {
2664 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2665 if (!Alloca || !Alloca->isSwiftError()) continue;
2666
2667 // Clear the swifterror flag.
2668 Alloca->setSwiftError(false);
2669
2670 AllocasToPromote.push_back(Alloca);
2671 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2672 }
2673
2674 // If we have any allocas to promote, compute a dominator tree and
2675 // promote them en masse.
2676 if (!AllocasToPromote.empty()) {
2677 DominatorTree DT(F);
2678 PromoteMemToReg(AllocasToPromote, DT);
2679 }
2680}
2681
2682/// retcon and retcon.once conventions assume that all spill uses can be sunk
2683 /// after the coro.begin intrinsic.
2684 static void sinkSpillUsesAfterCoroBegin(Function &F,
2685 const FrameDataInfo &FrameData,
2686 CoroBeginInst *CoroBegin) {
2687 DominatorTree Dom(F);
2688
2689 SmallSetVector<Instruction *, 32> ToMove;
2690 SmallVector<Instruction *, 32> Worklist;
2691
2692 // Collect all users that precede coro.begin.
2693 for (auto *Def : FrameData.getAllDefs()) {
2694 for (User *U : Def->users()) {
2695 auto Inst = cast<Instruction>(U);
2696 if (Inst->getParent() != CoroBegin->getParent() ||
2697 Dom.dominates(CoroBegin, Inst))
2698 continue;
2699 if (ToMove.insert(Inst))
2700 Worklist.push_back(Inst);
2701 }
2702 }
2703 // Recursively collect users before coro.begin.
2704 while (!Worklist.empty()) {
2705 auto *Def = Worklist.pop_back_val();
2706 for (User *U : Def->users()) {
2707 auto Inst = cast<Instruction>(U);
2708 if (Dom.dominates(CoroBegin, Inst))
2709 continue;
2710 if (ToMove.insert(Inst))
2711 Worklist.push_back(Inst);
2712 }
2713 }
2714
2715 // Sort by dominance.
2716 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2717 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2719 // If a dominates b it should precede (<) b.
2719 return Dom.dominates(A, B);
2720 });
2721
2722 Instruction *InsertPt = CoroBegin->getNextNode();
2723 for (Instruction *Inst : InsertionList)
2724 Inst->moveBefore(InsertPt);
2725}
2726
2727 /// For each local variable whose users all live inside a single
2728 /// suspended region, sink its lifetime.start markers to just after the
2729 /// suspend block. Doing so minimizes the lifetime of each variable,
2730 /// hence minimizing the amount of data we end up putting on the frame.
2731 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2732 SuspendCrossingInfo &Checker) {
2733 if (F.hasOptNone())
2734 return;
2735
2736 DominatorTree DT(F);
2737
2738 // Collect all possible basic blocks which may dominate all uses of allocas.
2739 SmallPtrSet<BasicBlock *, 4> DomSet;
2740 DomSet.insert(&F.getEntryBlock());
2741 for (auto *CSI : Shape.CoroSuspends) {
2742 BasicBlock *SuspendBlock = CSI->getParent();
2743 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2744 "should have split coro.suspend into its own block");
2745 DomSet.insert(SuspendBlock->getSingleSuccessor());
2746 }
2747
2748 for (Instruction &I : instructions(F)) {
2749 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2750 if (!AI)
2751 continue;
2752
2753 for (BasicBlock *DomBB : DomSet) {
2754 bool Valid = true;
2755 SmallVector<Instruction *, 1> Lifetimes;
2756
2757 auto isLifetimeStart = [](Instruction* I) {
2758 if (auto* II = dyn_cast<IntrinsicInst>(I))
2759 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2760 return false;
2761 };
2762
2763 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2764 if (isLifetimeStart(U)) {
2765 Lifetimes.push_back(U);
2766 return true;
2767 }
2768 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2769 return false;
2770 if (isLifetimeStart(U->user_back())) {
2771 Lifetimes.push_back(U->user_back());
2772 return true;
2773 }
2774 return false;
2775 };
2776
2777 for (User *U : AI->users()) {
2778 Instruction *UI = cast<Instruction>(U);
2779 // For all users except lifetime.start markers, if they are all
2780 // dominated by one of the basic blocks and do not cross
2781 // suspend points as well, then there is no need to spill the
2782 // instruction.
2783 if (!DT.dominates(DomBB, UI->getParent()) ||
2784 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2785 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2786 // markers.
2787 if (collectLifetimeStart(UI, AI))
2788 continue;
2789 Valid = false;
2790 break;
2791 }
2792 }
2793 // Sink lifetime.start markers to dominate block when they are
2794 // only used outside the region.
2795 if (Valid && Lifetimes.size() != 0) {
2796 auto *NewLifetime = Lifetimes[0]->clone();
2797 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2798 NewLifetime->insertBefore(DomBB->getTerminator());
2799
2800 // All the original lifetime.start markers are no longer necessary.
2801 for (Instruction *S : Lifetimes)
2802 S->eraseFromParent();
2803
2804 break;
2805 }
2806 }
2807 }
2808}
2809
2810 static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape,
2811 const SuspendCrossingInfo &Checker,
2812 SmallVectorImpl<AllocaInfo> &Allocas,
2813 const DominatorTree &DT) {
2814 if (Shape.CoroSuspends.empty())
2815 return;
2816
2817 // The PromiseAlloca will be specially handled since it needs to be in a
2818 // fixed position in the frame.
2819 if (AI == Shape.SwitchLowering.PromiseAlloca)
2820 return;
2821
2822 // The __coro_gro alloca should outlive the promise, make sure we
2823 // keep it outside the frame.
2824 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
2825 return;
2826
2827 // The code that uses lifetime.start intrinsic does not work for functions
2828 // with loops without exit. Disable it on ABIs we know to generate such
2829 // code.
2830 bool ShouldUseLifetimeStartInfo =
2831 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2832 Shape.ABI != coro::ABI::RetconOnce);
2833 AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT,
2834 *Shape.CoroBegin, Checker,
2835 ShouldUseLifetimeStartInfo};
2836 Visitor.visitPtr(*AI);
2837 if (!Visitor.getShouldLiveOnFrame())
2838 return;
2839 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2840 Visitor.getMayWriteBeforeCoroBegin());
2841}
2842
2843 static std::optional<std::pair<Value &, DIExpression &>>
2844 salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
2845 bool OptimizeFrame, bool UseEntryValue, Function *F,
2846 Value *Storage, DIExpression *Expr,
2847 bool SkipOutermostLoad) {
2848 IRBuilder<> Builder(F->getContext());
2849 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2850 while (isa<IntrinsicInst>(InsertPt))
2851 ++InsertPt;
2852 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2853
2854 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2855 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2856 Storage = LdInst->getPointerOperand();
2857 // FIXME: This is a heuristic that works around the fact that
2858 // LLVM IR debug intrinsics cannot yet distinguish between
2859 // memory and value locations: Because a dbg.declare(alloca) is
2860 // implicitly a memory location no DW_OP_deref operation for the
2861 // last direct load from an alloca is necessary. This condition
2862 // effectively drops the *last* DW_OP_deref in the expression.
2863 if (!SkipOutermostLoad)
2864 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2865 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2866 Storage = StInst->getValueOperand();
2867 } else {
2868 SmallVector<uint64_t, 16> Ops;
2869 SmallVector<Value *, 0> AdditionalValues;
2870 Value *Op = llvm::salvageDebugInfoImpl(
2871 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2872 AdditionalValues);
2873 if (!Op || !AdditionalValues.empty()) {
2874 // If salvaging failed or salvaging produced more than one location
2875 // operand, give up.
2876 break;
2877 }
2878 Storage = Op;
2879 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2880 }
2881 SkipOutermostLoad = false;
2882 }
2883 if (!Storage)
2884 return std::nullopt;
2885
2886 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2887 const bool IsSwiftAsyncArg =
2888 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2889
2890 // Swift async arguments are described by an entry value of the ABI-defined
2891 // register containing the coroutine context.
2892 // Entry values in variadic expressions are not supported.
2893 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
2894 Expr->isSingleLocationExpression())
2895 Expr = DIExpression::prepend(Expr, DIExpression::EntryValue);
2896
2897 // If the coroutine frame is an Argument, store it in an alloca to improve
2898 // its availability (e.g. registers may be clobbered).
2899 // Avoid this if optimizations are enabled (they would remove the alloca) or
2900 // if the value is guaranteed to be available through other means (e.g. swift
2901 // ABI guarantees).
2902 if (StorageAsArg && !OptimizeFrame && !IsSwiftAsyncArg) {
2903 auto &Cached = ArgToAllocaMap[StorageAsArg];
2904 if (!Cached) {
2905 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2906 Storage->getName() + ".debug");
2907 Builder.CreateStore(Storage, Cached);
2908 }
2909 Storage = Cached;
2910 // FIXME: LLVM lacks nuanced semantics to differentiate between
2911 // memory and direct locations at the IR level. The backend will
2912 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2913 // location. Thus, if there are deref and offset operations in the
2914 // expression, we need to add a DW_OP_deref at the *start* of the
2915 // expression to first load the contents of the alloca before
2916 // adjusting it with the expression.
2917 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2918 }
2919
2920 return {{*Storage, *Expr}};
2921}
2922
2923 void coro::salvageDebugInfo(
2924 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
2925 DbgVariableIntrinsic &DVI, bool OptimizeFrame, bool UseEntryValue) {
2926
2927 Function *F = DVI.getFunction();
2928 // Follow the pointer arithmetic all the way to the incoming
2929 // function argument and convert into a DIExpression.
2930 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2931 Value *OriginalStorage = DVI.getVariableLocationOp(0);
2932
2933 auto SalvagedInfo = ::salvageDebugInfoImpl(
2934 ArgToAllocaMap, OptimizeFrame, UseEntryValue, F, OriginalStorage,
2935 DVI.getExpression(), SkipOutermostLoad);
2936 if (!SalvagedInfo)
2937 return;
2938
2939 Value *Storage = &SalvagedInfo->first;
2940 DIExpression *Expr = &SalvagedInfo->second;
2941
2942 DVI.replaceVariableLocationOp(OriginalStorage, Storage);
2943 DVI.setExpression(Expr);
2944 // We only hoist dbg.declare today since it doesn't make sense to hoist
2945 // dbg.value since it does not have the same function wide guarantees that
2946 // dbg.declare does.
2947 if (isa<DbgDeclareInst>(DVI)) {
2948 std::optional<BasicBlock::iterator> InsertPt;
2949 if (auto *I = dyn_cast<Instruction>(Storage)) {
2950 InsertPt = I->getInsertionPointAfterDef();
2951 // Update DILocation only in O0 since it is easy to get out of sync in
2952 // optimizations. See https://github.com/llvm/llvm-project/pull/75104 for
2953 // an example.
2954 if (!OptimizeFrame && I->getDebugLoc())
2955 DVI.setDebugLoc(I->getDebugLoc());
2956 } else if (isa<Argument>(Storage))
2957 InsertPt = F->getEntryBlock().begin();
2958 if (InsertPt)
2959 DVI.moveBefore(*(*InsertPt)->getParent(), *InsertPt);
2960 }
2961}
2962
2963 void coro::salvageDebugInfo(
2964 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
2965 DbgVariableRecord &DVR, bool OptimizeFrame, bool UseEntryValue) {
2966
2967 Function *F = DVR.getFunction();
2968 // Follow the pointer arithmetic all the way to the incoming
2969 // function argument and convert into a DIExpression.
2970 bool SkipOutermostLoad = DVR.isDbgDeclare();
2971 Value *OriginalStorage = DVR.getVariableLocationOp(0);
2972
2973 auto SalvagedInfo = ::salvageDebugInfoImpl(
2974 ArgToAllocaMap, OptimizeFrame, UseEntryValue, F, OriginalStorage,
2975 DVR.getExpression(), SkipOutermostLoad);
2976 if (!SalvagedInfo)
2977 return;
2978
2979 Value *Storage = &SalvagedInfo->first;
2980 DIExpression *Expr = &SalvagedInfo->second;
2981
2982 DVR.replaceVariableLocationOp(OriginalStorage, Storage);
2983 DVR.setExpression(Expr);
2984 // We only hoist dbg.declare today since it doesn't make sense to hoist
2985 // dbg.value since it does not have the same function wide guarantees that
2986 // dbg.declare does.
2987 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
2988 std::optional<BasicBlock::iterator> InsertPt;
2989 if (auto *I = dyn_cast<Instruction>(Storage)) {
2990 InsertPt = I->getInsertionPointAfterDef();
2991 // Update DILocation only in O0 since it is easy to get out of sync in
2992 // optimizations. See https://github.com/llvm/llvm-project/pull/75104 for
2993 // an example.
2994 if (!OptimizeFrame && I->getDebugLoc())
2995 DVR.setDebugLoc(I->getDebugLoc());
2996 } else if (isa<Argument>(Storage))
2997 InsertPt = F->getEntryBlock().begin();
2998 if (InsertPt) {
2999 DVR.removeFromParent();
3000 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
3001 }
3002 }
3003}
3004
3005 static void doRematerializations(
3006 Function &F, SuspendCrossingInfo &Checker,
3007 const std::function<bool(Instruction &)> &MaterializableCallback) {
3008 if (F.hasOptNone())
3009 return;
3010
3011 SpillInfo Spills;
3012
3013 // See if there are materializable instructions across suspend points
3014 // We record these as the starting point to also identify materializable
3015 // defs of uses in these operations
3016 for (Instruction &I : instructions(F)) {
3017 if (!MaterializableCallback(I))
3018 continue;
3019 for (User *U : I.users())
3020 if (Checker.isDefinitionAcrossSuspend(I, U))
3021 Spills[&I].push_back(cast<Instruction>(U));
3022 }
3023
3024 // Process each of the identified rematerializable instructions
3025 // and add predecessor instructions that can also be rematerialized.
3026 // This is actually a graph of instructions since we could potentially
3027 // have multiple uses of a def in the set of predecessor instructions.
3028 // The approach here is to maintain a graph of instructions for each bottom
3029 // level instruction - where we have a unique set of instructions (nodes)
3030 // and edges between them. We then walk the graph in reverse post-dominator
3031 // order to insert them past the suspend point, but ensure that ordering is
3032 // correct. We also rely on CSE removing duplicate defs for remats of
3033 // different instructions with a def in common (rather than maintaining more
3034 // complex graphs for each suspend point)
3035
3036 // We can do this by adding new nodes to the list for each suspend
3037 // point. Then using standard GraphTraits to give a reverse post-order
3038 // traversal when we insert the nodes after the suspend
3039 SmallMapVector<Instruction *, std::unique_ptr<RematGraph>, 8> AllRemats;
3040 for (auto &E : Spills) {
3041 for (Instruction *U : E.second) {
3042 // Don't process a user twice (this can happen if the instruction uses
3043 // more than one rematerializable def)
3044 if (AllRemats.count(U))
3045 continue;
3046
3047 // Constructor creates the whole RematGraph for the given Use
3048 auto RematUPtr =
3049 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
3050
3051 LLVM_DEBUG(dbgs() << "***** Next remat group *****\n";
3052 ReversePostOrderTraversal<RematGraph *> RPOT(RematUPtr.get());
3053 for (auto I = RPOT.begin(); I != RPOT.end();
3054 ++I) { (*I)->Node->dump(); } dbgs()
3055 << "\n";);
3056
3057 AllRemats[U] = std::move(RematUPtr);
3058 }
3059 }
3060
3061 // Rewrite materializable instructions to be materialized at the use
3062 // point.
3063 LLVM_DEBUG(dumpRemats("Materializations", AllRemats));
3064 rewriteMaterializableInstructions(AllRemats);
3065 }
3066
3067 void coro::buildCoroutineFrame(
3068 Function &F, Shape &Shape, TargetTransformInfo &TTI,
3069 const std::function<bool(Instruction &)> &MaterializableCallback) {
3070 // Don't eliminate swifterror in async functions that won't be split.
3071 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
3072 eliminateSwiftError(F, Shape);
3073
3074 if (Shape.ABI == coro::ABI::Switch &&
3075 Shape.SwitchLowering.PromiseAlloca) {
3076 Shape.getSwitchCoroId()->clearPromise();
3077 }
3078
3079 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
3080 // intrinsics are in their own blocks to simplify the logic of building up
3081 // SuspendCrossing data.
3082 for (auto *CSI : Shape.CoroSuspends) {
3083 if (auto *Save = CSI->getCoroSave())
3084 splitAround(Save, "CoroSave");
3085 splitAround(CSI, "CoroSuspend");
3086 }
3087
3088 // Put CoroEnds into their own blocks.
3089 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
3090 splitAround(CE, "CoroEnd");
3091
3092 // Emit the musttail call function in a new block before the CoroEnd.
3093 // We do this here so that the right suspend crossing info is computed for
3094 // the uses of the musttail call function call. (Arguments to the coro.end
3095 // instructions would be ignored)
3096 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
3097 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
3098 if (!MustTailCallFn)
3099 continue;
3100 IRBuilder<> Builder(AsyncEnd);
3101 SmallVector<Value *, 8> Args(AsyncEnd->args());
3102 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
3103 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
3104 TTI, Arguments, Builder);
3105 splitAround(Call, "MustTailCall.Before.CoroEnd");
3106 }
3107 }
3108
3109 // Later code makes structural assumptions about single predecessors phis e.g
3110 // that they are not live across a suspend point.
3111 cleanupSinglePredPHIs(F);
3112
3113 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
3114 // never have its definition separated from the PHI by the suspend point.
3115 rewritePHIs(F);
3116
3117 // Build suspend crossing info.
3118 SuspendCrossingInfo Checker(F, Shape);
3119
3120 doRematerializations(F, Checker, MaterializableCallback);
3121
3122 FrameDataInfo FrameData;
3123 SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
3124 SmallVector<Instruction*, 4> DeadInstructions;
3125 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
3126 Shape.ABI != coro::ABI::RetconOnce)
3127 sinkLifetimeStartMarkers(F, Shape, Checker);
3128
3129 // Collect the spills for arguments and other not-materializable values.
3130 for (Argument &A : F.args())
3131 for (User *U : A.users())
3132 if (Checker.isDefinitionAcrossSuspend(A, U))
3133 FrameData.Spills[&A].push_back(cast<Instruction>(U));
3134
3135 const DominatorTree DT(F);
3136 for (Instruction &I : instructions(F)) {
3137 // Values returned from coroutine structure intrinsics should not be part
3138 // of the Coroutine Frame.
3139 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
3140 continue;
3141
3142 // Handle alloca.alloc specially here.
3143 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
3144 // Check whether the alloca's lifetime is bounded by suspend points.
3145 if (isLocalAlloca(AI)) {
3146 LocalAllocas.push_back(AI);
3147 continue;
3148 }
3149
3150 // If not, do a quick rewrite of the alloca and then add spills of
3151 // the rewritten value. The rewrite doesn't invalidate anything in
3152 // Spills because the other alloca intrinsics have no other operands
3153 // besides AI, and it doesn't invalidate the iteration because we delay
3154 // erasing AI.
3155 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
3156
3157 for (User *U : Alloc->users()) {
3158 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3159 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3160 }
3161 continue;
3162 }
3163
3164 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
3165 if (isa<CoroAllocaGetInst>(I))
3166 continue;
3167
3168 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
3169 collectFrameAlloca(AI, Shape, Checker, FrameData.Allocas, DT);
3170 continue;
3171 }
3172
3173 for (User *U : I.users())
3174 if (Checker.isDefinitionAcrossSuspend(I, U)) {
3175 // We cannot spill a token.
3176 if (I.getType()->isTokenTy())
3178 "token definition is separated from the use by a suspend point");
3179 FrameData.Spills[&I].push_back(cast<Instruction>(U));
3180 }
3181 }
3182
3183 LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
3184
3185 // We don't want the layout of the coroutine frame to be affected by
3186 // debug information, so we only salvage dbg.value intrinsics whose
3187 // value is already spilled to the frame.
3188 // The dbg.values for allocas are handled separately.
3189 for (auto &Iter : FrameData.Spills) {
3190 auto *V = Iter.first;
3191 SmallVector<DbgValueInst *, 16> DVIs;
3192 SmallVector<DbgVariableRecord *, 16> DVRs;
3193 findDbgValues(DVIs, V, &DVRs);
3194 for (DbgValueInst *DVI : DVIs)
3195 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3196 FrameData.Spills[V].push_back(DVI);
3197 // Also add the instructions whose attached debug records describe a value in the frame.
3198 for (DbgVariableRecord *DVR : DVRs)
3199 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
3200 FrameData.Spills[V].push_back(DVR->Marker->MarkedInstr);
3201 }
3202
3203 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
3204 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
3205 Shape.ABI == coro::ABI::Async)
3206 sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
3207 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
3208 Shape.FramePtr = Shape.CoroBegin;
3209 // For now, this works for C++ programs only.
3210 buildFrameDebugInfo(F, Shape, FrameData);
3211 insertSpills(FrameData, Shape);
3212 lowerLocalAllocas(LocalAllocas, DeadInstructions);
3213
3214 for (auto *I : DeadInstructions)
3215 I->eraseFromParent();
3216}
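
For illustration, here is a minimal sketch of how a client might drive this entry point. The driver function below is hypothetical (in the LLVM tree, the coroutine splitting logic computes the coro::Shape and calls coro::buildCoroutineFrame itself); coro::defaultMaterializable is the stock policy, defined earlier in this file, that decides which instructions may be rematerialized after a suspend instead of being spilled.

// Hypothetical driver, for illustration only -- not part of CoroFrame.cpp.
static void lowerFrameForCoroutine(Function &F, coro::Shape &Shape,
                                   TargetTransformInfo &TTI) {
  // Use the default rematerialization policy; instructions it rejects and
  // whose definitions cross a suspend point are spilled to the frame.
  coro::buildCoroutineFrame(F, Shape, TTI, coro::defaultMaterializable);
  // On return, Shape.FrameTy describes the frame layout, and uses of spilled
  // values have been rewritten into accesses through the frame pointer.
}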
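A caller that wants a different policy can pass any callback with the same bool(Instruction &) shape. The predicate below is a hypothetical example, not an in-tree policy: the callback only nominates candidates, and the rematerialization graph built above still determines, per use, how each candidate's operands are made available.

// Hypothetical policy: additionally nominate cheap integer arithmetic for
// rematerialization across suspend points.
static bool aggressiveMaterializable(Instruction &V) {
  if (coro::defaultMaterializable(V))
    return true;
  return isa<BinaryOperator>(V) && V.getType()->isIntegerTy();
}

// Passed in place of the default:
//   coro::buildCoroutineFrame(F, Shape, TTI, aggressiveMaterializable);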