//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <optional>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides two way mapping between the blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'. A block can trivially reach itself.
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i' but there is a path crossing a suspend point
//          not repeating 'i' (path to 'i' without cycles containing 'i').
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//   KillLoop: There is a path from 'i' to 'i' not otherwise repeating 'i' that
//             crosses a suspend point.
//
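// For illustration, consider a hypothetical three-block CFG
// entry -> susp -> use, where only 'susp' contains a suspend point. After the
// analysis converges:
//   Consumes[use] = {entry, susp, use}  ; every block can reach 'use'
//   Kills[use]    = {entry, susp}       ; paths from these cross the suspend
// so any value defined in 'entry' and used in 'use' must be spilled to the
// coroutine frame.
//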
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
    bool KillLoop = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  /// Returns true if there is a path from \p From to \p To crossing a suspend
  /// point without crossing \p From a 2nd time.
  bool hasPathCrossingSuspendPoint(BasicBlock *From, BasicBlock *To) const {
    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
    bool const Result = Block[ToIndex].Kills[FromIndex];
    LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  /// Returns true if there is a path from \p From to \p To crossing a suspend
  /// point without crossing \p From a 2nd time. If \p From is the same as
  /// \p To, this will also check if there is a looping path crossing a
  /// suspend point.
  bool hasPathOrLoopCrossingSuspendPoint(BasicBlock *From,
                                         BasicBlock *To) const {
    size_t const FromIndex = Mapping.blockToIndex(From);
    size_t const ToIndex = Mapping.blockToIndex(To);
    bool Result = Block[ToIndex].Kills[FromIndex] ||
                  (From == To && Block[ToIndex].KillLoop);
    LLVM_DEBUG(dbgs() << From->getName() << " => " << To->getName()
                      << " answer is " << Result << " (path or loop)\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }
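
  // For example, in an illustrative coroutine
  //   task f() { int x = compute(); co_await g(); use(x); }
  // the definition of 'x' and its use end up on opposite sides of the
  // suspend point, so isDefinitionAcrossSuspend(<def block of x>, <use of x>)
  // returns true and 'x' must be placed on the coroutine frame.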

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutine could only collect Argument and Instruction now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of
  // the coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
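  //
  // For example (illustrative IR; the callee name is hypothetical):
  //   %save = call token @llvm.coro.save(ptr %hdl)
  //   call void @enqueue(ptr %hdl)   ; may already resume the coroutine
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  // Any value live across %save must already be in the frame when @enqueue
  // runs, which is why the coro.save block is marked as a suspend block too.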
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still available
          // on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that S is not in its own
          // kill set.
          S.KillLoop |= S.Kills[SuccNo];
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}
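
// As a sketch of how KillLoop is consumed: for a two-block loop A -> B -> A
// where only B contains a suspend point, propagation sets Kills[A][A], but a
// block never kills itself via a cycle-free path, so the bit is moved into
// KillLoop[A] and cleared. hasPathOrLoopCrossingSuspendPoint(A, A) then still
// reports the suspend crossing on the looping path.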

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, std::optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, std::optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  Align getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, Align AL) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, AL});
  }

  uint64_t getDynamicAlign(Value *V) const {
    auto Iter = FieldDynamicAlignMap.find(V);
    assert(Iter != FieldDynamicAlignMap.end());
    return Iter->second;
  }

  void setDynamicAlign(Value *V, uint64_t Align) {
    assert(FieldDynamicAlignMap.count(V) == 0);
    FieldDynamicAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, Align> FieldAlignMap;
  DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
  // Map from values to their offset on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame. If the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
    uint64_t DynamicAlignBuffer;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  std::optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   std::optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
                                              bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }
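
  // For example (illustrative): an "alloca i64, i32 4, align 8" is stored in
  // the frame as a single field of type [4 x i64] with alignment 8.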

  /// We want to put the allocas whose lifetime-ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, and then adds a field
  /// for each set, using the largest type in the set as the field type, so
  /// that allocas in the same set share one slot in the coroutine frame.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
                                     bool IsHeader = false,
                                     bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size=0, we don't need to add a field and they
    // can just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment = ABIAlign;
    if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
      TyAlignment = *MaxFrameAlignment;
    Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);

    // The field alignment could be bigger than the max frame alignment. In
    // that case we request additional storage to be able to dynamically
    // align the pointer.
    uint64_t DynamicAlignBuffer = 0;
    if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
      DynamicAlignBuffer =
          offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
      FieldAlignment = *MaxFrameAlignment;
      FieldSize = FieldSize + DynamicAlignBuffer;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
    return Fields.size() - 1;
  }
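
  // As a worked example of the dynamic-alignment path above: with
  // MaxFrameAlignment = 16 and a field that requires alignment 64, the field
  // is laid out with alignment 16 and offsetToAlignment(16, Align(64)) = 48
  // extra bytes are reserved, so the pointer can always be realigned to 64
  // bytes at runtime wherever the frame happens to be allocated.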

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment);
    uint64_t dynamicAlign =
        Field.DynamicAlignBuffer
            ? Field.DynamicAlignBuffer + Field.Alignment.value()
            : 0;
    setDynamicAlign(I, dynamicAlign);
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas into the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();
  };
  // Put larger allocas first, so that they have higher priority to be merged,
  // which can potentially save more space. This also keeps each AllocaSet
  // ordered, so the largest Alloca in a set is easy to find.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapped set that the Alloca does not
    // interfere with. If there is one, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement of B.
      //
      // There may be other more fine-grained strategies to handle the
      // alignment information during the merging process. But it seems hard
      // to handle these strategies and they benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement
  // we modified above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas were merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    if (F.DynamicAlignBuffer) {
      FieldTypes.push_back(
          ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
    }
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}
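
// As a sketch of the padding logic in finish(): if the previous field ends at
// offset 10 and the next field is an i32 (natural alignment 4) assigned to
// offset 12, no explicit padding is needed in an unpacked struct because
// alignTo(10, 4) == 12. If the same field were assigned to offset 16 instead,
// an explicit [6 x i8] padding field would be emitted, since natural
// alignment alone would place it at 12.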

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid memory leaks.
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common use may be '__int_128', which is 9
    // characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
    if (PtrTy->isOpaque())
      return "PointerType";
    Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for (auto &Iter : Buffer)
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}
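
// For example: i64 maps to "__int_64"; with typed pointers, an i32* maps to
// "__int_32_Ptr", while an opaque ptr maps to "PointerType"; and a named
// struct %struct.Foo maps to "struct_Foo" ('.' and ':' become '_').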

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a PointerType that points to null (aka void *) instead of
    // exploring the pointee type, to avoid an infinite search problem. For
    // example, we would be in trouble if we traversed recursively:
    //
    //    struct Node {
    //        Node* ptr;
    //    };
    RetType =
        Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
                                  Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
                                  /*DWARFAddressSpace=*/std::nullopt, Name);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
        llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
    TypeSize Size = Layout.getTypeSizeInBits(Ty);
    auto *CharSizeType = Builder.createBasicType(
        Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);

    if (Size <= 8)
      RetType = CharSizeType;
    else {
      if (Size % 8 != 0)
        Size = TypeSize::Fixed(Size + 8 - (Size % 8));

      RetType = Builder.createArrayType(
          Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
          Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
    }
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For all the values in the frame, we search for a dbg.declare use to
///    find the corresponding debug variable for the value. If we can find
///    such a debug variable, we get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we can only try to
///    build the DIType from the Type. We do this in solveDIType. We only
///    handle integer, floating-point, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info, so we don't need to generate debug info for
  // the frame either.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
      DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert(
      {ResumeIndex, DBuilder.createPointerType(
                        nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
  TyCache.insert(
      {DestroyIndex, DBuilder.createPointerType(
                         nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of the Resume function and Destroy function are
  // fixed.
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names: there may be many i32
  // and i64 types in one coroutine, so we append a suffix like i32_0 and
  // i32_1 to disambiguate, since it makes no sense for field names to
  // conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The Subprogram has a RetainedNodes field that records the debug
  // variables it contains. So we need to add __coro_frame to the
  // RetainedNodes of it.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame` is optimized out.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.getInsertPtAfterFramePtr());
}
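
// With this in place, a debugger that stops inside the coroutine body can
// print the synthetic variable, e.g. "p __coro_frame" in gdb or
// "expr __coro_frame" in lldb, and see the resume/destroy pointers, the
// suspend index, and the spilled values as ordinary struct members.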

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  std::optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  std::optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
    (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);
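    // For example, a coroutine with 3 suspend points gets
    // max(1, ceil(log2(3))) = 2 index bits, i.e. an i2 field.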

    SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may own the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex could update its index after
  //      `performOptimizedStructLayout`
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id = B.addField(FieldType, std::nullopt, false /*header*/,
                                true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin,
//      which would require copying the data from the alloca to the frame
//      after CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the
//      alias after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. List of all BasicBlocks that use this alloca or any of the aliases of
//      the alloca. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by the address
//      being used in a function call that might capture. If it's ever
//      escaped, this alloca must be put on the frame conservatively.
//
// To answer question 2, we track this through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call, or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. std::optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
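//
// For illustration (a hypothetical IR fragment for question 3):
//   %a   = alloca [8 x i8]
//   %gep = getelementptr inbounds [8 x i8], ptr %a, i64 0, i64 4
//   %hdl = call ptr @llvm.coro.begin(...)          ; CoroBegin
//   store i8 1, ptr %gep                           ; alias used after it
// Here %gep is an alias of %a created before CoroBegin with known offset 4,
// so it must be recreated off the frame after CoroBegin.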
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When the lifetime markers refer to a subrange of the original alloca,
    // ignore them to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and the offset in
  // the original Alloca. They need to be recreated after CoroBegin off the
  // frame.
  DenseMap<Instruction *, std::optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable std::optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      // Addresses are guaranteed to be identical after every lifetime.start
      // so we cannot use the local stack if the address escaped and there is
      // a suspend point between lifetime markers. This should also cover the
      // case of a single lifetime.start intrinsic in a loop with a suspend
      // point.
      if (PI.isEscaped()) {
        for (auto *A : LifetimeStarts) {
          for (auto *B : LifetimeStarts) {
            if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
                                                          B->getParent()))
              return true;
          }
        }
      }
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp the return object). To fix that, we need
    // to:
    //   1) Potentially treat sret as nocapture in calls
    //   2) Special handle the return object and put it on the stack
    //   3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
//
1586static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1587 auto *CB = Shape.CoroBegin;
1588 LLVMContext &C = CB->getContext();
1590 StructType *FrameTy = Shape.FrameTy;
1591 Value *FramePtr = Shape.FramePtr;
1592 DominatorTree DT(*CB->getFunction());
1594
1595 // Create a GEP with the given index into the coroutine frame for the original
1596 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1597 // original type.
1598 auto GetFramePointer = [&](Value *Orig) -> Value * {
1599 FieldIDType Index = FrameData.getFieldIndex(Orig);
1600 SmallVector<Value *, 3> Indices = {
1601 ConstantInt::get(Type::getInt32Ty(C), 0),
1602 ConstantInt::get(Type::getInt32Ty(C), Index),
1603 };
1604
1605 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1606 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1607 auto Count = CI->getValue().getZExtValue();
1608 if (Count > 1) {
1609 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1610 }
1611 } else {
1612 report_fatal_error("Coroutines cannot handle non static allocas yet");
1613 }
1614 }
1615
1616 auto GEP = cast<GetElementPtrInst>(
1617 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1618 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1619 if (FrameData.getDynamicAlign(Orig) != 0) {
1620 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1621 auto *M = AI->getModule();
1622 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1623 auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
1624 auto *AlignMask =
1625 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1626 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1627 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1628 return Builder.CreateIntToPtr(PtrValue, AI->getType());
1629 }
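// The computation above rounds the raw frame address up to the alloca's
// alignment: e.g. with an 8-byte alignment, an address of 0x1005 becomes
// (0x1005 + 7) & ~7 == 0x1008. (The concrete addresses are illustrative.)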
1630 // If the type of the GEP is not equal to the type of the AllocaInst, the
1631 // frame slot may be shared with another AllocaInst. In that case we cast
1632 // the GEP to the type of the original AllocaInst so the frame storage
1633 // can be reused.
1634 //
1635 // Note: if we change the strategy for dealing with alignment, this cast
1636 // needs to be revisited.
1637 if (GEP->getType() != Orig->getType())
1638 return Builder.CreateBitCast(GEP, Orig->getType(),
1639 Orig->getName() + Twine(".cast"));
1640 }
1641 return GEP;
1642 };
1643
1644 for (auto const &E : FrameData.Spills) {
1645 Value *Def = E.first;
1646 auto SpillAlignment = Align(FrameData.getAlign(Def));
1647 // Create a store instruction storing the value into the
1648 // coroutine frame.
1649 Instruction *InsertPt = nullptr;
1650 Type *ByValTy = nullptr;
1651 if (auto *Arg = dyn_cast<Argument>(Def)) {
1652 // For arguments, we will place the store instruction right after
1653 // the coroutine frame pointer instruction, i.e. bitcast of
1654 // coro.begin from i8* to %f.frame*.
1655 InsertPt = Shape.getInsertPtAfterFramePtr();
1656
1657 // If we're spilling an Argument, make sure we clear 'nocapture'
1658 // from the coroutine function.
1659 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1660
1661 if (Arg->hasByValAttr())
1662 ByValTy = Arg->getParamByValType();
1663 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1664 // Don't spill immediately after a suspend; splitting assumes
1665 // that the suspend will be followed by a branch.
1666 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1667 } else {
1668 auto *I = cast<Instruction>(Def);
1669 if (!DT.dominates(CB, I)) {
1670 // If it is not dominated by CoroBegin, the spill should be
1671 // inserted immediately after the CoroFrame is computed.
1672 InsertPt = Shape.getInsertPtAfterFramePtr();
1673 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1674 // If we are spilling the result of the invoke instruction, split
1675 // the normal edge and insert the spill in the new block.
1676 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1677 InsertPt = NewBB->getTerminator();
1678 } else if (isa<PHINode>(I)) {
1679 // Skip PHINode and EH pad instructions.
1680 BasicBlock *DefBlock = I->getParent();
1681 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1682 InsertPt = splitBeforeCatchSwitch(CSI);
1683 else
1684 InsertPt = &*DefBlock->getFirstInsertionPt();
1685 } else {
1686 assert(!I->isTerminator() && "unexpected terminator");
1687 // For all other values, the spill is placed immediately after
1688 // the definition.
1689 InsertPt = I->getNextNode();
1690 }
1691 }
1692
1693 auto Index = FrameData.getFieldIndex(Def);
1694 Builder.SetInsertPoint(InsertPt);
1695 auto *G = Builder.CreateConstInBoundsGEP2_32(
1696 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1697 if (ByValTy) {
1698 // For byval arguments, we need to store the pointed value in the frame,
1699 // instead of the pointer itself.
1700 auto *Value = Builder.CreateLoad(ByValTy, Def);
1701 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1702 } else {
1703 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1704 }
1705
1706 BasicBlock *CurrentBlock = nullptr;
1707 Value *CurrentReload = nullptr;
1708 for (auto *U : E.second) {
1709 // If we have not seen this use block yet, create a load instruction to
1710 // reload the spilled value from the coroutine frame. For byval
1711 // arguments, the frame GEP itself serves as the reloaded value.
1712 if (CurrentBlock != U->getParent()) {
1713 CurrentBlock = U->getParent();
1714 Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1715
1716 auto *GEP = GetFramePointer(E.first);
1717 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1718 if (ByValTy)
1719 CurrentReload = GEP;
1720 else
1721 CurrentReload = Builder.CreateAlignedLoad(
1722 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1723 SpillAlignment, E.first->getName() + Twine(".reload"));
1724
1726 for (DbgDeclareInst *DDI : DIs) {
1727 bool AllowUnresolved = false;
1728 // This dbg.declare is preserved for all coro-split function
1729 // fragments. It will be unreachable in the main function, and
1730 // processed by coro::salvageDebugInfo() in CoroCloner.
1731 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1732 .insertDeclare(CurrentReload, DDI->getVariable(),
1733 DDI->getExpression(), DDI->getDebugLoc(),
1734 &*Builder.GetInsertPoint());
1735 // This dbg.declare is for the main function entry point. It
1736 // will be deleted in all coro-split functions.
1737 coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1738 }
1739 }
1740
1741 // Salvage debug info on any dbg.addr that we see. We do not insert them
1742 // into each block where we have a use though.
1743 if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
1744 coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
1745 }
1746
1747 // If we have a single edge PHINode, remove it and replace it with a
1748 // reload from the coroutine frame. (We already took care of multi-edge
1749 // PHINodes by rewriting them in the rewritePHIs function).
1750 if (auto *PN = dyn_cast<PHINode>(U)) {
1751 assert(PN->getNumIncomingValues() == 1 &&
1752 "unexpected number of incoming "
1753 "values in the PHINode");
1754 PN->replaceAllUsesWith(CurrentReload);
1755 PN->eraseFromParent();
1756 continue;
1757 }
1758
1759 // Replace all uses of CurrentValue in the current instruction with
1760 // reload.
1761 U->replaceUsesOfWith(Def, CurrentReload);
1762 }
1763 }
1764
1765 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1766
1767 auto SpillBlock = FramePtrBB->splitBasicBlock(
1768 Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1769 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1770 Shape.AllocaSpillBlock = SpillBlock;
1771
1772 // retcon and retcon.once lowering assumes all uses have been sunk.
1773 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1774 Shape.ABI == coro::ABI::Async) {
1775 // If we found any allocas, replace all of their remaining uses with GEPs.
1776 Builder.SetInsertPoint(&SpillBlock->front());
1777 for (const auto &P : FrameData.Allocas) {
1778 AllocaInst *Alloca = P.Alloca;
1779 auto *G = GetFramePointer(Alloca);
1780
1781 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1782 // here, as we are changing the location of the instruction.
1783 G->takeName(Alloca);
1784 Alloca->replaceAllUsesWith(G);
1785 Alloca->eraseFromParent();
1786 }
1787 return;
1788 }
1789
1790 // If we found any allocas, replace all of their remaining uses with GEP
1791 // instructions. To preserve debuggability, we replace the uses of allocas
1792 // in dbg.declares and dbg.values with the reload from the frame.
1793 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1794 // as some of the uses may not be dominated by CoroBegin.
1795 Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1796 SmallVector<Instruction *, 4> UsersToUpdate;
1797 for (const auto &A : FrameData.Allocas) {
1798 AllocaInst *Alloca = A.Alloca;
1799 UsersToUpdate.clear();
1800 for (User *U : Alloca->users()) {
1801 auto *I = cast<Instruction>(U);
1802 if (DT.dominates(CB, I))
1803 UsersToUpdate.push_back(I);
1804 }
1805 if (UsersToUpdate.empty())
1806 continue;
1807 auto *G = GetFramePointer(Alloca);
1808 G->setName(Alloca->getName() + Twine(".reload.addr"));
1809
1811 findDbgUsers(DIs, Alloca);
1812 for (auto *DVI : DIs)
1813 DVI->replaceUsesOfWith(Alloca, G);
1814
1815 for (Instruction *I : UsersToUpdate) {
1816 // Lifetime intrinsics that refer to members of the coroutine frame are
1817 // meaningless, and keeping such meaningless intrinsics around can
1818 // block further optimizations.
1819 if (I->isLifetimeStartOrEnd())
1820 continue;
1821
1822 I->replaceUsesOfWith(Alloca, G);
1823 }
1824 }
1825 Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
1826 for (const auto &A : FrameData.Allocas) {
1827 AllocaInst *Alloca = A.Alloca;
1828 if (A.MayWriteBeforeCoroBegin) {
1829 // isEscaped really means potentially modified before CoroBegin.
1830 if (Alloca->isArrayAllocation())
1831 report_fatal_error(
1832 "Coroutines cannot handle copying of array allocas yet");
1833
1834 auto *G = GetFramePointer(Alloca);
1835 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1836 Builder.CreateStore(Value, G);
1837 }
1838 // For each alias to Alloca created before CoroBegin but used after
1839 // CoroBegin, we recreate them after CoroBegin by applying the offset
1840 // to the pointer in the frame.
1841 for (const auto &Alias : A.Aliases) {
1842 auto *FramePtr = GetFramePointer(Alloca);
1843 auto *FramePtrRaw =
1844 Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1845 auto &Value = *Alias.second;
1846 auto ITy = IntegerType::get(C, Value.getBitWidth());
1847 auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
1848 ConstantInt::get(ITy, Value));
1849 auto *AliasPtrTyped =
1850 Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1851 Alias.first->replaceUsesWithIf(
1852 AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1853 }
1854 }
1855
1856 // PromiseAlloca is not collected in FrameData.Allocas, so the code above
1857 // does not handle the case where the PromiseAlloca has writes before
1858 // CoroBegin, which can be problematic in edge cases. See
1859 // https://github.com/llvm/llvm-project/issues/57861 for an example.
1860 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
1861 AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
1862 // Check whether there is any memory access to the promise alloca before CoroBegin.
1863 bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
1864 auto *Inst = dyn_cast<Instruction>(U.getUser());
1865 if (!Inst || DT.dominates(CB, Inst))
1866 return false;
1867
1868 if (auto *CI = dyn_cast<CallInst>(Inst)) {
1869 // It is fine if the call does not write to the promise. This is
1870 // possible for the @llvm.coro.id intrinsic, which
1871 // takes the promise as its second argument purely as a
1872 // marker.
1873 if (CI->onlyReadsMemory() ||
1874 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
1875 return false;
1876 return true;
1877 }
1878
1879 return isa<StoreInst>(Inst) ||
1880 // Tracking all the uses could take too much time, so be
1881 // conservative and assume the use may escape.
1882 isa<GetElementPtrInst>(Inst) ||
1883 // Before opaque pointers were enabled, there would always be a
1884 // bitcast of the promise alloca. Now that opaque
1885 // pointers are enabled by default, treating bitcasts
1886 // conservatively here remains fine.
1887 isa<BitCastInst>(Inst);
1888 });
1889 if (HasAccessingPromiseBeforeCB) {
1890 Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
1891 auto *G = GetFramePointer(PA);
1892 auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
1893 Builder.CreateStore(Value, G);
1894 }
1895 }
1896}
1897
1898 // Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
1899// PHI in InsertedBB.
1900 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1901 BasicBlock *InsertedBB,
1902 BasicBlock *PredBB,
1903 PHINode *UntilPHI = nullptr) {
1904 auto *PN = cast<PHINode>(&SuccBB->front());
1905 do {
1906 int Index = PN->getBasicBlockIndex(InsertedBB);
1907 Value *V = PN->getIncomingValue(Index);
1908 PHINode *InputV = PHINode::Create(
1909 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1910 &InsertedBB->front());
1911 InputV->addIncoming(V, PredBB);
1912 PN->setIncomingValue(Index, InputV);
1913 PN = dyn_cast<PHINode>(PN->getNextNode());
1914 } while (PN != UntilPHI);
1915}
1916
1917// Rewrites the PHI Nodes in a cleanuppad.
1918static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1919 CleanupPadInst *CleanupPad) {
1920 // For every incoming edge to a CleanupPad we will create a new block holding
1921 // all incoming values in single-value PHI nodes. We will then create another
1922 // block to act as a dispatcher (as all unwind edges for related EH blocks
1923 // must be the same).
1924 //
1925 // cleanuppad:
1926 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1927 // %3 = cleanuppad within none []
1928 //
1929 // It will create:
1930 //
1931 // cleanuppad.corodispatch
1932 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1933 // %3 = cleanuppad within none []
1934 // switch i8 %2, label %unreachable
1935 // [i8 0, label %cleanuppad.from.catchswitch
1936 // i8 1, label %cleanuppad.from.catch.1]
1937 // cleanuppad.from.catchswitch:
1938 // %4 = phi i32 [%0, %catchswitch]
1939 // br label %cleanuppad
1940 // cleanuppad.from.catch.1:
1941 // %6 = phi i32 [%1, %catch.1]
1942 // br label %cleanuppad
1943 // cleanuppad:
1944 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1945 // [%6, %cleanuppad.from.catch.1]
1946
1947 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1948 auto *UnreachBB = BasicBlock::Create(
1949 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1950 IRBuilder<> Builder(UnreachBB);
1951 Builder.CreateUnreachable();
1952
1953 // Create a new cleanuppad which will be the dispatcher.
1954 auto *NewCleanupPadBB =
1955 BasicBlock::Create(CleanupPadBB->getContext(),
1956 CleanupPadBB->getName() + Twine(".corodispatch"),
1957 CleanupPadBB->getParent(), CleanupPadBB);
1958 Builder.SetInsertPoint(NewCleanupPadBB);
1959 auto *SwitchType = Builder.getInt8Ty();
1960 auto *SetDispatchValuePN =
1961 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1962 CleanupPad->removeFromParent();
1963 CleanupPad->insertAfter(SetDispatchValuePN);
1964 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1965 pred_size(CleanupPadBB));
1966
1967 int SwitchIndex = 0;
1968 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1969 for (BasicBlock *Pred : Preds) {
1970 // Create a new cleanuppad and move the PHI values to there.
1971 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1972 CleanupPadBB->getName() +
1973 Twine(".from.") + Pred->getName(),
1974 CleanupPadBB->getParent(), CleanupPadBB);
1975 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1976 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1977 Pred->getName());
1978 Builder.SetInsertPoint(CaseBB);
1979 Builder.CreateBr(CleanupPadBB);
1980 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1981
1982 // Update this Pred to the new unwind point.
1983 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1984
1985 // Setup the switch in the dispatcher.
1986 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1987 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1988 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1989 SwitchIndex++;
1990 }
1991}
1992
1993 static void cleanupSinglePredPHIs(Function &F) {
1994 SmallVector<PHINode *, 32> Worklist;
1995 for (auto &BB : F) {
1996 for (auto &Phi : BB.phis()) {
1997 if (Phi.getNumIncomingValues() == 1) {
1998 Worklist.push_back(&Phi);
1999 } else
2000 break;
2001 }
2002 }
2003 while (!Worklist.empty()) {
2004 auto *Phi = Worklist.pop_back_val();
2005 auto *OriginalValue = Phi->getIncomingValue(0);
2006 Phi->replaceAllUsesWith(OriginalValue);
2007 }
2008}
2009
2010static void rewritePHIs(BasicBlock &BB) {
2011 // For every incoming edge we will create a block holding all
2012 // incoming values in single-value PHI nodes.
2013 //
2014 // loop:
2015 // %n.val = phi i32[%n, %entry], [%inc, %loop]
2016 //
2017 // It will create:
2018 //
2019 // loop.from.entry:
2020 // %n.loop.pre = phi i32 [%n, %entry]
2021 // br label %loop
2022 // loop.from.loop:
2023 // %inc.loop.pre = phi i32 [%inc, %loop]
2024 // br label %loop
2025 //
2026 // After this rewrite, further analysis will ignore any phi nodes with more
2027 // than one incoming edge.
2028
2029 // TODO: Simplify PHINodes in the basic block to remove duplicate
2030 // predecessors.
2031
2032 // Special case for CleanupPad: all EH blocks must have the same unwind edge
2033 // so we need to create an additional "dispatcher" block.
2034 if (auto *CleanupPad =
2035 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
2036 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2037 for (BasicBlock *Pred : Preds) {
2038 if (CatchSwitchInst *CS =
2039 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2040 // CleanupPad with a CatchSwitch predecessor: therefore this is an
2041 // unwind destination that needs to be handled specially.
2042 assert(CS->getUnwindDest() == &BB);
2043 (void)CS;
2044 rewritePHIsForCleanupPad(&BB, CleanupPad);
2045 return;
2046 }
2047 }
2048 }
2049
2050 LandingPadInst *LandingPad = nullptr;
2051 PHINode *ReplPHI = nullptr;
2052 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
2053 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
2054 // We replace the original landing pad with a PHINode that will collect the
2055 // results from all of them.
2056 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
2057 ReplPHI->takeName(LandingPad);
2058 LandingPad->replaceAllUsesWith(ReplPHI);
2059 // We will erase the original landing pad at the end of this function after
2060 // ehAwareSplitEdge cloned it in the transition blocks.
2061 }
2062
2063 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2064 for (BasicBlock *Pred : Preds) {
2065 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
2066 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
2067
2068 // Stop the moving of values at ReplPHI, as this is either null or the PHI
2069 // that replaced the landing pad.
2070 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
2071 }
2072
2073 if (LandingPad) {
2074 // The calls to ehAwareSplitEdge above cloned the original landing pad,
2075 // so it is no longer needed.
2076 LandingPad->eraseFromParent();
2077 }
2078}
2079
2080static void rewritePHIs(Function &F) {
2081 SmallVector<BasicBlock *, 8> WorkList;
2082
2083 for (BasicBlock &BB : F)
2084 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
2085 if (PN->getNumIncomingValues() > 1)
2086 WorkList.push_back(&BB);
2087
2088 for (BasicBlock *BB : WorkList)
2089 rewritePHIs(*BB);
2090}
2091
2092 // Check for instructions that we can recreate on resume, as opposed to
2093 // spilling the result into the coroutine frame.
2094 static bool materializable(Instruction &V) {
2095 return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2096 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
2097}
2098
2099// Check for structural coroutine intrinsics that should not be spilled into
2100// the coroutine frame.
2101 static bool isCoroutineStructureIntrinsic(Instruction &I) {
2102 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
2103 isa<CoroSuspendInst>(&I);
2104}
2105
2106 // For every use of a value that crosses a suspend point, recreate that
2107 // value after the suspend point.
2108 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
2109 const SpillInfo &Spills) {
2110 for (const auto &E : Spills) {
2111 Value *Def = E.first;
2112 BasicBlock *CurrentBlock = nullptr;
2113 Instruction *CurrentMaterialization = nullptr;
2114 for (Instruction *U : E.second) {
2115 // If we have not seen this block, materialize the value.
2116 if (CurrentBlock != U->getParent()) {
2117
2118 bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
2119 CurrentBlock = U->getParent();
2120 auto *InsertBlock = IsInCoroSuspendBlock
2121 ? CurrentBlock->getSinglePredecessor()
2122 : CurrentBlock;
2123 CurrentMaterialization = cast<Instruction>(Def)->clone();
2124 CurrentMaterialization->setName(Def->getName());
2125 CurrentMaterialization->insertBefore(
2126 IsInCoroSuspendBlock ? InsertBlock->getTerminator()
2127 : &*InsertBlock->getFirstInsertionPt());
2128 }
2129 if (auto *PN = dyn_cast<PHINode>(U)) {
2130 assert(PN->getNumIncomingValues() == 1 &&
2131 "unexpected number of incoming "
2132 "values in the PHINode");
2133 PN->replaceAllUsesWith(CurrentMaterialization);
2134 PN->eraseFromParent();
2135 continue;
2136 }
2137 // Replace all uses of Def in the current instruction with the
2138 // CurrentMaterialization for the block.
2139 U->replaceUsesOfWith(Def, CurrentMaterialization);
2140 }
2141 }
2142}
2143
2144// Splits the block at a particular instruction unless it is the first
2145// instruction in the block with a single predecessor.
2146 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2147 auto *BB = I->getParent();
2148 if (&BB->front() == I) {
2149 if (BB->getSinglePredecessor()) {
2150 BB->setName(Name);
2151 return BB;
2152 }
2153 }
2154 return BB->splitBasicBlock(I, Name);
2155}
2156
2157 // Split above and below a particular instruction so that it
2158 // ends up alone in its own block.
2159static void splitAround(Instruction *I, const Twine &Name) {
2160 splitBlockIfNotFirst(I, Name);
2161 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2162}
2163
2164static bool isSuspendBlock(BasicBlock *BB) {
2165 return isa<AnyCoroSuspendInst>(BB->front());
2166}
2167
2168 typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;
2169
2170/// Does control flow starting at the given block ever reach a suspend
2171/// instruction before reaching a block in VisitedOrFreeBBs?
2172 static bool isSuspendReachableFrom(BasicBlock *From,
2173 VisitedBlocksSet &VisitedOrFreeBBs) {
2174 // Eagerly try to add this block to the visited set. If it's already
2175 // there, stop recursing; this path doesn't reach a suspend before
2176 // either looping or reaching a freeing block.
2177 if (!VisitedOrFreeBBs.insert(From).second)
2178 return false;
2179
2180 // We assume that we'll already have split suspends into their own blocks.
2181 if (isSuspendBlock(From))
2182 return true;
2183
2184 // Recurse on the successors.
2185 for (auto *Succ : successors(From)) {
2186 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2187 return true;
2188 }
2189
2190 return false;
2191}
2192
2193/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2194/// suspend point?
2195 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2196 // Seed the visited set with all the basic blocks containing a free
2197 // so that the traversal will not continue past them.
2198 VisitedBlocksSet VisitedOrFreeBBs;
2199 for (auto *User : AI->users()) {
2200 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2201 VisitedOrFreeBBs.insert(FI->getParent());
2202 }
2203
2204 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2205}
2206
2207/// After we split the coroutine, will the given basic block be along
2208/// an obvious exit path for the resumption function?
2209 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2210 unsigned depth = 3) {
2211 // If we've bottomed out our depth count, stop searching and assume
2212 // that the path might loop back.
2213 if (depth == 0) return false;
2214
2215 // If this is a suspend block, we're about to exit the resumption function.
2216 if (isSuspendBlock(BB)) return true;
2217
2218 // Recurse into the successors.
2219 for (auto *Succ : successors(BB)) {
2220 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2221 return false;
2222 }
2223
2224 // If none of the successors leads back in a loop, we're on an exit/abort.
2225 return true;
2226}
2227
2228 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2229 // Look for a free that isn't sufficiently obviously followed by
2230 // either a suspend or a termination, i.e. something that will leave
2231 // the coro resumption frame.
2232 for (auto *U : AI->users()) {
2233 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2234 if (!FI) continue;
2235
2236 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2237 return true;
2238 }
2239
2240 // If we never found one, we don't need a stack save.
2241 return false;
2242}
2243
2244/// Turn each of the given local allocas into a normal (dynamic) alloca
2245/// instruction.
2246 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst *> LocalAllocas,
2247 SmallVectorImpl<Instruction*> &DeadInsts) {
2248 for (auto *AI : LocalAllocas) {
2249 auto M = AI->getModule();
2250 IRBuilder<> Builder(AI);
2251
2252 // Save the stack depth. Try to avoid doing this if the stackrestore
2253 // is going to immediately precede a return or something.
2254 Value *StackSave = nullptr;
2255 if (localAllocaNeedsStackSave(AI))
2256 StackSave = Builder.CreateCall(
2257 Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2258
2259 // Allocate memory.
2260 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2261 Alloca->setAlignment(AI->getAlignment());
2262
2263 for (auto *U : AI->users()) {
2264 // Replace gets with the allocation.
2265 if (isa<CoroAllocaGetInst>(U)) {
2266 U->replaceAllUsesWith(Alloca);
2267
2268 // Replace frees with stackrestores. This is safe because
2269 // alloca.alloc is required to obey a stack discipline, although we
2270 // don't enforce that structurally.
2271 } else {
2272 auto FI = cast<CoroAllocaFreeInst>(U);
2273 if (StackSave) {
2274 Builder.SetInsertPoint(FI);
2275 Builder.CreateCall(
2276 Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2277 StackSave);
2278 }
2279 }
2280 DeadInsts.push_back(cast<Instruction>(U));
2281 }
2282
2283 DeadInsts.push_back(AI);
2284 }
2285}
2286
2287/// Turn the given coro.alloca.alloc call into a dynamic allocation.
2288/// This happens during the all-instructions iteration, so it must not
2289/// delete the call.
2290 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2291 coro::Shape &Shape,
2292 SmallVectorImpl<Instruction*> &DeadInsts) {
2293 IRBuilder<> Builder(AI);
2294 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2295
2296 for (User *U : AI->users()) {
2297 if (isa<CoroAllocaGetInst>(U)) {
2298 U->replaceAllUsesWith(Alloc);
2299 } else {
2300 auto FI = cast<CoroAllocaFreeInst>(U);
2301 Builder.SetInsertPoint(FI);
2302 Shape.emitDealloc(Builder, Alloc, nullptr);
2303 }
2304 DeadInsts.push_back(cast<Instruction>(U));
2305 }
2306
2307 // Push this on last so that it gets deleted after all the others.
2308 DeadInsts.push_back(AI);
2309
2310 // Return the new allocation value so that we can check for needed spills.
2311 return cast<Instruction>(Alloc);
2312}
2313
2314/// Get the current swifterror value.
2315 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2316 coro::Shape &Shape) {
2317 // Make a fake function pointer as a sort of intrinsic.
2318 auto FnTy = FunctionType::get(ValueTy, {}, false);
2319 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2320
2321 auto Call = Builder.CreateCall(FnTy, Fn, {});
2322 Shape.SwiftErrorOps.push_back(Call);
2323
2324 return Call;
2325}
2326
2327/// Set the given value as the current swifterror value.
2328///
2329/// Returns a slot that can be used as a swifterror slot.
2330 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2331 coro::Shape &Shape) {
2332 // Make a fake function pointer as a sort of intrinsic.
2333 auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2334 {V->getType()}, false);
2335 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2336
2337 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2338 Shape.SwiftErrorOps.push_back(Call);
2339
2340 return Call;
2341}
2342
2343/// Set the swifterror value from the given alloca before a call,
2344 /// then put it back in the alloca afterwards.
2345///
2346/// Returns an address that will stand in for the swifterror slot
2347/// until splitting.
2348 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2349 AllocaInst *Alloca,
2350 coro::Shape &Shape) {
2351 auto ValueTy = Alloca->getAllocatedType();
2352 IRBuilder<> Builder(Call);
2353
2354 // Load the current value from the alloca and set it as the
2355 // swifterror value.
2356 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2357 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2358
2359 // Move to after the call. Since swifterror only has a guaranteed
2360 // value on normal exits, we can ignore implicit and explicit unwind
2361 // edges.
2362 if (isa<CallInst>(Call)) {
2363 Builder.SetInsertPoint(Call->getNextNode());
2364 } else {
2365 auto Invoke = cast<InvokeInst>(Call);
2366 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2367 }
2368
2369 // Get the current swifterror value and store it to the alloca.
2370 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2371 Builder.CreateStore(ValueAfterCall, Alloca);
2372
2373 return Addr;
2374}
2375
2376/// Eliminate a formerly-swifterror alloca by inserting the get/set
2377/// intrinsics and attempting to MemToReg the alloca away.
2378 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2379 coro::Shape &Shape) {
2380 for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2381 // swifterror values can only be used in very specific ways.
2382 // We take advantage of that here.
2383 auto User = Use.getUser();
2384 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2385 continue;
2386
2387 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2388 auto Call = cast<Instruction>(User);
2389
2390 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2391
2392 // Use the returned slot address as the call argument.
2393 Use.set(Addr);
2394 }
2395
2396 // All the uses should be loads and stores now.
2397 assert(isAllocaPromotable(Alloca));
2398}
2399
2400/// "Eliminate" a swifterror argument by reducing it to the alloca case
2401/// and then loading and storing in the prologue and epilog.
2402///
2403/// The argument keeps the swifterror flag.
2404 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2405 coro::Shape &Shape,
2406 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2407 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2408
2409 auto ArgTy = cast<PointerType>(Arg.getType());
2410 // swifterror arguments are required to have pointer-to-pointer type,
2411 // so create a pointer-typed alloca with opaque pointers.
2412 auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
2413 : ArgTy->getNonOpaquePointerElementType();
2414
2415 // Reduce to the alloca case:
2416
2417 // Create an alloca and replace all uses of the arg with it.
2418 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2419 Arg.replaceAllUsesWith(Alloca);
2420
2421 // Set an initial value in the alloca. swifterror is always null on entry.
2422 auto InitialValue = Constant::getNullValue(ValueTy);
2423 Builder.CreateStore(InitialValue, Alloca);
2424
2425 // Find all the suspends in the function and save and restore around them.
2426 for (auto *Suspend : Shape.CoroSuspends) {
2427 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2428 }
2429
2430 // Find all the coro.ends in the function and restore the error value.
2431 for (auto *End : Shape.CoroEnds) {
2432 Builder.SetInsertPoint(End);
2433 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2434 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2435 }
2436
2437 // Now we can use the alloca logic.
2438 AllocasToPromote.push_back(Alloca);
2439 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2440}
2441
2442/// Eliminate all problematic uses of swifterror arguments and allocas
2443/// from the function. We'll fix them up later when splitting the function.
2444 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2445 SmallVector<AllocaInst*, 4> AllocasToPromote;
2446
2447 // Look for a swifterror argument.
2448 for (auto &Arg : F.args()) {
2449 if (!Arg.hasSwiftErrorAttr()) continue;
2450
2451 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2452 break;
2453 }
2454
2455 // Look for swifterror allocas.
2456 for (auto &Inst : F.getEntryBlock()) {
2457 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2458 if (!Alloca || !Alloca->isSwiftError()) continue;
2459
2460 // Clear the swifterror flag.
2461 Alloca->setSwiftError(false);
2462
2463 AllocasToPromote.push_back(Alloca);
2464 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2465 }
2466
2467 // If we have any allocas to promote, compute a dominator tree and
2468 // promote them en masse.
2469 if (!AllocasToPromote.empty()) {
2470 DominatorTree DT(F);
2471 PromoteMemToReg(AllocasToPromote, DT);
2472 }
2473}
2474
2475/// retcon and retcon.once conventions assume that all spill uses can be sunk
2476/// after the coro.begin intrinsic.
2477 static void sinkSpillUsesAfterCoroBegin(Function &F,
2478 const FrameDataInfo &FrameData,
2479 CoroBeginInst *CoroBegin) {
2480 DominatorTree Dom(F);
2481
2482 SmallSetVector<Instruction *, 32> ToMove;
2483 SmallVector<Instruction *, 32> Worklist;
2484
2485 // Collect all users that precede coro.begin.
2486 for (auto *Def : FrameData.getAllDefs()) {
2487 for (User *U : Def->users()) {
2488 auto Inst = cast<Instruction>(U);
2489 if (Inst->getParent() != CoroBegin->getParent() ||
2490 Dom.dominates(CoroBegin, Inst))
2491 continue;
2492 if (ToMove.insert(Inst))
2493 Worklist.push_back(Inst);
2494 }
2495 }
2496 // Recursively collect users before coro.begin.
2497 while (!Worklist.empty()) {
2498 auto *Def = Worklist.pop_back_val();
2499 for (User *U : Def->users()) {
2500 auto Inst = cast<Instruction>(U);
2501 if (Dom.dominates(CoroBegin, Inst))
2502 continue;
2503 if (ToMove.insert(Inst))
2504 Worklist.push_back(Inst);
2505 }
2506 }
2507
2508 // Sort by dominance.
2509 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2510 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2511 // If a dominates b it should precede (<) b.
2512 return Dom.dominates(A, B);
2513 });
2514
2515 Instruction *InsertPt = CoroBegin->getNextNode();
2516 for (Instruction *Inst : InsertionList)
2517 Inst->moveBefore(InsertPt);
2518}
2519
2520 /// For each local variable whose users all live inside one suspended
2521 /// region, sink its lifetime.start markers to just after the suspend
2522 /// block. Doing so minimizes the lifetime of each variable and hence
2523 /// minimizes the amount of data we end up putting on the frame.
2524 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2525 SuspendCrossingInfo &Checker) {
2526 DominatorTree DT(F);
2527
2528 // Collect all possible basic blocks which may dominate all uses of allocas.
2529 SmallPtrSet<BasicBlock *, 2> DomSet;
2530 DomSet.insert(&F.getEntryBlock());
2531 for (auto *CSI : Shape.CoroSuspends) {
2532 BasicBlock *SuspendBlock = CSI->getParent();
2533 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2534 "should have split coro.suspend into its own block");
2535 DomSet.insert(SuspendBlock->getSingleSuccessor());
2536 }
2537
2538 for (Instruction &I : instructions(F)) {
2539 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2540 if (!AI)
2541 continue;
2542
2543 for (BasicBlock *DomBB : DomSet) {
2544 bool Valid = true;
2545 SmallVector<Instruction *, 1> Lifetimes;
2546
2547 auto isLifetimeStart = [](Instruction* I) {
2548 if (auto* II = dyn_cast<IntrinsicInst>(I))
2549 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2550 return false;
2551 };
2552
2553 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2554 if (isLifetimeStart(U)) {
2555 Lifetimes.push_back(U);
2556 return true;
2557 }
2558 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2559 return false;
2560 if (isLifetimeStart(U->user_back())) {
2561 Lifetimes.push_back(U->user_back());
2562 return true;
2563 }
2564 return false;
2565 };
2566
2567 for (User *U : AI->users()) {
2568 Instruction *UI = cast<Instruction>(U);
2569 // For all users except lifetime.start markers, if they are all
2570 // dominated by one of the basic blocks and do not cross
2571 // suspend points as well, then there is no need to spill the
2572 // instruction.
2573 if (!DT.dominates(DomBB, UI->getParent()) ||
2574 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2575 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2576 // markers.
2577 if (collectLifetimeStart(UI, AI))
2578 continue;
2579 Valid = false;
2580 break;
2581 }
2582 }
2583 // Sink the lifetime.start markers to the dominating block when they are
2584 // only used outside the region.
2585 if (Valid && Lifetimes.size() != 0) {
2586 // May be AI itself, when the type of AI is i8*
2587 auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2588 if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2589 return AI;
2590 auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2591 return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2592 DomBB->getTerminator());
2593 }(AI);
2594
2595 auto *NewLifetime = Lifetimes[0]->clone();
2596 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2597 NewLifetime->insertBefore(DomBB->getTerminator());
2598
2599 // All of the original lifetime.start markers are no longer necessary.
2600 for (Instruction *S : Lifetimes)
2601 S->eraseFromParent();
2602
2603 break;
2604 }
2605 }
2606 }
2607}
2608
2609 static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape,
2610 const SuspendCrossingInfo &Checker,
2611 SmallVectorImpl<AllocaInfo> &Allocas,
2612 const DominatorTree &DT) {
2613 if (Shape.CoroSuspends.empty())
2614 return;
2615
2616 // The PromiseAlloca will be specially handled since it needs to be in a
2617 // fixed position in the frame.
2618 if (AI == Shape.SwitchLowering.PromiseAlloca)
2619 return;
2620
2621 // The code that uses the lifetime.start intrinsic does not work for
2622 // functions with loops that never exit. Disable it on the ABIs we know
2623 // may generate such code.
2624 bool ShouldUseLifetimeStartInfo =
2625 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2626 Shape.ABI != coro::ABI::RetconOnce);
2627 AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT,
2628 *Shape.CoroBegin, Checker,
2629 ShouldUseLifetimeStartInfo};
2630 Visitor.visitPtr(*AI);
2631 if (!Visitor.getShouldLiveOnFrame())
2632 return;
2633 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2634 Visitor.getMayWriteBeforeCoroBegin());
2635}
2636
2637 void coro::salvageDebugInfo(
2638 SmallDenseMap<Value *, AllocaInst *, 4> &DbgPtrAllocaCache,
2639 DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
2640 Function *F = DVI->getFunction();
2641 IRBuilder<> Builder(F->getContext());
2642 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2643 while (isa<IntrinsicInst>(InsertPt))
2644 ++InsertPt;
2645 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2646 DIExpression *Expr = DVI->getExpression();
2647 // Follow the pointer arithmetic all the way to the incoming
2648 // function argument and convert into a DIExpression.
2649 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2650 Value *Storage = DVI->getVariableLocationOp(0);
2651 Value *OriginalStorage = Storage;
2652
2653 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2654 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2655 Storage = LdInst->getOperand(0);
2656 // FIXME: This is a heuristic that works around the fact that
2657 // LLVM IR debug intrinsics cannot yet distinguish between
2658 // memory and value locations: Because a dbg.declare(alloca) is
2659 // implicitly a memory location no DW_OP_deref operation for the
2660 // last direct load from an alloca is necessary. This condition
2661 // effectively drops the *last* DW_OP_deref in the expression.
2662 if (!SkipOutermostLoad)
2663 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2664 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2665 Storage = StInst->getOperand(0);
2666 } else {
2667 SmallVector<uint64_t, 16> Ops;
2668 SmallVector<Value *, 0> AdditionalValues;
2669 Value *Op = llvm::salvageDebugInfoImpl(
2670 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2671 AdditionalValues);
2672 if (!Op || !AdditionalValues.empty()) {
2673 // If salvaging failed or salvaging produced more than one location
2674 // operand, give up.
2675 break;
2676 }
2677 Storage = Op;
2678 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2679 }
2680 SkipOutermostLoad = false;
2681 }
2682 if (!Storage)
2683 return;
2684
2685 // Store a pointer to the coroutine frame object in an alloca so it
2686 // is available throughout the function when producing unoptimized
2687 // code. Extending the lifetime this way is correct because the
2688 // variable has been declared by a dbg.declare intrinsic.
2689 //
2690 // Avoid creating the alloca when optimization passes would eliminate it,
2691 // which would leave the corresponding dbg.declares invalid.
2692 if (!OptimizeFrame)
2693 if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2694 auto &Cached = DbgPtrAllocaCache[Storage];
2695 if (!Cached) {
2696 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2697 Arg->getName() + ".debug");
2698 Builder.CreateStore(Storage, Cached);
2699 }
2700 Storage = Cached;
2701 // FIXME: LLVM lacks nuanced semantics to differentiate between
2702 // memory and direct locations at the IR level. The backend will
2703 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2704 // location. Thus, if there are deref and offset operations in the
2705 // expression, we need to add a DW_OP_deref at the *start* of the
2706 // expression to first load the contents of the alloca before
2707 // adjusting it with the expression.
2708 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2709 }
2710
2711 DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2712 DVI->setExpression(Expr);
2713 // We only hoist dbg.declare today, since it doesn't make sense to hoist
2714 // dbg.value or dbg.addr: they do not have the same function-wide
2715 // guarantees that dbg.declare does.
2716 if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
2717 Instruction *InsertPt = nullptr;
2718 if (auto *I = dyn_cast<Instruction>(Storage))
2719 InsertPt = I->getInsertionPointAfterDef();
2720 else if (isa<Argument>(Storage))
2721 InsertPt = &*F->getEntryBlock().begin();
2722 if (InsertPt)
2723 DVI->moveBefore(InsertPt);
2724 }
2725}
2726
2727 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2728 // Don't eliminate swifterror in async functions that won't be split.
2729 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2730 eliminateSwiftError(F, Shape);
2731
2732 if (Shape.ABI == coro::ABI::Switch &&
2733 Shape.SwitchLowering.PromiseAlloca) {
2734 Shape.getSwitchCoroId()->clearPromise();
2735 }
2736
2737 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2738 // intrinsics are in their own blocks to simplify the logic of building up
2739 // SuspendCrossing data.
2740 for (auto *CSI : Shape.CoroSuspends) {
2741 if (auto *Save = CSI->getCoroSave())
2742 splitAround(Save, "CoroSave");
2743 splitAround(CSI, "CoroSuspend");
2744 }
2745
2746 // Put CoroEnds into their own blocks.
2747 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2748 splitAround(CE, "CoroEnd");
2749
2750 // Emit the musttail call function in a new block before the CoroEnd.
2751 // We do this here so that the right suspend crossing info is computed for
2752 // the uses of the musttail call. (Arguments to the coro.end
2753 // instructions would otherwise be ignored.)
2754 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2755 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2756 if (!MustTailCallFn)
2757 continue;
2758 IRBuilder<> Builder(AsyncEnd);
2759 SmallVector<Value *, 8> Args(AsyncEnd->args());
2760 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2761 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2762 Arguments, Builder);
2763 splitAround(Call, "MustTailCall.Before.CoroEnd");
2764 }
2765 }
2766
2767 // Later code makes structural assumptions about single-predecessor PHIs,
2768 // e.g. that they are not live across a suspend point.
2769 cleanupSinglePredPHIs(F);
2770
2771 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2772 // never have its definition separated from the PHI by a suspend point.
2773 rewritePHIs(F);
2774
2775 // Build suspend crossing info.
2776 SuspendCrossingInfo Checker(F, Shape);
2777
2778 IRBuilder<> Builder(F.getContext());
2779 FrameDataInfo FrameData;
2780 SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
2781 SmallVector<Instruction*, 4> DeadInstructions;
2782
2783 {
2784 SpillInfo Spills;
2785 for (int Repeat = 0; Repeat < 4; ++Repeat) {
2786 // See if there are materializable instructions across suspend points.
2787 // FIXME: We could use a worklist to track the candidate materializable
2788 // instructions instead of iterating over the whole function again and again.
2789 for (Instruction &I : instructions(F))
2790 if (materializable(I)) {
2791 for (User *U : I.users())
2792 if (Checker.isDefinitionAcrossSuspend(I, U))
2793 Spills[&I].push_back(cast<Instruction>(U));
2794 }
2795
2796 if (Spills.empty())
2797 break;
2798
2799 // Rewrite materializable instructions to be materialized at the use
2800 // point.
2801 LLVM_DEBUG(dumpSpills("Materializations", Spills));
2802 rewriteMaterializableInstructions(Builder, Spills);
2803 Spills.clear();
2804 }
2805 }
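// Note: rematerialization can itself create new candidates, since a cloned
// instruction's operands may now be live across a suspend point; that is
// why the scan above is repeated (up to 4 times) rather than run once.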
2806
2807 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2808 Shape.ABI != coro::ABI::RetconOnce)
2809 sinkLifetimeStartMarkers(F, Shape, Checker);
2810
2811 // Collect the spills for arguments and other non-materializable values.
2812 for (Argument &A : F.args())
2813 for (User *U : A.users())
2814 if (Checker.isDefinitionAcrossSuspend(A, U))
2815 FrameData.Spills[&A].push_back(cast<Instruction>(U));
2816
2817 const DominatorTree DT(F);
2818 for (Instruction &I : instructions(F)) {
2819 // Values returned from coroutine structure intrinsics should not be part
2820 // of the Coroutine Frame.
2821 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2822 continue;
2823
2824 // Handle alloca.alloc specially here.
2825 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2826 // Check whether the alloca's lifetime is bounded by suspend points.
2827 if (isLocalAlloca(AI)) {
2828 LocalAllocas.push_back(AI);
2829 continue;
2830 }
2831
2832 // If not, do a quick rewrite of the alloca and then add spills of
2833 // the rewritten value. The rewrite doesn't invalidate anything in
2834 // Spills because the other alloca intrinsics have no other operands
2835 // besides AI, and it doesn't invalidate the iteration because we delay
2836 // erasing AI.
2837 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2838
2839 for (User *U : Alloc->users()) {
2840 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2841 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2842 }
2843 continue;
2844 }
2845
2846 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2847 if (isa<CoroAllocaGetInst>(I))
2848 continue;
2849
2850 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
2851 collectFrameAlloca(AI, Shape, Checker, FrameData.Allocas, DT);
2852 continue;
2853 }
2854
2855 for (User *U : I.users())
2856 if (Checker.isDefinitionAcrossSuspend(I, U)) {
2857 // We cannot spill a token.
2858 if (I.getType()->isTokenTy())
2859 report_fatal_error(
2860 "token definition is separated from the use by a suspend point");
2861 FrameData.Spills[&I].push_back(cast<Instruction>(U));
2862 }
2863 }
2864
2865 LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2866
2867 // We don't want the layout of the coroutine frame to be affected by
2868 // debug information, so we only salvage DbgValueInsts whose values
2869 // are already spilled to the frame.
2870 // The dbg.values for allocas are handled specially elsewhere.
2871 for (auto &Iter : FrameData.Spills) {
2872 auto *V = Iter.first;
2873 SmallVector<DbgValueInst *, 16> DVIs;
2874 findDbgValues(DVIs, V);
2875 for (DbgValueInst *DVI : DVIs)
2876 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2877 FrameData.Spills[V].push_back(DVI);
2878 }
2879
2880 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2881 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2882 Shape.ABI == coro::ABI::Async)
2883 sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2884 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2885 createFramePtr(Shape);
2886 // For now, this works for C++ programs only.
2887 buildFrameDebugInfo(F, Shape, FrameData);
2888 insertSpills(FrameData, Shape);
2889 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2890
2891 for (auto *I : DeadInstructions)
2892 I->eraseFromParent();
2893}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
AMDGPU Lower Kernel Arguments
assume Assume Builder
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:492
static void cleanupSinglePredPHIs(Function &F)
Definition: CoroFrame.cpp:1993
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
Definition: CoroFrame.cpp:2172
static bool isCoroutineStructureIntrinsic(Instruction &I)
Definition: CoroFrame.cpp:2101
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
Definition: CoroFrame.cpp:2168
static void createFramePtr(coro::Shape &Shape)
Definition: CoroFrame.cpp:1555
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
Definition: CoroFrame.cpp:2290
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
Definition: CoroFrame.cpp:1543
@ SmallVectorThreshold
Definition: CoroFrame.cpp:48
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
Definition: CoroFrame.cpp:2444
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
Definition: CoroFrame.cpp:2246
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
Definition: CoroFrame.cpp:2195
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
Definition: CoroFrame.cpp:2330
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
Definition: CoroFrame.cpp:2348
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
Definition: CoroFrame.cpp:820
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
Definition: CoroFrame.cpp:2609
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
Definition: CoroFrame.cpp:2228
static void splitAround(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2159
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
Definition: CoroFrame.cpp:2378
static void rewritePHIs(BasicBlock &BB)
Definition: CoroFrame.cpp:2010
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
Definition: CoroFrame.cpp:1900
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
Definition: CoroFrame.cpp:885
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
Definition: CoroFrame.cpp:2209
static void rewriteMaterializableInstructions(IRBuilder<> &IRB, const SpillInfo &Spills)
Definition: CoroFrame.cpp:2108
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
Definition: CoroFrame.cpp:2404
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
Definition: CoroFrame.cpp:972
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Definition: CoroFrame.cpp:1145
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:2146
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
Definition: CoroFrame.cpp:2477
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker)
For each local variable that all of its user are only used inside one of suspended region,...
Definition: CoroFrame.cpp:2524
static bool materializable(Instruction &V)
Definition: CoroFrame.cpp:2094
static bool isSuspendBlock(BasicBlock *BB)
Definition: CoroFrame.cpp:2164
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
Definition: CoroFrame.cpp:1918
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
Definition: CoroFrame.cpp:429
static StringRef solveTypeName(Type *Ty)
Create name for Type.
Definition: CoroFrame.cpp:837
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
Definition: CoroFrame.cpp:419
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
Definition: CoroFrame.cpp:2315
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Definition: CoroFrame.cpp:1586
Given that RA is a live value
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
uint64_t Size
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:1032
Hexagon Common GEP
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:529
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
print must be executed print the must be executed context for all instructions
LLVMContext & Context
return ToRemove size() > 0
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:58
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:150
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:152
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:125
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:100
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:118
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:129
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:96
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:372
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:245
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:208
const Instruction & front() const
Definition: BasicBlock.h:326
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:401
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:284
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:314
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:35
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.h:127
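A minimal sketch of the splitBasicBlock() API above (the helper name is hypothetical): split a block right after a given instruction, so everything following it starts a fresh block. splitBasicBlock() terminates the original block with an unconditional branch to the new block and returns the new block.
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Hypothetical helper: split BB immediately after instruction I.
static BasicBlock *splitAfter(Instruction *I, const Twine &Name) {
  return I->getParent()->splitBasicBlock(std::next(I->getIterator()), Name);
}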
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:152
Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.
Definition: InstrTypes.h:1184
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Definition: InstrTypes.h:1683
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1351
unsigned arg_size() const
Definition: InstrTypes.h:1349
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args=std::nullopt, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:887
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1707
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
This represents the llvm.coro.alloca.alloc instruction.
Definition: CoroInstr.h:670
Value * getSize() const
Definition: CoroInstr.h:673
This class represents the llvm.coro.begin instruction.
Definition: CoroInstr.h:420
void clearPromise()
Definition: CoroInstr.h:124
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:493
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
Definition: DIBuilder.cpp:509
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
Definition: DIBuilder.cpp:319
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its address.
Definition: DIBuilder.cpp:837
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
Definition: DIBuilder.cpp:283
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
Definition: DIBuilder.cpp:680
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
Definition: DIBuilder.cpp:380
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
Definition: DIBuilder.cpp:799
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
Definition: DIBuilder.cpp:1169
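A hypothetical sketch of the DIBuilder calls above: create a struct debug type with an initially empty member list, then fill in the members once their offsets are known, via replaceArrays(). The names, sizes, and the two-step pattern are illustrative assumptions, not the exact sequence used by this file.
#include "llvm/IR/DIBuilder.h"
using namespace llvm;
// Hypothetical helper: build a one-member struct debug type.
static DICompositeType *buildStructDebugType(DIBuilder &DIB, DIScope *Scope,
                                             DIFile *File, DIType *ElemTy) {
  DICompositeType *StructTy = DIB.createStructType(
      Scope, "frame", File, /*LineNumber=*/0, /*SizeInBits=*/64,
      /*AlignInBits=*/64, DINode::FlagZero, /*DerivedFrom=*/nullptr,
      DIB.getOrCreateArray({}));
  DIDerivedType *Member = DIB.createMemberType(
      StructTy, "field0", File, /*LineNo=*/0, /*SizeInBits=*/64,
      /*AlignInBits=*/64, /*OffsetInBits=*/0, DINode::FlagZero, ElemTy);
  DIB.replaceArrays(StructTy, DIB.getOrCreateArray({Member}));
  return StructTy;
}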
DWARF expression.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_arg.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; this is not necessarily the number of instances of DW_OP_LLVM_arg within the expression.
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value and/or an entry value.
A scope for locals.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
unsigned getLine() const
A parsed version of the target data layout string, and methods for querying it.
Definition: DataLayout.h:114
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of its elements.
Definition: DataLayout.cpp:681
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:836
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:676
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:845
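The DataLayout queries above combine naturally when inspecting a frame-like struct. A minimal sketch (helper name hypothetical) that walks a struct's fields and reads back their offsets and ABI alignments:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;
// Hypothetical helper: query size/alignment/offset info for each field.
static void dumpLayout(const DataLayout &DL, StructType *STy) {
  const StructLayout *SL = DL.getStructLayout(STy);
  for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
    Type *FieldTy = STy->getElementType(I);
    uint64_t OffsetBits = SL->getElementOffsetInBits(I);
    Align ABIAlign = DL.getABITypeAlign(FieldTy);
    (void)FieldTy; (void)OffsetBits; (void)ABIAlign;
  }
}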
This represents the llvm.dbg.declare instruction.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
DIExpression * getExpression() const
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: DenseMap.h:197
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:103
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2550
void visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visitBitCastInst(BitCastInst &I)
Definition: InstVisitor.h:187
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
void visitPHINode(PHINode &I)
Definition: InstVisitor.h:175
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
Definition: InstVisitor.h:188
void visitSelectInst(SelectInst &I)
Definition: InstVisitor.h:189
void visitGetElementPtrInst(GetElementPtrInst &I)
Definition: InstVisitor.h:174
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following: the new instruction has no parent and no name.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
Definition: Instruction.cpp:78
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:88
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:358
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:70
const BasicBlock * getParent() const
Definition: Instruction.h:90
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
Definition: Instruction.h:87
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:74
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:82
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
Definition: Instruction.cpp:94
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:313
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handling.
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:497
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1356
This is the common base class for memset/memcpy/memmove.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:398
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have.
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:92
iterator find(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:386
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:450
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:301
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc.).
Definition: SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
Definition: SmallString.h:261
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Compute live ranges of allocas.
Definition: StackLifetime.h:37
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:222
uint64_t getElementOffsetInBits(unsigned Idx) const
Definition: DataLayout.h:660
Class to represent struct types.
Definition: DerivedTypes.h:213
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
Definition: Type.cpp:447
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:515
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:327
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:328
Multiway switch.
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector.
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
StringRef toStringRef(SmallVectorImpl< char > &Out) const
This returns the twine as a single StringRef if it can be represented as such.
Definition: Twine.h:473
static constexpr TypeSize Fixed(ScalarTy ExactSize)
Definition: TypeSize.h:331
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:249
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
Type * getNonOpaquePointerElementType() const
Only use this method in code that is not reachable with opaque pointers, or part of deprecated methods.
Definition: Type.h:416
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:243
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:295
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:157
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:222
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:865
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:375
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:685
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:994
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:381
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:182
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
A range adaptor for a pair of iterators.
A raw_ostream that writes to a SmallVector or SmallString.
Definition: raw_ostream.h:672
StringRef str() const
Return a StringRef for the vector contents.
Definition: raw_ostream.h:697
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1502
void salvageDebugInfo(SmallDenseMap< llvm::Value *, llvm::AllocaInst *, 4 > &DbgPtrAllocaCache, DbgVariableIntrinsic *DVI, bool OptimizeFrame)
Recover a dbg.declare prepared by the frontend and emit an alloca holding a pointer to the coroutine frame.
Definition: CoroFrame.cpp:2637
void buildCoroutineFrame(Function &F, Shape &Shape)
Definition: CoroFrame.cpp:2727
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1675
SourceLanguage
Definition: Dwarf.h:199
bool isCPlusPlus(SourceLanguage S)
Definition: Dwarf.h:207
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
TinyPtrVector< DbgDeclareInst * > FindDbgDeclareUses(Value *V)
Like FindDbgAddrUses, but only returns dbg.declare intrinsics, not dbg.addr.
Definition: DebugInfo.cpp:71
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appropriate.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
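A minimal sketch of make_scope_exit: the callable handed to it runs when the returned object is destroyed, on every path out of the scope.
#include "llvm/ADT/ScopeExit.h"
void scopeExitExample(bool Fail) {
  auto Cleanup = llvm::make_scope_exit([] { /* cleanup logic */ });
  if (Fail)
    return; // the cleanup callable still runs here
  // normal path: the cleanup callable also runs at the closing brace
}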
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:392
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
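Worked examples for the two helpers above: Log2_64_Ceil(48) is 6 because 2^5 = 32 < 48 <= 64 = 2^6, and isAligned(Align(8), N) simply checks that N is a multiple of 8.
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
void alignmentHelperExamples() {
  unsigned CeilLog = Log2_64_Ceil(48); // 6
  bool Ok = isAligned(Align(8), 24);   // true: 24 is a multiple of 8
  bool Bad = isAligned(Align(8), 20);  // false
  (void)CeilLog; (void)Ok; (void)Bad;
}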
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:721
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
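A hypothetical sketch combining isAllocaPromotable() with PromoteMemToReg() above: filter a candidate set and promote the survivors to SSA values in one mem2reg-style step.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace llvm;
// Hypothetical helper: promote every legal alloca in Candidates.
static void promoteAll(ArrayRef<AllocaInst *> Candidates, DominatorTree &DT) {
  SmallVector<AllocaInst *, 8> Promotable;
  for (AllocaInst *AI : Candidates)
    if (isAllocaPromotable(AI))
      Promotable.push_back(AI);
  if (!Promotable.empty())
    PromoteMemToReg(Promotable, DT);
}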
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1742
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:105
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1683
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connecting the specified blocks in the case that Succ is an exception handling block.
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:79
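A hypothetical sketch using findDbgValues() together with the replaceVariableLocationOp() accessor listed earlier: collect the llvm.dbg.value users of one value and point them at another.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
// Hypothetical helper: retarget dbg.value records from OldV to NewV.
static void retargetDbgValues(Value *OldV, Value *NewV) {
  SmallVector<DbgValueInst *, 4> DbgValues;
  findDbgValues(DbgValues, OldV);
  for (DbgValueInst *DVI : DbgValues)
    DVI->replaceVariableLocationOp(OldV, NewV);
}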
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2033
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Align.
Definition: Alignment.h:197
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1923
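A minimal sketch of the range form of lower_bound on a sorted container:
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
void lowerBoundExample() {
  llvm::SmallVector<int, 4> V{1, 3, 5, 7};
  auto *It = llvm::lower_bound(V, 5); // iterator to the first element >= 5
  (void)It;
}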
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize the amount of space required.
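A hypothetical sketch of performOptimizedStructLayout(), assuming OptimizedStructLayoutField exposes an (Id, Size, Alignment) constructor that defaults to a flexible offset: lay out two fields and read back the total size, alignment, and the offsets the algorithm assigned.
#include "llvm/Support/Alignment.h"
#include "llvm/Support/OptimizedStructLayout.h"
using namespace llvm;
void structLayoutExample() {
  OptimizedStructLayoutField Fields[] = {
      {/*Id=*/nullptr, /*Size=*/1, Align(1)},
      {/*Id=*/nullptr, /*Size=*/8, Align(8)},
  };
  auto [TotalSize, MaxAlign] = performOptimizedStructLayout(Fields);
  for (const auto &F : Fields)
    (void)F.Offset; // offset assigned by the layout algorithm
  (void)TotalSize; (void)MaxAlign;
}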
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
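Worked examples for offsetToAlignment() and alignTo(): starting at offset 13 with 8-byte alignment, 3 bytes of padding are needed, and the padded offset is 16.
#include <cstdint>
#include "llvm/Support/Alignment.h"
using namespace llvm;
void paddingExample() {
  uint64_t Pad = offsetToAlignment(13, Align(8)); // 3
  uint64_t Next = alignTo(13, Align(8));          // 16
  (void)Pad; (void)Next;
}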
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1762
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From and To.
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:235
AsyncLoweringStorage AsyncLowering
Definition: CoroInternal.h:143
StructType * FrameTy
Definition: CoroInternal.h:98
AnyCoroIdRetconInst * getRetconCoroId() const
Definition: CoroInternal.h:151
CoroIdInst * getSwitchCoroId() const
Definition: CoroInternal.h:146
Instruction * getInsertPtAfterFramePtr() const
Definition: CoroInternal.h:240
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroInternal.h:78
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:453
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroInternal.h:79
AllocaInst * getPromiseAlloca() const
Definition: CoroInternal.h:234
bool OptimizeFrame
This would only be true if optimizations are enabled.
Definition: CoroInternal.h:105
SwitchLoweringStorage SwitchLowering
Definition: CoroInternal.h:141
CoroBeginInst * CoroBegin
Definition: CoroInternal.h:74
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:476
RetconLoweringStorage RetconLowering
Definition: CoroInternal.h:142
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroInternal.h:75
BasicBlock * AllocaSpillBlock
Definition: CoroInternal.h:102