//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines the resume and destroy
// parts of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
//===----------------------------------------------------------------------===//

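// For illustration only (simplified; exact signatures and attributes vary by
// ABI and target): under the switch lowering, a presplit coroutine
//
//   define ptr @f(i32 %n) presplitcoroutine { ... @llvm.coro.suspend ... }
//
// is split into the ramp function @f plus three outlined clones that share
// the coroutine frame:
//
//   define internal fastcc void @f.resume(ptr %frame)
//   define internal fastcc void @f.destroy(ptr %frame)
//   define internal fastcc void @f.cleanup(ptr %frame)
//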
#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the coroutine clones.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;
  Function *NewF;
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and
  /// async ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void salvageDebugInfo();
  void handleFinalSuspend();
};

} // end anonymous namespace

static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->splice(End->getIterator(), MustTailCallFuncBlock,
                       MustTailCall->getIterator());

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "switch coroutine should not return any values");
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto *CoroEnd = cast<CoroEndInst>(End);
    auto *RetTy = Shape.getResumeFunctionType()->getReturnType();

    if (!CoroEnd->hasResults()) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
      break;
    }

    auto *CoroResults = CoroEnd->getResults();
    unsigned NumReturns = CoroResults->numReturns();

    if (auto *RetStructTy = dyn_cast<StructType>(RetTy)) {
      assert(RetStructTy->getNumElements() == NumReturns &&
             "numbers of returns should match resume function signature");
      Value *ReturnValue = UndefValue::get(RetStructTy);
      unsigned Idx = 0;
      for (Value *RetValEl : CoroResults->return_values())
        ReturnValue = Builder.CreateInsertValue(ReturnValue, RetValEl, Idx++);
      Builder.CreateRet(ReturnValue);
    } else if (NumReturns == 0) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
    } else {
      assert(NumReturns == 1);
      Builder.CreateRet(*CoroResults->retval_begin());
    }
    CoroResults->replaceAllUsesWith(
        ConstantTokenNone::get(CoroResults->getContext()));
    CoroResults->eraseFromParent();
    break;
  }

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "retcon coroutine should not return any values");
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

// Mark a coroutine as done, which implies that the coroutine is finished and
// never gets resumed.
//
// In the resume-switched ABI, the done state is represented by storing zero in
// ResumeFnAddr.
//
// NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the
// pointer to the frame in the split function is not stored in `Shape`.
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  auto *GepIndex = Builder.CreateStructGEP(
      Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
      Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
  Builder.CreateStore(NullPtr, GepIndex);

  // If the coroutine doesn't have an unwind coro end, we can omit the store to
  // the final suspend point, since we can infer that the coroutine is
  // suspended at the final suspend point from the nullness of ResumeFnAddr.
  // However, we can't skip it if the coroutine has an unwind coro end: a
  // coroutine that reaches an unwind coro end is considered suspended at the
  // final suspend point (its ResumeFnAddr is null), but it hasn't actually
  // completed yet. We need the IndexVal for the final suspend point to keep
  // the states distinguishable.
  if (Shape.SwitchLowering.HasUnwindCoroEnd &&
      Shape.SwitchLowering.HasFinalSuspend) {
    assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
           "The final suspend should only live in the last position of "
           "CoroSuspends.");
    ConstantInt *IndexVal = Shape.getIndex(Shape.CoroSuspends.size() - 1);
    auto *FinalIndex = Builder.CreateStructGEP(
        Shape.FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");

    Builder.CreateStore(IndexVal, FinalIndex);
  }
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once there is another language
    // which uses the Switch-Resumed style other than C++.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (!InResume)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add a cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}
409
410// Create an entry block for a resume function with a switch that will jump to
411// suspend points.
413 assert(Shape.ABI == coro::ABI::Switch);
414 LLVMContext &C = F.getContext();
415
416 // resume.entry:
417 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
418 // i32 2
419 // % index = load i32, i32* %index.addr
420 // switch i32 %index, label %unreachable [
421 // i32 0, label %resume.0
422 // i32 1, label %resume.1
423 // ...
424 // ]
425
426 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
427 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
428
429 IRBuilder<> Builder(NewEntry);
430 auto *FramePtr = Shape.FramePtr;
431 auto *FrameTy = Shape.FrameTy;
432 auto *GepIndex = Builder.CreateStructGEP(
433 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
434 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
435 auto *Switch =
436 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
437 Shape.SwitchLowering.ResumeSwitch = Switch;
438
439 size_t SuspendIndex = 0;
440 for (auto *AnyS : Shape.CoroSuspends) {
441 auto *S = cast<CoroSuspendInst>(AnyS);
442 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
443
444 // Replace CoroSave with a store to Index:
445 // %index.addr = getelementptr %f.frame... (index field number)
446 // store i32 %IndexVal, i32* %index.addr1
447 auto *Save = S->getCoroSave();
448 Builder.SetInsertPoint(Save);
449 if (S->isFinal()) {
450 // The coroutine should be marked done if it reaches the final suspend
451 // point.
452 markCoroutineAsDone(Builder, Shape, FramePtr);
453 } else {
454 auto *GepIndex = Builder.CreateStructGEP(
455 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
456 Builder.CreateStore(IndexVal, GepIndex);
457 }
458
460 Save->eraseFromParent();
461
462 // Split block before and after coro.suspend and add a jump from an entry
463 // switch:
464 //
465 // whateverBB:
466 // whatever
467 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
468 // switch i8 %0, label %suspend[i8 0, label %resume
469 // i8 1, label %cleanup]
470 // becomes:
471 //
472 // whateverBB:
473 // whatever
474 // br label %resume.0.landing
475 //
476 // resume.0: ; <--- jump from the switch in the resume.entry
477 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
478 // br label %resume.0.landing
479 //
480 // resume.0.landing:
481 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
482 // switch i8 % 1, label %suspend [i8 0, label %resume
483 // i8 1, label %cleanup]
484
485 auto *SuspendBB = S->getParent();
486 auto *ResumeBB =
487 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
488 auto *LandingBB = ResumeBB->splitBasicBlock(
489 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
490 Switch->addCase(IndexVal, ResumeBB);
491
492 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
493 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "");
494 PN->insertBefore(LandingBB->begin());
495 S->replaceAllUsesWith(PN);
496 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
497 PN->addIncoming(S, ResumeBB);
498
499 ++SuspendIndex;
500 }
501
502 Builder.SetInsertPoint(UnreachBB);
503 Builder.CreateUnreachable();
504
505 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
506}

// In the resume function, we remove the last case (when coro::Shape is built,
// the final suspend point (if present) is always the last element of the
// CoroSuspends array) since it is undefined behavior to resume a coroutine
// suspended at the final suspend point.
// In the destroy function, we can also remove the last case as long as it is
// impossible for ResumeFnAddr to be null while the coroutine is not actually
// suspended at the final suspend point (that situation can arise because a
// coroutine whose promise.unhandled_exception() exits via an exception is
// considered suspended at the final suspend point).
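//
// For illustration only (block names simplified): when the final-suspend case
// is removed from the switch, the destroy clone built below dispatches on the
// nullness of the stored resume pointer instead:
//
//   %ResumeFn = load ptr, ptr %ResumeFn.addr
//   %done = icmp eq ptr %ResumeFn, null
//   br i1 %done, label %final.suspend.case, label %Switch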
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);

  if (isSwitchDestroyFunction() && Shape.SwitchLowering.HasUnwindCoroEnd)
    return;

  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());

    if (NewF->isCoroOnlyDestroyWhenComplete()) {
      // When the coroutine can only be destroyed when complete, we don't need
      // to generate code for other cases.
      Builder.CreateBr(ResumeBB);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          Shape.FrameTy, NewFramePtr, coro::Shape::SwitchFieldIndex::Resume,
          "ResumeFn.addr");
      auto *Load =
          Builder.CreateLoad(Shape.getSwitchResumePointerType(), GepIndex);
      auto *Cond = Builder.CreateIsNull(Load);
      Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    }
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

static FunctionType *
getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}

static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = PoisonValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot)
      return CachedSlot;

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

/// Returns all DbgVariableIntrinsic in F.
static SmallVector<DbgVariableIntrinsic *, 8>
collectDbgVariableIntrinsics(Function &F) {
  SmallVector<DbgVariableIntrinsic *, 8> Intrinsics;
  for (auto &I : instructions(F))
    if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
      Intrinsics.push_back(DVI);
  return Intrinsics;
}

void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void CoroCloner::salvageDebugInfo() {
  SmallVector<DbgVariableIntrinsic *, 8> Worklist =
      collectDbgVariableIntrinsics(*NewF);
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;

  // Only 64-bit ABIs have a register we can refer to with the entry value.
  bool UseEntryValue =
      llvm::Triple(OrigF.getParent()->getTargetTriple()).isArch64Bit();
  for (DbgVariableIntrinsic *DVI : Worklist)
    coro::salvageDebugInfo(ArgToAllocaMap, DVI, Shape.OptimizeFrame,
                           UseEntryValue);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
                                   &DomTree);
  };
  for (DbgVariableIntrinsic *DVI : Worklist) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  }
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(ContextIdx);
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
                                             ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return FramePtrAddr;
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = PointerType::getUnqual(Shape.FrameTy->getContext());

    // If the storage is inline, just use the storage as the frame.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return NewStorage;

    // Otherwise, load the real frame from the opaque storage.
    return Builder.CreateLoad(FramePtrTy, NewStorage);
  }
  }
  llvm_unreachable("bad ABI");
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex, uint64_t Size,
                                 Align Alignment, bool NoAlias) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoUndef);

  if (NoAlias)
    ParamAttrs.addAttribute(Attribute::NoAlias);

  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftAsync);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
                              unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftSelf);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end(), ActiveSuspend);
  }

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(new FreezeInst(PoisonValue::get(A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // For async functions / continuations, adjust the scope line of the
  // clone to the line number of the suspend point. However, only
  // adjust the scope line when the files are the same. This ensures
  // line number and file name belong together. The scope line is
  // associated with all pre-prologue instructions. This avoids a jump
  // in the linetable from the function declaration to the suspend point.
  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    if (ActiveSuspend)
      if (auto DL = ActiveSuspend->getDebugLoc())
        if (SP->getFile() == DL->getFile())
          SP->setScopeLine(DL->getLine());
    // Update the linkage name to reflect the modified symbol name. It
    // is necessary to update the linkage name in Swift, since the
    // mangling changes for resume functions. It might also be the
    // right thing to do in C++, but due to a limitation in LLVM's
    // AsmPrinter we can only do this if the function doesn't have an
    // abstract specification, since the DWARF backend expects the
    // abstract specification to contain the linkage name and asserts
    // that they are identical.
    if (SP->getUnit() &&
        SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift) {
      SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
      if (auto *Decl = SP->getDeclaration()) {
        auto *NewDecl = DISubprogram::get(
            Decl->getContext(), Decl->getScope(), Decl->getName(),
            NewF->getName(), Decl->getFile(), Decl->getLine(), Decl->getType(),
            Decl->getScopeLine(), Decl->getContainingType(),
            Decl->getVirtualIndex(), Decl->getThisAdjustment(),
            Decl->getFlags(), Decl->getSPFlags(), Decl->getUnit(),
            Decl->getTemplateParams(), nullptr, Decl->getRetainedNodes(),
            Decl->getThrownTypes(), Decl->getAnnotations(),
            Decl->getTargetFuncName());
        SP->replaceDeclaration(NewDecl);
      }
    }
  }

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(NewAttrs, Context, 0, Shape.FrameSize,
                         Shape.FrameAlign, /*NoAlias=*/false);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
                                Attribute::SwiftAsync)) {
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);

      // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    /// FIXME: Is it really good to add the NoAlias attribute?
    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment(),
                         /*NoAlias=*/true);

    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail calls at all suspend points followed
  // by a return. Don't change returns to unreachable because that will trip
  // up the verifier. These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, PointerType::getUnqual(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(PoisonValue::get(DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling, as it is not done via the switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume the coroutine suspended at the final
    // suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block, and replacing coro.suspend with an appropriate value to
// force resume or cleanup control flow at every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  for (CoroAlignInst *CA : Shape.CoroAligns) {
    CA->replaceAllUsesWith(
        ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
    CA->eraseFromParent();
  }

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to functions provided
// and set the Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                   [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, PointerType::getUnqual(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
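//
// For illustration only (names simplified): for a coroutine @f under the
// switch ABI, the stores emitted below look roughly like:
//
//   store ptr @f.resume, ptr %resume.addr
//   %sel = select i1 %coro.alloc, ptr @f.destroy, ptr @f.cleanup
//   store ptr %sel, ptr %destroy.addr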
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());

  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elided the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}

// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. Suspends are represented by a switch, so track the PHI
// values and select the correct case successor when possible; see the
// illustrative snippet below.
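//
// For illustration only (simplified): once coro.suspend has been replaced by
// a constant in a clone, a chain such as
//
//     br label %bb1
//   bb1:
//     %cmp = icmp eq i8 %index, 0   ; %index resolvable via ResolvedValues
//     br i1 %cmp, label %bb.ret, label %bb.other
//   bb.ret:
//     ret void
//
// can be collapsed so that the original terminator becomes `ret void`, which
// in turn lets the preceding resume call carry the musttail marker.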
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  // There is nothing to simplify.
  if (isa<ReturnInst>(InitialInst))
    return false;

  DenseMap<Value *, Value *> ResolvedValues;
  assert(InitialInst->getModule());
  const DataLayout &DL = InitialInst->getModule()->getDataLayout();

  auto GetFirstValidInstruction = [](Instruction *I) {
    while (I) {
      // A BitCastInst wouldn't generate actual code, so we can skip it.
      if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
          I->isLifetimeStartOrEnd())
        I = I->getNextNode();
      else if (isInstructionTriviallyDead(I))
        // While we are in the middle of the transformation, we need to erase
        // the dead instruction manually.
        I = &*I->eraseFromParent();
      else
        break;
    }
    return I;
  };

  auto TryResolveConstant = [&ResolvedValues](Value *V) {
    auto It = ResolvedValues.find(V);
    if (It != ResolvedValues.end())
      V = It->second;
    return dyn_cast<ConstantInt>(V);
  };

  Instruction *I = InitialInst;
  while (I->isTerminator() || isa<CmpInst>(I)) {
    if (isa<ReturnInst>(I)) {
      ReplaceInstWithInst(InitialInst, I->clone());
      return true;
    }

    if (auto *BR = dyn_cast<BranchInst>(I)) {
      unsigned SuccIndex = 0;
      if (BR->isConditional()) {
        // Handle the case where the condition of the conditional branch is
        // constant, e.g.,
        //
        //     br i1 false, label %cleanup, label %CoroEnd
        //
        // This is possible during the transformation, and we can continue
        // simplifying in this case.
        ConstantInt *Cond = TryResolveConstant(BR->getCondition());
        if (!Cond)
          return false;

        SuccIndex = Cond->isOne() ? 0 : 1;
      }

      BasicBlock *Succ = BR->getSuccessor(SuccIndex);
      scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
      I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());

      continue;
    }

    if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      // If the case count of a suspended switch instruction is reduced to
      // 1, then it is simplified to a CmpInst in llvm::ConstantFoldTerminator.
      auto *BR = dyn_cast<BranchInst>(
          GetFirstValidInstruction(CondCmp->getNextNode()));
      if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
        return false;

      // The comparison looks like: %cond = icmp eq i8 %V, constant.
      // So we try to resolve a constant for the first operand only, since the
      // second operand should be a literal constant by design.
      ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
      auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
      if (!Cond0 || !Cond1)
        return false;

      // Both operands of the CmpInst are Constant, so we can evaluate it
      // immediately to get the destination.
      auto *ConstResult =
          dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
              CondCmp->getPredicate(), Cond0, Cond1, DL));
      if (!ConstResult)
        return false;

      ResolvedValues[BR->getCondition()] = ConstResult;

      // Handle this branch in the next iteration.
      I = BR;
      continue;
    }

    if (auto *SI = dyn_cast<SwitchInst>(I)) {
      ConstantInt *Cond = TryResolveConstant(SI->getCondition());
      if (!Cond)
        return false;

      BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
      scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
      I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
      continue;
    }

    return false;
  }

  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of the resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,  Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,  Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttr(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
// for symmetric coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine, which
// has an identical signature and calling convention to the coro.resume call;
// see the illustrative snippet below.
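//
// For illustration only (simplified): after this transformation, a resume
// function that immediately resumes another coroutine ends in
//
//   %addr = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)
//   musttail call fastcc void %addr(ptr %hdl)
//   ret void
//
// so the transfer happens as a guaranteed tail call and does not grow the
// stack.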
static void addMustTailToCoroResumes(Function &F, TargetTransformInfo &TTI) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    // Skip targets which don't support tail calls in this specific case.
    if (TTI.supportsTailCallFor(Call) &&
        simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// The coroutine has no suspend points. Remove the heap allocation for the
// coroutine frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(Frame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine, and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow, as in the
// illustrative snippet below.
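//
// For illustration only (simplified): a pattern such as
//
//   %addr = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0) ; resume fn
//   call fastcc void %addr(ptr %hdl)
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// resumes this very coroutine, so the suspend is known to be immediately
// resumed; the suspend and the call can be folded away, replaced by a direct
// branch to the resume path (raw index 0) or the cleanup path (raw index 1).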
1573 CoroBeginInst *CoroBegin) {
1574 Instruction *Prev = Suspend->getPrevNode();
1575 if (!Prev) {
1576 auto *Pred = Suspend->getParent()->getSinglePredecessor();
1577 if (!Pred)
1578 return false;
1579 Prev = Pred->getTerminator();
1580 }
1581
1582 CallBase *CB = dyn_cast<CallBase>(Prev);
1583 if (!CB)
1584 return false;
1585
1586 auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1587
1588 // See if the callsite is for resumption or destruction of the coroutine.
1589 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1590 if (!SubFn)
1591 return false;
1592
1593 // Does not refer to the current coroutine, we cannot do anything with it.
1594 if (SubFn->getFrame() != CoroBegin)
1595 return false;
1596
1597 // See if the transformation is safe. Specifically, see if there are any
1598 // calls in between Save and CallInstr. They can potenitally resume the
1599 // coroutine rendering this optimization unsafe.
1600 auto *Save = Suspend->getCoroSave();
1601 if (hasCallsBetween(Save, CB))
1602 return false;
1603
1604 // Replace llvm.coro.suspend with the value that results in resumption over
1605 // the resume or cleanup path.
1606 Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1607 Suspend->eraseFromParent();
1608 Save->eraseFromParent();
1609
1610 // No longer need a call to coro.resume or coro.destroy.
1611 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1612 BranchInst::Create(Invoke->getNormalDest(), Invoke);
1613 }
1614
1615 // Grab the CalledValue from CB before erasing the CallInstr.
1616 auto *CalledValue = CB->getCalledOperand();
1617 CB->eraseFromParent();
1618
1619 // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1620 if (CalledValue != SubFn && CalledValue->user_empty())
1621 if (auto *I = dyn_cast<Instruction>(CalledValue))
1622 I->eraseFromParent();
1623
1624 // Now we are good to remove SubFn.
1625 if (SubFn->user_empty())
1626 SubFn->eraseFromParent();
1627
1628 return true;
1629}
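//
// As a rough before/after sketch of this rewrite (values hypothetical):
//
//   %save = call token @llvm.coro.save(ptr %hdl)
//   %f = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)  ; 0 selects resume
//   call fastcc void %f(ptr %hdl)
//   %sp = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// becomes straight-line code: %sp is replaced by the constant index 0 (the
// resume path), and the save, the suspend and the resume call are erased.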
1630
1631// Remove suspend points that are simplified.
1632static void simplifySuspendPoints(coro::Shape &Shape) {
1633 // Currently, the only simplification we do is switch-lowering-specific.
1634 if (Shape.ABI != coro::ABI::Switch)
1635 return;
1636
1637 auto &S = Shape.CoroSuspends;
1638 size_t I = 0, N = S.size();
1639 if (N == 0)
1640 return;
1641
1642 size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
1643 while (true) {
1644 auto SI = cast<CoroSuspendInst>(S[I]);
1645 // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1646 // to resume a coroutine suspended at the final suspend point.
1647 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1648 if (--N == I)
1649 break;
1650
1651 std::swap(S[I], S[N]);
1652
1653 if (cast<CoroSuspendInst>(S[I])->isFinal()) {
1654 assert(Shape.SwitchLowering.HasFinalSuspend);
1655 ChangedFinalIndex = I;
1656 }
1657
1658 continue;
1659 }
1660 if (++I == N)
1661 break;
1662 }
1663 S.resize(N);
1664
1665 // Maintain final.suspend in case the final suspend was swapped, since we
1666 // require the final suspend to be the last element of CoroSuspends.
1667 if (ChangedFinalIndex < N) {
1668 assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
1669 std::swap(S[ChangedFinalIndex], S.back());
1670 }
1671}
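//
// A worked example of the compaction above (suspends hypothetical): with
// S = [s0, s1, s2, final] and only s1 simplified away, s1 is swapped with
// the last live element and dropped by the resize, leaving [s0, final, s2];
// ChangedFinalIndex records slot 1, so the trailing swap then restores the
// invariant that the final suspend is the last element: [s0, s2, final].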
1672
1673static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1674 SmallVectorImpl<Function *> &Clones,
1675 TargetTransformInfo &TTI) {
1676 assert(Shape.ABI == coro::ABI::Switch);
1677
1678 createResumeEntryBlock(F, Shape);
1679 auto ResumeClone = createClone(F, ".resume", Shape,
1680 CoroCloner::Kind::SwitchResume);
1681 auto DestroyClone = createClone(F, ".destroy", Shape,
1682 CoroCloner::Kind::SwitchUnwind);
1683 auto CleanupClone = createClone(F, ".cleanup", Shape,
1684 CoroCloner::Kind::SwitchCleanup);
1685
1686 postSplitCleanup(*ResumeClone);
1687 postSplitCleanup(*DestroyClone);
1688 postSplitCleanup(*CleanupClone);
1689
1690 // Add musttail calls to support symmetric transfer. Skip targets that
1691 // don't support tail calls.
1692 //
1693 // FIXME: Could we support symmetric transfer effectively without musttail
1694 // call?
1695 if (TTI.supportsTailCalls())
1696 addMustTailToCoroResumes(*ResumeClone, TTI);
1697
1698 // Store the addresses of the resume/destroy/cleanup functions in the coroutine frame.
1699 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1700
1701 assert(Clones.empty());
1702 Clones.push_back(ResumeClone);
1703 Clones.push_back(DestroyClone);
1704 Clones.push_back(CleanupClone);
1705
1706 // Create a constant array referring to the resume/destroy/cleanup functions,
1707 // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1708 // pass can determine the correct function to call.
1709 setCoroInfo(F, Shape, Clones);
1710}
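//
// As a sketch of the overall effect (names follow the suffixes above): a
// presplit coroutine @f is cloned into three funclets sharing one frame,
//
//   @f.resume  -- continues execution at the suspend index stored in the frame
//   @f.destroy -- the unwind/destroy path, deallocating the frame
//   @f.cleanup -- like destroy, but for a frame whose allocation was elided
//
// and the resume/destroy pointers are written into the frame, so resumption
// becomes an indirect call through the frame.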
1711
1712static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1713 Value *Continuation) {
1714 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1715 auto &Context = Suspend->getParent()->getParent()->getContext();
1716 auto *Int8PtrTy = PointerType::getUnqual(Context);
1717
1718 IRBuilder<> Builder(ResumeIntrinsic);
1719 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1720 ResumeIntrinsic->replaceAllUsesWith(Val);
1721 ResumeIntrinsic->eraseFromParent();
1722 Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1723 UndefValue::get(Int8PtrTy));
1724}
1725
1726/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1727static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1728 ArrayRef<Value *> FnArgs,
1729 SmallVectorImpl<Value *> &CallArgs) {
1730 size_t ArgIdx = 0;
1731 for (auto *paramTy : FnTy->params()) {
1732 assert(ArgIdx < FnArgs.size());
1733 if (paramTy != FnArgs[ArgIdx]->getType())
1734 CallArgs.push_back(
1735 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1736 else
1737 CallArgs.push_back(FnArgs[ArgIdx]);
1738 ++ArgIdx;
1739 }
1740}
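//
// E.g. (types hypothetical): an i64 argument passed where the musttail
// callee expects a ptr parameter gets an inttoptr; arguments whose types
// already match are forwarded unchanged. CreateBitOrPointerCast picks the
// appropriate cast for each pair.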
1741
1742CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1743 ArrayRef<Value *> Arguments,
1744 IRBuilder<> &Builder) {
1745 auto *FnTy = MustTailCallFn->getFunctionType();
1746 // Coerce the arguments; LLVM optimizations seem to ignore the types in
1747 // vararg functions and throw away casts in optimized mode.
1748 SmallVector<Value *, 8> CallArgs;
1749 coerceArguments(Builder, FnTy, Arguments, CallArgs);
1750
1751 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1752 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1753 TailCall->setDebugLoc(Loc);
1754 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1755 return TailCall;
1756}
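//
// Illustrative use (names hypothetical): for a resume target @next_fn the
// helper produces
//
//   musttail call void @next_fn(ptr %ctx)
//
// with the calling convention copied from the callee; the caller is then
// expected to emit the terminating 'ret void' itself, as the async split
// below does.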
1757
1758static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1759 SmallVectorImpl<Function *> &Clones) {
1760 assert(Shape.ABI == coro::ABI::Async);
1761 assert(Clones.empty());
1762 // Reset various things that the optimizer might have decided it
1763 // "knows" about the coroutine function due to not seeing a return.
1764 F.removeFnAttr(Attribute::NoReturn);
1765 F.removeRetAttr(Attribute::NoAlias);
1766 F.removeRetAttr(Attribute::NonNull);
1767
1768 auto &Context = F.getContext();
1769 auto *Int8PtrTy = PointerType::getUnqual(Context);
1770
1771 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1772 IRBuilder<> Builder(Id);
1773
1774 auto *FramePtr = Id->getStorage();
1775 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1776 FramePtr = Builder.CreateConstInBoundsGEP1_32(
1777 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1778 "async.ctx.frameptr");
1779
1780 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1781 {
1782 // Make sure we don't invalidate Shape.FramePtr.
1783 TrackingVH<Value> Handle(Shape.FramePtr);
1784 Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1785 Shape.FramePtr = Handle.getValPtr();
1786 }
1787
1788 // Create all the functions in order after the main function.
1789 auto NextF = std::next(F.getIterator());
1790
1791 // Create a continuation function for each of the suspend points.
1792 Clones.reserve(Shape.CoroSuspends.size());
1793 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1794 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1795
1796 // Create the clone declaration.
1797 auto ResumeNameSuffix = ".resume.";
1798 auto ProjectionFunctionName =
1799 Suspend->getAsyncContextProjectionFunction()->getName();
1800 bool UseSwiftMangling = false;
1801 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1802 ResumeNameSuffix = "TQ";
1803 UseSwiftMangling = true;
1804 } else if (ProjectionFunctionName.equals(
1805 "__swift_async_resume_get_context")) {
1806 ResumeNameSuffix = "TY";
1807 UseSwiftMangling = true;
1808 }
1809 auto *Continuation = createCloneDeclaration(
1810 F, Shape,
1811 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1812 : ResumeNameSuffix + Twine(Idx),
1813 NextF, Suspend);
1814 Clones.push_back(Continuation);
1815
1816 // Insert a branch to a new return block immediately before the suspend
1817 // point.
1818 auto *SuspendBB = Suspend->getParent();
1819 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1820 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1821
1822 // Place it before the first suspend.
1823 auto *ReturnBB =
1824 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1825 Branch->setSuccessor(0, ReturnBB);
1826
1827 IRBuilder<> Builder(ReturnBB);
1828
1829 // Insert the call to the tail call function and inline it.
1830 auto *Fn = Suspend->getMustTailCallFunction();
1831 SmallVector<Value *, 8> Args(Suspend->args());
1832 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1833 CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1834 auto *TailCall =
1835 coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1836 Builder.CreateRetVoid();
1837 InlineFunctionInfo FnInfo;
1838 auto InlineRes = InlineFunction(*TailCall, FnInfo);
1839 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1840 (void)InlineRes;
1841
1842 // Replace the llvm.coro.async.resume intrinsic call.
1843 replaceAsyncResumeFunction(Suspend, Continuation);
1844 }
1845
1846 assert(Clones.size() == Shape.CoroSuspends.size());
1847 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1848 auto *Suspend = Shape.CoroSuspends[Idx];
1849 auto *Clone = Clones[Idx];
1850
1851 CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1852 }
1853}
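//
// Net effect, roughly (names illustrative): code such as
//
//   %r = call {ptr} (...) @llvm.coro.suspend.async(...)
//   ; ... code after the suspend ...
//
// is split so that the original function now ends in a musttail call to the
// next async function followed by 'ret void', while everything after the
// suspend moves into a continuation '@f.resume.N' (or a Swift-mangled
// 'TQN_'/'TYN_' suffix when a known Swift context projection is in play).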
1854
1855static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1856 SmallVectorImpl<Function *> &Clones) {
1857 assert(Shape.ABI == coro::ABI::Retcon ||
1858 Shape.ABI == coro::ABI::RetconOnce);
1859 assert(Clones.empty());
1860
1861 // Reset various things that the optimizer might have decided it
1862 // "knows" about the coroutine function due to not seeing a return.
1863 F.removeFnAttr(Attribute::NoReturn);
1864 F.removeRetAttr(Attribute::NoAlias);
1865 F.removeRetAttr(Attribute::NonNull);
1866
1867 // Allocate the frame.
1868 auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1869 Value *RawFramePtr;
1870 if (Shape.RetconLowering.IsFrameInlineInStorage) {
1871 RawFramePtr = Id->getStorage();
1872 } else {
1873 IRBuilder<> Builder(Id);
1874
1875 // Determine the size of the frame.
1876 const DataLayout &DL = F.getParent()->getDataLayout();
1877 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1878
1879 // Allocate. We don't need to update the call graph node because we're
1880 // going to recompute it from scratch after splitting.
1881 // FIXME: pass the required alignment
1882 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1883 RawFramePtr =
1884 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1885
1886 // Stash the allocated frame pointer in the continuation storage.
1887 Builder.CreateStore(RawFramePtr, Id->getStorage());
1888 }
1889
1890 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1891 {
1892 // Make sure we don't invalidate Shape.FramePtr.
1893 TrackingVH<Value> Handle(Shape.FramePtr);
1894 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1895 Shape.FramePtr = Handle.getValPtr();
1896 }
1897
1898 // Create a unique return block.
1899 BasicBlock *ReturnBB = nullptr;
1900 SmallVector<PHINode *, 4> ReturnPHIs;
1901
1902 // Create all the functions in order after the main function.
1903 auto NextF = std::next(F.getIterator());
1904
1905 // Create a continuation function for each of the suspend points.
1906 Clones.reserve(Shape.CoroSuspends.size());
1907 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1908 auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1909
1910 // Create the clone declaration.
1911 auto Continuation =
1912 createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1913 Clones.push_back(Continuation);
1914
1915 // Insert a branch to the unified return block immediately before
1916 // the suspend point.
1917 auto SuspendBB = Suspend->getParent();
1918 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1919 auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1920
1921 // Create the unified return block.
1922 if (!ReturnBB) {
1923 // Place it before the first suspend.
1924 ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1925 NewSuspendBB);
1926 Shape.RetconLowering.ReturnBlock = ReturnBB;
1927
1928 IRBuilder<> Builder(ReturnBB);
1929
1930 // Create PHIs for all the return values.
1931 assert(ReturnPHIs.empty());
1932
1933 // First, the continuation.
1934 ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1935 Shape.CoroSuspends.size()));
1936
1937 // Next, all the directly-yielded values.
1938 for (auto *ResultTy : Shape.getRetconResultTypes())
1939 ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1940 Shape.CoroSuspends.size()));
1941
1942 // Build the return value.
1943 auto RetTy = F.getReturnType();
1944
1945 // Cast the continuation value if necessary.
1946 // We can't rely on the types matching up because that type would
1947 // have to be infinite.
1948 auto CastedContinuationTy =
1949 (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1950 auto *CastedContinuation =
1951 Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1952
1953 Value *RetV;
1954 if (ReturnPHIs.size() == 1) {
1955 RetV = CastedContinuation;
1956 } else {
1957 RetV = PoisonValue::get(RetTy);
1958 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1959 for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1960 RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1961 }
1962
1963 Builder.CreateRet(RetV);
1964 }
1965
1966 // Branch to the return block.
1967 Branch->setSuccessor(0, ReturnBB);
1968 ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1969 size_t NextPHIIndex = 1;
1970 for (auto &VUse : Suspend->value_operands())
1971 ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1972 assert(NextPHIIndex == ReturnPHIs.size());
1973 }
1974
1975 assert(Clones.size() == Shape.CoroSuspends.size());
1976 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1977 auto Suspend = Shape.CoroSuspends[i];
1978 auto Clone = Clones[i];
1979
1980 CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1981 }
1982}
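//
// For a retcon coroutine whose split functions return, say, { ptr, i32 }
// (hypothetical), the unified return block built above looks roughly like:
//
//   coro.return:
//     %cont = phi ptr [ @f.resume.0, %susp0 ], [ @f.resume.1, %susp1 ]
//     %val  = phi i32 [ %v0, %susp0 ], [ %v1, %susp1 ]
//     %agg0 = insertvalue { ptr, i32 } poison, ptr %cont, 0
//     %agg1 = insertvalue { ptr, i32 } %agg0, i32 %val, 1
//     ret { ptr, i32 } %agg1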
1983
1984namespace {
1985 class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1986 Function &F;
1987 public:
1988 PrettyStackTraceFunction(Function &F) : F(F) {}
1989 void print(raw_ostream &OS) const override {
1990 OS << "While splitting coroutine ";
1991 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1992 OS << "\n";
1993 }
1994 };
1995}
1996
1997static coro::Shape
1998 splitCoroutine(Function &F, SmallVectorImpl<Function *> &Clones,
1999 TargetTransformInfo &TTI, bool OptimizeFrame,
2000 std::function<bool(Instruction &)> MaterializableCallback) {
2001 PrettyStackTraceFunction prettyStackTrace(F);
2002
2003 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
2004 // up by uses in unreachable blocks, so remove them as a first pass.
2005 removeUnreachableBlocks(F);
2006
2007 coro::Shape Shape(F, OptimizeFrame);
2008 if (!Shape.CoroBegin)
2009 return Shape;
2010
2011 simplifySuspendPoints(Shape);
2012 buildCoroutineFrame(F, Shape, MaterializableCallback);
2013 replaceFrameSizeAndAlignment(Shape);
2014
2015 // If there are no suspend points, no split is required; just remove
2016 // the allocation and deallocation blocks, they are not needed.
2017 if (Shape.CoroSuspends.empty()) {
2018 handleNoSuspendCoroutine(Shape);
2019 } else {
2020 switch (Shape.ABI) {
2021 case coro::ABI::Switch:
2022 splitSwitchCoroutine(F, Shape, Clones, TTI);
2023 break;
2024 case coro::ABI::Async:
2025 splitAsyncCoroutine(F, Shape, Clones);
2026 break;
2027 case coro::ABI::Retcon:
2028 case coro::ABI::RetconOnce:
2029 splitRetconCoroutine(F, Shape, Clones);
2030 break;
2031 }
2032 }
2033
2034 // Replace all the swifterror operations in the original function.
2035 // This invalidates SwiftErrorOps in the Shape.
2036 replaceSwiftErrorOps(F, Shape, nullptr);
2037
2038 // Salvage debug intrinsics that point into the coroutine frame in the
2039 // original function. The Cloner has already salvaged debug info in the new
2040 // coroutine funclets.
2041 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
2042 for (auto *DDI : collectDbgVariableIntrinsics(F))
2043 coro::salvageDebugInfo(ArgToAllocaMap, DDI, Shape.OptimizeFrame,
2044 false /*UseEntryValue*/);
2045
2046 return Shape;
2047}
2048
2049/// Remove calls to llvm.coro.end in the original function.
2050static void removeCoroEnds(const coro::Shape &Shape) {
2051 for (auto *End : Shape.CoroEnds) {
2052 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, nullptr);
2053 }
2054}
2055
2056static void updateCallGraphAfterCoroutineSplit(
2057 LazyCallGraph::Node &N, const coro::Shape &Shape,
2058 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
2059 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
2060 FunctionAnalysisManager &FAM) {
2061 if (!Shape.CoroBegin)
2062 return;
2063
2064 if (Shape.ABI != coro::ABI::Switch)
2065 removeCoroEnds(Shape);
2066 else {
2067 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
2068 auto &Context = End->getContext();
2069 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
2070 End->eraseFromParent();
2071 }
2072 }
2073
2074 if (!Clones.empty()) {
2075 switch (Shape.ABI) {
2076 case coro::ABI::Switch:
2077 // Each clone in the Switch lowering is independent of the other clones.
2078 // Let the LazyCallGraph know about each one separately.
2079 for (Function *Clone : Clones)
2080 CG.addSplitFunction(N.getFunction(), *Clone);
2081 break;
2082 case coro::ABI::Async:
2083 case coro::ABI::Retcon:
2084 case coro::ABI::RetconOnce:
2085 // Each clone in the Async/Retcon lowering references the other clones.
2086 // Let the LazyCallGraph know about all of them at once.
2087 if (!Clones.empty())
2088 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
2089 break;
2090 }
2091
2092 // Let the CGSCC infra handle the changes to the original function.
2093 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
2094 }
2095
2096 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
2097 // to the split functions.
2098 postSplitCleanup(N.getFunction());
2099 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
2100}
2101
2102/// Replace a call to llvm.coro.prepare.retcon.
2103 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2104 LazyCallGraph::SCC &C) {
2105 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2106 auto Fn = CastFn->stripPointerCasts(); // as its original type
2107
2108 // Attempt to peephole this pattern:
2109 // %0 = bitcast [[TYPE]] @some_function to i8*
2110 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2111 // %2 = bitcast %1 to [[TYPE]]
2112 // ==>
2113 // %2 = @some_function
2114 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2115 // Look for bitcasts back to the original function type.
2116 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2117 if (!Cast || Cast->getType() != Fn->getType())
2118 continue;
2119
2120 // Replace and remove the cast.
2121 Cast->replaceAllUsesWith(Fn);
2122 Cast->eraseFromParent();
2123 }
2124
2125 // Replace any remaining uses with the function as an i8*.
2126 // This can never directly be a callee, so we don't need to update CG.
2127 Prepare->replaceAllUsesWith(CastFn);
2128 Prepare->eraseFromParent();
2129
2130 // Kill dead bitcasts.
2131 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2132 if (!Cast->use_empty())
2133 break;
2134 CastFn = Cast->getOperand(0);
2135 Cast->eraseFromParent();
2136 }
2137}
2138
2139 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2140 LazyCallGraph::SCC &C) {
2141 bool Changed = false;
2142 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2143 // Intrinsics can only be used in calls.
2144 auto *Prepare = cast<CallInst>(P.getUser());
2145 replacePrepare(Prepare, CG, C);
2146 Changed = true;
2147 }
2148
2149 return Changed;
2150}
2151
2152 static void addPrepareFunction(const Module &M,
2153 SmallVectorImpl<Function *> &Fns,
2154 StringRef Name) {
2155 auto *PrepareFn = M.getFunction(Name);
2156 if (PrepareFn && !PrepareFn->use_empty())
2157 Fns.push_back(PrepareFn);
2158}
2159
2160CoroSplitPass::CoroSplitPass(bool OptimizeFrame)
2161 : MaterializableCallback(coro::defaultMaterializable),
2162 OptimizeFrame(OptimizeFrame) {}
2163
2164PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2165 CGSCCAnalysisManager &AM,
2166 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2167 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2168 // non-zero number of nodes, so we assume that here and grab the first
2169 // node's function's module.
2170 Module &M = *C.begin()->getFunction().getParent();
2171 auto &FAM =
2172 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2173
2174 // Check for uses of llvm.coro.prepare.retcon/async.
2175 SmallVector<Function *, 2> PrepareFns;
2176 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2177 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2178
2179 // Find coroutines for processing.
2180 SmallVector<LazyCallGraph::Node *> Coroutines;
2181 for (LazyCallGraph::Node &N : C)
2182 if (N.getFunction().isPresplitCoroutine())
2183 Coroutines.push_back(&N);
2184
2185 if (Coroutines.empty() && PrepareFns.empty())
2186 return PreservedAnalyses::all();
2187
2188 if (Coroutines.empty()) {
2189 for (auto *PrepareFn : PrepareFns) {
2190 replaceAllPrepares(PrepareFn, CG, C);
2191 }
2192 }
2193
2194 // Split all the coroutines.
2195 for (LazyCallGraph::Node *N : Coroutines) {
2196 Function &F = N->getFunction();
2197 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2198 << "'\n");
2199 F.setSplittedCoroutine();
2200
2201 SmallVector<Function *, 4> Clones;
2202 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2203 const coro::Shape Shape =
2204 splitCoroutine(F, Clones, FAM.getResult<TargetIRAnalysis>(F),
2205 OptimizeFrame, MaterializableCallback);
2206 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2207
2208 ORE.emit([&]() {
2209 return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
2210 << "Split '" << ore::NV("function", F.getName())
2211 << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
2212 << ", align=" << ore::NV("align", Shape.FrameAlign.value()) << ")";
2213 });
2214
2215 if (!Shape.CoroSuspends.empty()) {
2216 // Run the CGSCC pipeline on the original and newly split functions.
2217 UR.CWorklist.insert(&C);
2218 for (Function *Clone : Clones)
2219 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2220 }
2221 }
2222
2223 if (!PrepareFns.empty()) {
2224 for (auto *PrepareFn : PrepareFns) {
2225 replaceAllPrepares(PrepareFn, CG, C);
2226 }
2227 }
2228
2229 return PreservedAnalyses::none();
2230}
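//
// For reference (usage illustrative): this pass is registered as
// "coro-split" and can be exercised in isolation with
//
//   opt -passes='cgscc(coro-split)' -S input.ll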