Bug Summary

File: llvm/lib/Transforms/Coroutines/CoroSplit.cpp
Warning: line 489, column 19
Called C++ object pointer is null
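
In essence, the path the analyzer reports (events 19-34 in the annotated listing below) is: the switch-lowering CoroCloner constructor leaves ActiveSuspend null, CoroCloner::create() forwards it to createCloneDeclaration, and a branch that assumes Shape.ABI == coro::ABI::Async passes the null pointer into getFunctionTypeFromAsyncSuspend, which dereferences it at line 489. The following self-contained C++ sketch is a hypothetical distillation of that flow; the simplified types only echo the real names and are not LLVM code:

    #include <cassert>

    enum class ABI { Switch, Async };
    struct Suspend { int Line = 0; };
    struct Shape { ABI A; };

    // Stands in for getFunctionTypeFromAsyncSuspend: dereferences its argument
    // without a null check (the report's event 34).
    int typeFromAsyncSuspend(Suspend *S) { return S->Line; }

    // Stands in for createCloneDeclaration: only touches the suspend pointer
    // on the Async branch (events 29-32).
    int cloneDeclaration(Shape &S, Suspend *Active) {
      return S.A == ABI::Async ? typeFromAsyncSuspend(Active) : 0;
    }

    struct Cloner {
      Shape &S;
      Suspend *ActiveSuspend = nullptr; // event 20: the switch-lowering ctor
                                        // leaves this null
      Cloner(Shape &Sh) : S(Sh) { assert(S.A == ABI::Switch); } // events 21/22
      int create() { return cloneDeclaration(S, ActiveSuspend); } // events 24/27
    };

    int main() {
      Shape Sh{ABI::Switch};
      return Cloner(Sh).create(); // event 19; safe at runtime on this path
    }

See also the note after createClone (line 1062) on why the reported path appears self-contradictory.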

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CoroSplit.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/Coroutines -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
11// We present a coroutine to LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to the initial, resume and destroy invocations of the coroutine,
16// add them to the current SCC and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
20
21#include "llvm/Transforms/Coroutines/CoroSplit.h"
22#include "CoroInstr.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/Analysis/CFG.h"
30#include "llvm/Analysis/CallGraph.h"
31#include "llvm/Analysis/CallGraphSCCPass.h"
32#include "llvm/Analysis/ConstantFolding.h"
33#include "llvm/Analysis/LazyCallGraph.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/Attributes.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CFG.h"
38#include "llvm/IR/CallingConv.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GlobalValue.h"
45#include "llvm/IR/GlobalVariable.h"
46#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/InstIterator.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Instructions.h"
51#include "llvm/IR/IntrinsicInst.h"
52#include "llvm/IR/LLVMContext.h"
53#include "llvm/IR/LegacyPassManager.h"
54#include "llvm/IR/Module.h"
55#include "llvm/IR/Type.h"
56#include "llvm/IR/Value.h"
57#include "llvm/IR/Verifier.h"
58#include "llvm/InitializePasses.h"
59#include "llvm/Pass.h"
60#include "llvm/Support/Casting.h"
61#include "llvm/Support/Debug.h"
62#include "llvm/Support/PrettyStackTrace.h"
63#include "llvm/Support/raw_ostream.h"
64#include "llvm/Transforms/Scalar.h"
65#include "llvm/Transforms/Utils/BasicBlockUtils.h"
66#include "llvm/Transforms/Utils/CallGraphUpdater.h"
67#include "llvm/Transforms/Utils/Cloning.h"
68#include "llvm/Transforms/Utils/Local.h"
69#include "llvm/Transforms/Utils/ValueMapper.h"
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <initializer_list>
74#include <iterator>
75
76using namespace llvm;
77
78#define DEBUG_TYPE "coro-split"
79
80namespace {
81
82/// A little helper class for building the coroutine function clones.
83class CoroCloner {
84public:
85 enum class Kind {
86 /// The shared resume function for a switch lowering.
87 SwitchResume,
88
89 /// The shared unwind function for a switch lowering.
90 SwitchUnwind,
91
92 /// The shared cleanup function for a switch lowering.
93 SwitchCleanup,
94
95 /// An individual continuation function.
96 Continuation,
97
98 /// An async resume function.
99 Async,
100 };
101
102private:
103 Function &OrigF;
104 Function *NewF;
105 const Twine &Suffix;
106 coro::Shape &Shape;
107 Kind FKind;
108 ValueToValueMapTy VMap;
109 IRBuilder<> Builder;
110 Value *NewFramePtr = nullptr;
111
112 /// The active suspend instruction; meaningful only for continuation and async
113 /// ABIs.
114 AnyCoroSuspendInst *ActiveSuspend = nullptr;
[20] Null pointer value stored to 'Cloner.ActiveSuspend'
115
116public:
117 /// Create a cloner for a switch lowering.
118 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
119 Kind FKind)
120 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
121 FKind(FKind), Builder(OrigF.getContext()) {
122 assert(Shape.ABI == coro::ABI::Switch);
[21] Assuming field 'ABI' is equal to Switch
[22] '?' condition is true
123 }
124
125 /// Create a cloner for a continuation lowering.
126 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
127 Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
128 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
129 FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
130 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
131 assert(Shape.ABI == coro::ABI::Retcon ||
132 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
133 assert(NewF && "need existing function for continuation");
134 assert(ActiveSuspend && "need active suspend point for continuation");
135 }
136
137 Function *getFunction() const {
138 assert(NewF != nullptr && "declaration not yet set");
139 return NewF;
140 }
141
142 void create();
143
144private:
145 bool isSwitchDestroyFunction() {
146 switch (FKind) {
147 case Kind::Async:
148 case Kind::Continuation:
149 case Kind::SwitchResume:
150 return false;
151 case Kind::SwitchUnwind:
152 case Kind::SwitchCleanup:
153 return true;
154 }
155 llvm_unreachable("Unknown CoroCloner::Kind enum");
156 }
157
158 void replaceEntryBlock();
159 Value *deriveNewFramePointer();
160 void replaceRetconOrAsyncSuspendUses();
161 void replaceCoroSuspends();
162 void replaceCoroEnds();
163 void replaceSwiftErrorOps();
164 void salvageDebugInfo();
165 void handleFinalSuspend();
166};
167
168} // end anonymous namespace
169
170static void maybeFreeRetconStorage(IRBuilder<> &Builder,
171 const coro::Shape &Shape, Value *FramePtr,
172 CallGraph *CG) {
173 assert(Shape.ABI == coro::ABI::Retcon ||
174 Shape.ABI == coro::ABI::RetconOnce);
175 if (Shape.RetconLowering.IsFrameInlineInStorage)
176 return;
177
178 Shape.emitDealloc(Builder, FramePtr, CG);
179}
180
181/// Replace an llvm.coro.end.async.
182/// Will inline the musttail call, if there is one.
183/// \returns true if cleanup of the coro.end block is needed, false otherwise.
184static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
185 IRBuilder<> Builder(End);
186
187 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
188 if (!EndAsync) {
189 Builder.CreateRetVoid();
190 return true /*needs cleanup of coro.end block*/;
191 }
192
193 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
194 if (!MustTailCallFunc) {
195 Builder.CreateRetVoid();
196 return true /*needs cleanup of coro.end block*/;
197 }
198
199 // Move the must tail call from the predecessor block into the end block.
200 auto *CoroEndBlock = End->getParent();
201 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
202 assert(MustTailCallFuncBlock && "Must have a single predecessor block");
203 auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
204 auto *MustTailCall = cast<CallInst>(&*std::prev(It));
205 CoroEndBlock->getInstList().splice(
206 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
207
208 // Insert the return instruction.
209 Builder.SetInsertPoint(End);
210 Builder.CreateRetVoid();
211 InlineFunctionInfo FnInfo;
212
213 // Remove the rest of the block, by splitting it into an unreachable block.
214 auto *BB = End->getParent();
215 BB->splitBasicBlock(End);
216 BB->getTerminator()->eraseFromParent();
217
218 auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
219 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
220 (void)InlineRes;
221
222 // We have cleaned up the coro.end block above.
223 return false;
224}
225
226/// Replace a non-unwind call to llvm.coro.end.
227static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
228 const coro::Shape &Shape, Value *FramePtr,
229 bool InResume, CallGraph *CG) {
230 // Start inserting right before the coro.end.
231 IRBuilder<> Builder(End);
232
233 // Create the return instruction.
234 switch (Shape.ABI) {
235 // The cloned functions in switch-lowering always return void.
236 case coro::ABI::Switch:
237 // coro.end doesn't immediately end the coroutine in the main function
238 // in this lowering, because we need to deallocate the coroutine.
239 if (!InResume)
240 return;
241 Builder.CreateRetVoid();
242 break;
243
244 // In async lowering this returns.
245 case coro::ABI::Async: {
246 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
247 if (!CoroEndBlockNeedsCleanup)
248 return;
249 break;
250 }
251
252 // In unique continuation lowering, the continuations always return void.
253 // But we may have implicitly allocated storage.
254 case coro::ABI::RetconOnce:
255 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
256 Builder.CreateRetVoid();
257 break;
258
259 // In non-unique continuation lowering, we signal completion by returning
260 // a null continuation.
261 case coro::ABI::Retcon: {
262 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
263 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
264 auto RetStructTy = dyn_cast<StructType>(RetTy);
265 PointerType *ContinuationTy =
266 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
267
268 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
269 if (RetStructTy) {
270 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
271 ReturnValue, 0);
272 }
273 Builder.CreateRet(ReturnValue);
274 break;
275 }
276 }
277
278 // Remove the rest of the block, by splitting it into an unreachable block.
279 auto *BB = End->getParent();
280 BB->splitBasicBlock(End);
281 BB->getTerminator()->eraseFromParent();
282}
283
284// Mark a coroutine as done, which implies that the coroutine is finished and
285// never gets resumed.
286//
287// In the resume-switched ABI, the done state is represented by storing zero in
288// ResumeFnAddr.
289//
290// NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
291// pointer to the frame in the split function is not stored in `Shape`.
292static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
293 Value *FramePtr) {
294 assert(
295 Shape.ABI == coro::ABI::Switch &&
296 "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
297 auto *GepIndex = Builder.CreateStructGEP(
298 Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
299 "ResumeFn.addr");
300 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
301 Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
302 Builder.CreateStore(NullPtr, GepIndex);
303}
304
305/// Replace an unwind call to llvm.coro.end.
306static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
307 Value *FramePtr, bool InResume,
308 CallGraph *CG) {
309 IRBuilder<> Builder(End);
310
311 switch (Shape.ABI) {
312 // In switch-lowering, this does nothing in the main function.
313 case coro::ABI::Switch: {
314 // In C++'s specification, the coroutine should be marked as done
315 // if promise.unhandled_exception() throws. The frontend will
316 // call coro.end(true) along this path.
317 //
318 // FIXME: We should refactor this once there are other languages
319 // that use the Switch-Resumed style besides C++.
320 markCoroutineAsDone(Builder, Shape, FramePtr);
321 if (!InResume)
322 return;
323 break;
324 }
325 // In async lowering this does nothing.
326 case coro::ABI::Async:
327 break;
328 // In continuation-lowering, this frees the continuation storage.
329 case coro::ABI::Retcon:
330 case coro::ABI::RetconOnce:
331 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
332 break;
333 }
334
335 // If coro.end has an associated bundle, add cleanupret instruction.
336 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
337 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
338 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
339 End->getParent()->splitBasicBlock(End);
340 CleanupRet->getParent()->getTerminator()->eraseFromParent();
341 }
342}
343
344static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
345 Value *FramePtr, bool InResume, CallGraph *CG) {
346 if (End->isUnwind())
347 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
348 else
349 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
350
351 auto &Context = End->getContext();
352 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
353 : ConstantInt::getFalse(Context));
354 End->eraseFromParent();
355}
356
357// Create an entry block for a resume function with a switch that will jump to
358// suspend points.
359static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
360 assert(Shape.ABI == coro::ABI::Switch);
361 LLVMContext &C = F.getContext();
362
363 // resume.entry:
364 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
365 // i32 2
366 // % index = load i32, i32* %index.addr
367 // switch i32 %index, label %unreachable [
368 // i32 0, label %resume.0
369 // i32 1, label %resume.1
370 // ...
371 // ]
372
373 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
374 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
375
376 IRBuilder<> Builder(NewEntry);
377 auto *FramePtr = Shape.FramePtr;
378 auto *FrameTy = Shape.FrameTy;
379 auto *GepIndex = Builder.CreateStructGEP(
380 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
381 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
382 auto *Switch =
383 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
384 Shape.SwitchLowering.ResumeSwitch = Switch;
385
386 size_t SuspendIndex = 0;
387 for (auto *AnyS : Shape.CoroSuspends) {
388 auto *S = cast<CoroSuspendInst>(AnyS);
389 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
390
391 // Replace CoroSave with a store to Index:
392 // %index.addr = getelementptr %f.frame... (index field number)
393 // store i32 0, i32* %index.addr1
394 auto *Save = S->getCoroSave();
395 Builder.SetInsertPoint(Save);
396 if (S->isFinal()) {
397 // The coroutine should be marked done if it reaches the final suspend
398 // point.
399 markCoroutineAsDone(Builder, Shape, FramePtr);
400 } else {
401 auto *GepIndex = Builder.CreateStructGEP(
402 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
403 Builder.CreateStore(IndexVal, GepIndex);
404 }
405 Save->replaceAllUsesWith(ConstantTokenNone::get(C));
406 Save->eraseFromParent();
407
408 // Split block before and after coro.suspend and add a jump from an entry
409 // switch:
410 //
411 // whateverBB:
412 // whatever
413 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
414 // switch i8 %0, label %suspend[i8 0, label %resume
415 // i8 1, label %cleanup]
416 // becomes:
417 //
418 // whateverBB:
419 // whatever
420 // br label %resume.0.landing
421 //
422 // resume.0: ; <--- jump from the switch in the resume.entry
423 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
424 // br label %resume.0.landing
425 //
426 // resume.0.landing:
427 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
428 // switch i8 % 1, label %suspend [i8 0, label %resume
429 // i8 1, label %cleanup]
430
431 auto *SuspendBB = S->getParent();
432 auto *ResumeBB =
433 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
434 auto *LandingBB = ResumeBB->splitBasicBlock(
435 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
436 Switch->addCase(IndexVal, ResumeBB);
437
438 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
439 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
440 S->replaceAllUsesWith(PN);
441 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
442 PN->addIncoming(S, ResumeBB);
443
444 ++SuspendIndex;
445 }
446
447 Builder.SetInsertPoint(UnreachBB);
448 Builder.CreateUnreachable();
449
450 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
451}
452
453
454// Rewrite final suspend point handling. We do not use suspend index to
455// represent the final suspend point. Instead we zero-out ResumeFnAddr in the
456// coroutine frame, since it is undefined behavior to resume a coroutine
457// suspended at the final suspend point. Thus, in the resume function, we can
458// simply remove the last case (when coro::Shape is built, the final suspend
459// point (if present) is always the last element of CoroSuspends array).
460// In the destroy function, we add a code sequence to check if ResumeFnAddress
461// is null, and if so, jump to the appropriate label to handle cleanup from the
462// final suspend point.
463void CoroCloner::handleFinalSuspend() {
464 assert(Shape.ABI == coro::ABI::Switch &&
465 Shape.SwitchLowering.HasFinalSuspend);
466 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
467 auto FinalCaseIt = std::prev(Switch->case_end());
468 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
469 Switch->removeCase(FinalCaseIt);
470 if (isSwitchDestroyFunction()) {
471 BasicBlock *OldSwitchBB = Switch->getParent();
472 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
473 Builder.SetInsertPoint(OldSwitchBB->getTerminator());
474 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
475 coro::Shape::SwitchFieldIndex::Resume,
476 "ResumeFn.addr");
477 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
478 GepIndex);
479 auto *Cond = Builder.CreateIsNull(Load);
480 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
481 OldSwitchBB->getTerminator()->eraseFromParent();
482 }
483}
484
485static FunctionType *
486getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
487 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
488 auto *StructTy = cast<StructType>(AsyncSuspend->getType());
[33] The object is a 'StructType'
489 auto &Context = Suspend->getParent()->getParent()->getContext();
[34] Called C++ object pointer is null
490 auto *VoidTy = Type::getVoidTy(Context);
491 return FunctionType::get(VoidTy, StructTy->elements(), false);
492}
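
Note: the dereference flagged at line 489 is the sink of this report. For illustration only, a minimal sketch of how the nullability contract could be made explicit at the sink is shown below; it assumes the function's existing LLVM 14 surroundings, adds one hypothetical assert, and is not an upstream change (in +Asserts builds, cast<> would already abort on a null Suspend):

    static FunctionType *
    getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
      // Hypothetical guard making the contract explicit: async lowering always
      // has an active suspend point, so Suspend must be non-null here.
      assert(Suspend && "async ABI requires an active suspend point");
      auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
      auto *StructTy = cast<StructType>(AsyncSuspend->getType());
      auto &Context = Suspend->getParent()->getParent()->getContext();
      auto *VoidTy = Type::getVoidTy(Context);
      return FunctionType::get(VoidTy, StructTy->elements(), false);
    }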
493
494static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
495 const Twine &Suffix,
496 Module::iterator InsertBefore,
497 AnyCoroSuspendInst *ActiveSuspend) {
498 Module *M = OrigF.getParent();
499 auto *FnTy = (Shape.ABI != coro::ABI::Async)
[29] Assuming field 'ABI' is equal to Async
[30] '?' condition is false
500 ? Shape.getResumeFunctionType()
501 : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
[31] Passing null pointer value via 1st parameter 'Suspend'
[32] Calling 'getFunctionTypeFromAsyncSuspend'
502
503 Function *NewF =
504 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
505 OrigF.getName() + Suffix);
506 if (Shape.ABI != coro::ABI::Async)
507 NewF->addParamAttr(0, Attribute::NonNull);
508
509 // For the async lowering ABI we can't guarantee that the context argument is
510 // not accessed via a different pointer not based on the argument.
511 if (Shape.ABI != coro::ABI::Async)
512 NewF->addParamAttr(0, Attribute::NoAlias);
513
514 M->getFunctionList().insert(InsertBefore, NewF);
515
516 return NewF;
517}
518
519/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
520/// arguments to the continuation function.
521///
522/// This assumes that the builder has a meaningful insertion point.
523void CoroCloner::replaceRetconOrAsyncSuspendUses() {
524 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
525 Shape.ABI == coro::ABI::Async);
526
527 auto NewS = VMap[ActiveSuspend];
528 if (NewS->use_empty()) return;
529
530 // Copy out all the continuation arguments after the buffer pointer into
531 // an easily-indexed data structure for convenience.
532 SmallVector<Value*, 8> Args;
533 // The async ABI includes all arguments -- including the first argument.
534 bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
535 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
536 E = NewF->arg_end();
537 I != E; ++I)
538 Args.push_back(&*I);
539
540 // If the suspend returns a single scalar value, we can just do a simple
541 // replacement.
542 if (!isa<StructType>(NewS->getType())) {
543 assert(Args.size() == 1);
544 NewS->replaceAllUsesWith(Args.front());
545 return;
546 }
547
548 // Try to peephole extracts of an aggregate return.
549 for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
550 auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
551 if (!EVI || EVI->getNumIndices() != 1)
552 continue;
553
554 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
555 EVI->eraseFromParent();
556 }
557
558 // If we have no remaining uses, we're done.
559 if (NewS->use_empty()) return;
560
561 // Otherwise, we need to create an aggregate.
562 Value *Agg = UndefValue::get(NewS->getType());
563 for (size_t I = 0, E = Args.size(); I != E; ++I)
564 Agg = Builder.CreateInsertValue(Agg, Args[I], I);
565
566 NewS->replaceAllUsesWith(Agg);
567}
568
569void CoroCloner::replaceCoroSuspends() {
570 Value *SuspendResult;
571
572 switch (Shape.ABI) {
573 // In switch lowering, replace coro.suspend with the appropriate value
574 // for the type of function we're extracting.
575 // Replacing coro.suspend with (0) will result in control flow proceeding to
576 // a resume label associated with a suspend point, replacing it with (1) will
577 // result in control flow proceeding to a cleanup label associated with this
578 // suspend point.
579 case coro::ABI::Switch:
580 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
581 break;
582
583 // In async lowering there are no uses of the result.
584 case coro::ABI::Async:
585 return;
586
587 // In returned-continuation lowering, the arguments from earlier
588 // continuations are theoretically arbitrary, and they should have been
589 // spilled.
590 case coro::ABI::RetconOnce:
591 case coro::ABI::Retcon:
592 return;
593 }
594
595 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
596 // The active suspend was handled earlier.
597 if (CS == ActiveSuspend) continue;
598
599 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
600 MappedCS->replaceAllUsesWith(SuspendResult);
601 MappedCS->eraseFromParent();
602 }
603}
604
605void CoroCloner::replaceCoroEnds() {
606 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
607 // We use a null call graph because there's no call graph node for
608 // the cloned function yet. We'll just be rebuilding that later.
609 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
610 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
611 }
612}
613
614static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
615 ValueToValueMapTy *VMap) {
616 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
617 return;
618 Value *CachedSlot = nullptr;
619 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
620 if (CachedSlot) {
621 assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
622 "multiple swifterror slots in function with different types");
623 return CachedSlot;
624 }
625
626 // Check if the function has a swifterror argument.
627 for (auto &Arg : F.args()) {
628 if (Arg.isSwiftError()) {
629 CachedSlot = &Arg;
630 assert(Arg.getType()->getPointerElementType() == ValueTy &&
631 "swifterror argument does not have expected type");
632 return &Arg;
633 }
634 }
635
636 // Create a swifterror alloca.
637 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
638 auto Alloca = Builder.CreateAlloca(ValueTy);
639 Alloca->setSwiftError(true);
640
641 CachedSlot = Alloca;
642 return Alloca;
643 };
644
645 for (CallInst *Op : Shape.SwiftErrorOps) {
646 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
647 IRBuilder<> Builder(MappedOp);
648
649 // If there are no arguments, this is a 'get' operation.
650 Value *MappedResult;
651 if (Op->arg_empty()) {
652 auto ValueTy = Op->getType();
653 auto Slot = getSwiftErrorSlot(ValueTy);
654 MappedResult = Builder.CreateLoad(ValueTy, Slot);
655 } else {
656 assert(Op->arg_size() == 1);
657 auto Value = MappedOp->getArgOperand(0);
658 auto ValueTy = Value->getType();
659 auto Slot = getSwiftErrorSlot(ValueTy);
660 Builder.CreateStore(Value, Slot);
661 MappedResult = Slot;
662 }
663
664 MappedOp->replaceAllUsesWith(MappedResult);
665 MappedOp->eraseFromParent();
666 }
667
668 // If we're updating the original function, we've invalidated SwiftErrorOps.
669 if (VMap == nullptr) {
670 Shape.SwiftErrorOps.clear();
671 }
672}
673
674void CoroCloner::replaceSwiftErrorOps() {
675 ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
676}
677
678void CoroCloner::salvageDebugInfo() {
679 SmallVector<DbgVariableIntrinsic *, 8> Worklist;
680 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
681 for (auto &BB : *NewF)
682 for (auto &I : BB)
683 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
684 Worklist.push_back(DVI);
685 for (DbgVariableIntrinsic *DVI : Worklist)
686 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
687
688 // Remove all salvaged dbg.declare intrinsics that became
689 // either unreachable or stale due to the CoroSplit transformation.
690 DominatorTree DomTree(*NewF);
691 auto IsUnreachableBlock = [&](BasicBlock *BB) {
692 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
693 &DomTree);
694 };
695 for (DbgVariableIntrinsic *DVI : Worklist) {
696 if (IsUnreachableBlock(DVI->getParent()))
697 DVI->eraseFromParent();
698 else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
699 // Count all non-debuginfo uses in reachable blocks.
700 unsigned Uses = 0;
701 for (auto *User : DVI->getVariableLocationOp(0)->users())
702 if (auto *I = dyn_cast<Instruction>(User))
703 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
704 ++Uses;
705 if (!Uses)
706 DVI->eraseFromParent();
707 }
708 }
709}
710
711void CoroCloner::replaceEntryBlock() {
712 // In the original function, the AllocaSpillBlock is a block immediately
713 // following the allocation of the frame object which defines GEPs for
714 // all the allocas that have been moved into the frame, and it ends by
715 // branching to the original beginning of the coroutine. Make this
716 // the entry block of the cloned function.
717 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
718 auto *OldEntry = &NewF->getEntryBlock();
719 Entry->setName("entry" + Suffix);
720 Entry->moveBefore(OldEntry);
721 Entry->getTerminator()->eraseFromParent();
722
723 // Clear all predecessors of the new entry block. There should be
724 // exactly one predecessor, which we created when splitting out
725 // AllocaSpillBlock to begin with.
726 assert(Entry->hasOneUse());
727 auto BranchToEntry = cast<BranchInst>(Entry->user_back());
728 assert(BranchToEntry->isUnconditional());
729 Builder.SetInsertPoint(BranchToEntry);
730 Builder.CreateUnreachable();
731 BranchToEntry->eraseFromParent();
732
733 // Branch from the entry to the appropriate place.
734 Builder.SetInsertPoint(Entry);
735 switch (Shape.ABI) {
736 case coro::ABI::Switch: {
737 // In switch-lowering, we built a resume-entry block in the original
738 // function. Make the entry block branch to this.
739 auto *SwitchBB =
740 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
741 Builder.CreateBr(SwitchBB);
742 break;
743 }
744 case coro::ABI::Async:
745 case coro::ABI::Retcon:
746 case coro::ABI::RetconOnce: {
747 // In continuation ABIs, we want to branch to immediately after the
748 // active suspend point. Earlier phases will have put the suspend in its
749 // own basic block, so just thread our jump directly to its successor.
750 assert((Shape.ABI == coro::ABI::Async &&
751 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
752 ((Shape.ABI == coro::ABI::Retcon ||
753 Shape.ABI == coro::ABI::RetconOnce) &&
754 isa<CoroSuspendRetconInst>(ActiveSuspend)));
755 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
756 auto Branch = cast<BranchInst>(MappedCS->getNextNode());
757 assert(Branch->isUnconditional());
758 Builder.CreateBr(Branch->getSuccessor(0));
759 break;
760 }
761 }
762
763 // Any static alloca that's still being used but not reachable from the new
764 // entry needs to be moved to the new entry.
765 Function *F = OldEntry->getParent();
766 DominatorTree DT{*F};
767 for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
768 auto *Alloca = dyn_cast<AllocaInst>(&I);
769 if (!Alloca || I.use_empty())
770 continue;
771 if (DT.isReachableFromEntry(I.getParent()) ||
772 !isa<ConstantInt>(Alloca->getArraySize()))
773 continue;
774 I.moveBefore(*Entry, Entry->getFirstInsertionPt());
775 }
776}
777
778/// Derive the value of the new frame pointer.
779Value *CoroCloner::deriveNewFramePointer() {
780 // Builder should be inserting to the front of the new entry block.
781
782 switch (Shape.ABI) {
783 // In switch-lowering, the argument is the frame pointer.
784 case coro::ABI::Switch:
785 return &*NewF->arg_begin();
786 // In async-lowering, one of the arguments is an async context as determined
787 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
788 // the resume function from the async context projection function associated
789 // with the active suspend. The frame is located as a tail to the async
790 // context header.
791 case coro::ABI::Async: {
792 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
793 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
794 auto *CalleeContext = NewF->getArg(ContextIdx);
795 auto *FramePtrTy = Shape.FrameTy->getPointerTo();
796 auto *ProjectionFunc =
797 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
798 auto DbgLoc =
799 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
800 // Calling i8* (i8*)
801 auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
802 ProjectionFunc, CalleeContext);
803 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
804 CallerContext->setDebugLoc(DbgLoc);
805 // The frame is located after the async_context header.
806 auto &Context = Builder.getContext();
807 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
808 Type::getInt8Ty(Context), CallerContext,
809 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
810 // Inline the projection function.
811 InlineFunctionInfo InlineInfo;
812 auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
813 assert(InlineRes.isSuccess());
814 (void)InlineRes;
815 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
816 }
817 // In continuation-lowering, the argument is the opaque storage.
818 case coro::ABI::Retcon:
819 case coro::ABI::RetconOnce: {
820 Argument *NewStorage = &*NewF->arg_begin();
821 auto FramePtrTy = Shape.FrameTy->getPointerTo();
822
823 // If the storage is inline, just bitcast to the storage to the frame type.
824 if (Shape.RetconLowering.IsFrameInlineInStorage)
825 return Builder.CreateBitCast(NewStorage, FramePtrTy);
826
827 // Otherwise, load the real frame from the opaque storage.
828 auto FramePtrPtr =
829 Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
830 return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
831 }
832 }
833 llvm_unreachable("bad ABI");
834}
835
836static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
837 unsigned ParamIndex,
838 uint64_t Size, Align Alignment) {
839 AttrBuilder ParamAttrs(Context);
840 ParamAttrs.addAttribute(Attribute::NonNull);
841 ParamAttrs.addAttribute(Attribute::NoAlias);
842 ParamAttrs.addAlignmentAttr(Alignment);
843 ParamAttrs.addDereferenceableAttr(Size);
844 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
845}
846
847static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
848 unsigned ParamIndex) {
849 AttrBuilder ParamAttrs(Context);
850 ParamAttrs.addAttribute(Attribute::SwiftAsync);
851 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
852}
853
854static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
855 unsigned ParamIndex) {
856 AttrBuilder ParamAttrs(Context);
857 ParamAttrs.addAttribute(Attribute::SwiftSelf);
858 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
859}
860
861/// Clone the body of the original function into a resume function of
862/// some sort.
863void CoroCloner::create() {
864 // Create the new function if we don't already have one.
865 if (!NewF) {
[25] Assuming field 'NewF' is null
[26] Taking true branch
866 NewF = createCloneDeclaration(OrigF, Shape, Suffix,
[28] Calling 'createCloneDeclaration'
867 OrigF.getParent()->end(), ActiveSuspend);
[27] Passing null pointer value via 5th parameter 'ActiveSuspend'
868 }
869
870 // Replace all args with undefs. The buildCoroutineFrame algorithm has
871 // already rewritten accesses to the args that occur after suspend points
872 // with loads and stores to/from the coroutine frame.
873 for (Argument &A : OrigF.args())
874 VMap[&A] = UndefValue::get(A.getType());
875
876 SmallVector<ReturnInst *, 4> Returns;
877
878 // Ignore attempts to change certain attributes of the function.
879 // TODO: maybe there should be a way to suppress this during cloning?
880 auto savedVisibility = NewF->getVisibility();
881 auto savedUnnamedAddr = NewF->getUnnamedAddr();
882 auto savedDLLStorageClass = NewF->getDLLStorageClass();
883
884 // NewF's linkage (which CloneFunctionInto does *not* change) might not
885 // be compatible with the visibility of OrigF (which it *does* change),
886 // so protect against that.
887 auto savedLinkage = NewF->getLinkage();
888 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
889
890 CloneFunctionInto(NewF, &OrigF, VMap,
891 CloneFunctionChangeType::LocalChangesOnly, Returns);
892
893 auto &Context = NewF->getContext();
894
895 // For async functions / continuations, adjust the scope line of the
896 // clone to the line number of the suspend point. However, only
897 // adjust the scope line when the files are the same. This ensures
898 // line number and file name belong together. The scope line is
899 // associated with all pre-prologue instructions. This avoids a jump
900 // in the linetable from the function declaration to the suspend point.
901 if (DISubprogram *SP = NewF->getSubprogram()) {
902 assert(SP != OrigF.getSubprogram() && SP->isDistinct());
903 if (ActiveSuspend)
904 if (auto DL = ActiveSuspend->getDebugLoc())
905 if (SP->getFile() == DL->getFile())
906 SP->setScopeLine(DL->getLine());
907 // Update the linkage name to reflect the modified symbol name. It
908 // is necessary to update the linkage name in Swift, since the
909 // mangling changes for resume functions. It might also be the
910 // right thing to do in C++, but due to a limitation in LLVM's
911 // AsmPrinter we can only do this if the function doesn't have an
912 // abstract specification, since the DWARF backend expects the
913 // abstract specification to contain the linkage name and asserts
914 // that they are identical.
915 if (!SP->getDeclaration() && SP->getUnit() &&
916 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
917 SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
918 }
919
920 NewF->setLinkage(savedLinkage);
921 NewF->setVisibility(savedVisibility);
922 NewF->setUnnamedAddr(savedUnnamedAddr);
923 NewF->setDLLStorageClass(savedDLLStorageClass);
924
925 // Replace the attributes of the new function:
926 auto OrigAttrs = NewF->getAttributes();
927 auto NewAttrs = AttributeList();
928
929 switch (Shape.ABI) {
930 case coro::ABI::Switch:
931 // Bootstrap attributes by copying function attributes from the
932 // original function. This should include optimization settings and so on.
933 NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
934
935 addFramePointerAttrs(NewAttrs, Context, 0,
936 Shape.FrameSize, Shape.FrameAlign);
937 break;
938 case coro::ABI::Async: {
939 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
940 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
941 Attribute::SwiftAsync)) {
942 uint32_t ArgAttributeIndices =
943 ActiveAsyncSuspend->getStorageArgumentIndex();
944 auto ContextArgIndex = ArgAttributeIndices & 0xff;
945 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
946
947 // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
948 // `swiftself`.
949 auto SwiftSelfIndex = ArgAttributeIndices >> 8;
950 if (SwiftSelfIndex)
951 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
952 }
953
954 // Transfer the original function's attributes.
955 auto FnAttrs = OrigF.getAttributes().getFnAttrs();
956 NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
957 break;
958 }
959 case coro::ABI::Retcon:
960 case coro::ABI::RetconOnce:
961 // If we have a continuation prototype, just use its attributes,
962 // full-stop.
963 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
964
965 addFramePointerAttrs(NewAttrs, Context, 0,
966 Shape.getRetconCoroId()->getStorageSize(),
967 Shape.getRetconCoroId()->getStorageAlignment());
968 break;
969 }
970
971 switch (Shape.ABI) {
972 // In these ABIs, the cloned functions always return 'void', and the
973 // existing return sites are meaningless. Note that for unique
974 // continuations, this includes the returns associated with suspends;
975 // this is fine because we can't suspend twice.
976 case coro::ABI::Switch:
977 case coro::ABI::RetconOnce:
978 // Remove old returns.
979 for (ReturnInst *Return : Returns)
980 changeToUnreachable(Return);
981 break;
982
983 // With multi-suspend continuations, we'll already have eliminated the
984 // original returns and inserted returns before all the suspend points,
985 // so we want to leave any returns in place.
986 case coro::ABI::Retcon:
987 break;
988 // Async lowering will insert musttail call functions at all suspend points
989 // followed by a return.
990 // Don't change returns to unreachable because that will trip up the verifier.
991 // These returns should be unreachable from the clone.
992 case coro::ABI::Async:
993 break;
994 }
995
996 NewF->setAttributes(NewAttrs);
997 NewF->setCallingConv(Shape.getResumeFunctionCC());
998
999 // Set up the new entry block.
1000 replaceEntryBlock();
1001
1002 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1003 NewFramePtr = deriveNewFramePointer();
1004
1005 // Remap frame pointer.
1006 Value *OldFramePtr = VMap[Shape.FramePtr];
1007 NewFramePtr->takeName(OldFramePtr);
1008 OldFramePtr->replaceAllUsesWith(NewFramePtr);
1009
1010 // Remap vFrame pointer.
1011 auto *NewVFrame = Builder.CreateBitCast(
1012 NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1013 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1014 OldVFrame->replaceAllUsesWith(NewVFrame);
1015
1016 switch (Shape.ABI) {
1017 case coro::ABI::Switch:
1018 // Rewrite final suspend handling as it is not done via the switch (this
1019 // allows removing the final case from the switch, since it is undefined
1020 // behavior to resume a coroutine suspended at the final suspend point).
1021 if (Shape.SwitchLowering.HasFinalSuspend)
1022 handleFinalSuspend();
1023 break;
1024 case coro::ABI::Async:
1025 case coro::ABI::Retcon:
1026 case coro::ABI::RetconOnce:
1027 // Replace uses of the active suspend with the corresponding
1028 // continuation-function arguments.
1029 assert(ActiveSuspend != nullptr &&
1030 "no active suspend when lowering a continuation-style coroutine");
1031 replaceRetconOrAsyncSuspendUses();
1032 break;
1033 }
1034
1035 // Handle suspends.
1036 replaceCoroSuspends();
1037
1038 // Handle swifterror.
1039 replaceSwiftErrorOps();
1040
1041 // Remove coro.end intrinsics.
1042 replaceCoroEnds();
1043
1044 // Salvage debug info that points into the coroutine frame.
1045 salvageDebugInfo();
1046
1047 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1048 // to suppress deallocation code.
1049 if (Shape.ABI == coro::ABI::Switch)
1050 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1051 /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1052}
1053
1054// Create a resume clone by cloning the body of the original function, setting
1055// a new entry block, and replacing coro.suspend with an appropriate value to
1056// force the resume or cleanup path at every suspend point.
1057static Function *createClone(Function &F, const Twine &Suffix,
1058 coro::Shape &Shape, CoroCloner::Kind FKind) {
1059 CoroCloner Cloner(F, Suffix, Shape, FKind);
[19] Calling constructor for 'CoroCloner'
[23] Returning from constructor for 'CoroCloner'
1060 Cloner.create();
[24] Calling 'CoroCloner::create'
1061 return Cloner.getFunction();
1062}
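
Note: this is where the reported path starts (event 19) and where it becomes self-contradictory. The switch-lowering constructor used here constrains Shape.ABI to Switch (events 21/22), yet event 29 later assumes the same Shape.ABI field equals Async inside createCloneDeclaration, even though Shape is passed by reference and never modified in between. The warning is therefore most plausibly a false positive arising from the two reads of Shape.ABI not being correlated along one path. A minimal, self-contained C++ model of that shape of report (hypothetical names, not analyzer output):

    #include <cassert>

    enum class ABI { Switch, Async };
    struct Shape { ABI A; };

    int sink(Shape &S, int *P) {
      // A tool that "re-assumes" S.A here, instead of reusing the constraint
      // established in run(), would report a null dereference of P.
      return S.A == ABI::Async ? *P : 0;
    }

    int run(Shape &S) {
      assert(S.A == ABI::Switch); // constrains S.A on every continuing path
      return sink(S, nullptr);    // *P is unreachable under that constraint
    }

    int main() {
      Shape S{ABI::Switch};
      return run(S);
    }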
1063
1064/// Remove calls to llvm.coro.end in the original function.
1065static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1066 for (auto End : Shape.CoroEnds) {
1067 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1068 }
1069}
1070
1071static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1072 assert(Shape.ABI == coro::ABI::Async);
1073
1074 auto *FuncPtrStruct = cast<ConstantStruct>(
1075 Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1076 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1077 auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1078 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1079 Shape.AsyncLowering.ContextSize);
1080 auto *NewFuncPtrStruct = ConstantStruct::get(
1081 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1082
1083 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1084}
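// A sketch of the rewrite above: the async function pointer is a constant
// struct of the form
//   { i32 <relative function offset>, i32 <context size> }
// and only the second field is replaced with the newly computed
// AsyncLowering.ContextSize; the offset field is carried over unchanged.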
1085
1086static void replaceFrameSize(coro::Shape &Shape) {
1087 if (Shape.ABI == coro::ABI::Async)
1088 updateAsyncFuncPointerContextSize(Shape);
1089
1090 if (Shape.CoroSizes.empty())
1091 return;
1092
1093 // In the same function all coro.sizes should have the same result type.
1094 auto *SizeIntrin = Shape.CoroSizes.back();
1095 Module *M = SizeIntrin->getModule();
1096 const DataLayout &DL = M->getDataLayout();
1097 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1098 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1099
1100 for (CoroSizeInst *CS : Shape.CoroSizes) {
1101 CS->replaceAllUsesWith(SizeConstant);
1102 CS->eraseFromParent();
1103 }
1104}
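// An illustrative example (the concrete size here is hypothetical): once the
// frame type is known, a query such as
//   %size = call i64 @llvm.coro.size.i64()
// is replaced with the frame's allocation size from the data layout,
// e.g. 'i64 64', and the intrinsic call is erased.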
1105
1106 // Create a global constant array containing pointers to the functions provided
1107 // and set the Info parameter of CoroBegin to point at this constant. Example:
1108//
1109// @f.resumers = internal constant [2 x void(%f.frame*)*]
1110// [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1111// define void @f() {
1112// ...
1113// call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1114// i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1115//
1116// Assumes that all the functions have the same signature.
1117static void setCoroInfo(Function &F, coro::Shape &Shape,
1118 ArrayRef<Function *> Fns) {
1119 // This only works under the switch-lowering ABI because coro elision
1120 // only works on the switch-lowering ABI.
1121 assert(Shape.ABI == coro::ABI::Switch);
1122
1123 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1124 assert(!Args.empty());
1125 Function *Part = *Fns.begin();
1126 Module *M = Part->getParent();
1127 auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1128
1129 auto *ConstVal = ConstantArray::get(ArrTy, Args);
1130 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1131 GlobalVariable::PrivateLinkage, ConstVal,
1132 F.getName() + Twine(".resumers"));
1133
1134 // Update coro.begin instruction to refer to this constant.
1135 LLVMContext &C = F.getContext();
1136 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1137 Shape.getSwitchCoroId()->setInfo(BC);
1138}
1139
1140// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1141static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1142 Function *DestroyFn, Function *CleanupFn) {
1143 assert(Shape.ABI == coro::ABI::Switch);
1144
1145 IRBuilder<> Builder(Shape.FramePtr->getNextNode());
1146 auto *ResumeAddr = Builder.CreateStructGEP(
1147 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1148 "resume.addr");
1149 Builder.CreateStore(ResumeFn, ResumeAddr);
1150
1151 Value *DestroyOrCleanupFn = DestroyFn;
1152
1153 CoroIdInst *CoroId = Shape.getSwitchCoroId();
1154 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1155 // If there is a CoroAlloc and it returns false (meaning we elide the
1156 // allocation), use CleanupFn instead of DestroyFn.
1157 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1158 }
1159
1160 auto *DestroyAddr = Builder.CreateStructGEP(
1161 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1162 "destroy.addr");
1163 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1164}
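// A sketch of the stores this emits right after the frame pointer, assuming
// the switch-ABI frame keeps the resume and destroy slots as its first two
// fields (names hypothetical):
//   %resume.addr = getelementptr inbounds %f.frame, %f.frame* %frame, i32 0, i32 0
//   store void (%f.frame*)* @f.resume, void (%f.frame*)** %resume.addr
//   %destroy.addr = getelementptr inbounds %f.frame, %f.frame* %frame, i32 0, i32 1
//   store void (%f.frame*)* @f.destroy, void (%f.frame*)** %destroy.addr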
1165
1166static void postSplitCleanup(Function &F) {
1167 removeUnreachableBlocks(F);
1168
1169#ifndef NDEBUG
1170 // For now, we do a mandatory verification step because we don't
1171 // entirely trust this pass. Note that we don't want to add a verifier
1172 // pass to FPM below because it will also verify all the global data.
1173 if (verifyFunction(F, &errs()))
1174 report_fatal_error("Broken function");
1175#endif
1176}
1177
1178 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1179 // the PHIs' incoming values in the ResolvedValues map.
1180static void
1181scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1182 DenseMap<Value *, Value *> &ResolvedValues) {
1183 auto *PrevBB = Prev->getParent();
1184 for (PHINode &PN : NewBlock->phis()) {
1185 auto V = PN.getIncomingValueForBlock(PrevBB);
1186 // See if we already resolved it.
1187 auto VI = ResolvedValues.find(V);
1188 if (VI != ResolvedValues.end())
1189 V = VI->second;
1190 // Remember the value.
1191 ResolvedValues[&PN] = V;
1192 }
1193}
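// For example (values hypothetical), arriving at NewBlock from %entry with
//   %v = phi i8 [ 0, %entry ], [ 1, %other ]
// records ResolvedValues[%v] = 0; if the incoming value were itself a
// previously resolved PHI, its resolution would be recorded instead, so
// terminators that branch on %v can later be folded.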
1194
1195 // Replace a sequence of branches leading to a ret with a clone of that ret
1196 // instruction. The suspend is represented by a switch; track the PHI values
1197 // and select the correct case successor when possible.
1198static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1199 DenseMap<Value *, Value *> ResolvedValues;
1200 BasicBlock *UnconditionalSucc = nullptr;
1201 assert(InitialInst->getModule());
1202 const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1203
1204 auto GetFirstValidInstruction = [](Instruction *I) {
1205 while (I) {
1206 // A BitCastInst doesn't generate any actual code, so we can skip it.
1207 if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1208 I->isLifetimeStartOrEnd())
1209 I = I->getNextNode();
1210 else if (isInstructionTriviallyDead(I))
1211 // Since we are in the middle of the transformation, we need to erase
1212 // the dead instruction manually.
1213 I = &*I->eraseFromParent();
1214 else
1215 break;
1216 }
1217 return I;
1218 };
1219
1220 auto TryResolveConstant = [&ResolvedValues](Value *V) {
1221 auto It = ResolvedValues.find(V);
1222 if (It != ResolvedValues.end())
1223 V = It->second;
1224 return dyn_cast<ConstantInt>(V);
1225 };
1226
1227 Instruction *I = InitialInst;
1228 while (I->isTerminator() || isa<CmpInst>(I)) {
1229 if (isa<ReturnInst>(I)) {
1230 if (I != InitialInst) {
1231 // If InitialInst is an unconditional branch,
1232 // remove PHI values that come from the basic block of InitialInst.
1233 if (UnconditionalSucc)
1234 UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1235 ReplaceInstWithInst(InitialInst, I->clone());
1236 }
1237 return true;
1238 }
1239 if (auto *BR = dyn_cast<BranchInst>(I)) {
1240 if (BR->isUnconditional()) {
1241 BasicBlock *Succ = BR->getSuccessor(0);
1242 if (I == InitialInst)
1243 UnconditionalSucc = Succ;
1244 scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1245 I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1246 continue;
1247 }
1248
1249 BasicBlock *BB = BR->getParent();
1250 // Handle the case where the condition of the conditional branch is constant.
1251 // e.g.,
1252 //
1253 // br i1 false, label %cleanup, label %CoroEnd
1254 //
1255 // This can happen during the transformation, and we can continue
1256 // simplifying in this case.
1257 if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1258 // Handle this branch in next iteration.
1259 I = BB->getTerminator();
1260 continue;
1261 }
1262 } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1263 // If the number of cases of the suspend switch instruction is reduced to
1264 // one, it is simplified to a CmpInst by llvm::ConstantFoldTerminator.
1265 auto *BR = dyn_cast<BranchInst>(
1266 GetFirstValidInstruction(CondCmp->getNextNode()));
1267 if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1268 return false;
1269
1270 // The comparison looks like: %cond = icmp eq i8 %V, constant.
1271 // So we try to resolve a constant for the first operand only, since the
1272 // second operand should be a literal constant by design.
1273 ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1274 auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1275 if (!Cond0 || !Cond1)
1276 return false;
1277
1278 // Both operands of the CmpInst are constant, so we can evaluate it
1279 // immediately to get the destination.
1280 auto *ConstResult =
1281 dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1282 CondCmp->getPredicate(), Cond0, Cond1, DL));
1283 if (!ConstResult)
1284 return false;
1285
1286 CondCmp->replaceAllUsesWith(ConstResult);
1287 CondCmp->eraseFromParent();
1288
1289 // Handle this branch in next iteration.
1290 I = BR;
1291 continue;
1292 } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1293 ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1294 if (!Cond)
1295 return false;
1296
1297 BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1298 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1299 I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1300 continue;
1301 }
1302
1303 return false;
1304 }
1305 return false;
1306}
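// A worked example of the walk above (names hypothetical): starting from an
// unconditional branch as InitialInst in %start,
//   br label %check
// check:
//   %v = phi i8 [ 0, %start ]           ; resolved to 0 via ResolvedValues
//   switch i8 %v, label %susp [ i8 0, label %done ]
// done:
//   ret void
// the switch selects the %done case, and InitialInst is ultimately replaced
// with a clone of 'ret void'.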
1307
1308 // Check whether CI obeys the rules of the musttail attribute.
1309static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1310 if (CI.isInlineAsm())
1311 return false;
1312
1313 // Match prototypes and calling conventions of resume function.
1314 FunctionType *CalleeTy = CI.getFunctionType();
1315 if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1316 return false;
1317
1318 Type *CalleeParmTy = CalleeTy->getParamType(0);
1319 if (!CalleeParmTy->isPointerTy() ||
1320 (CalleeParmTy->getPointerAddressSpace() != 0))
1321 return false;
1322
1323 if (CI.getCallingConv() != F.getCallingConv())
1324 return false;
1325
1326 // CI should not have any ABI-impacting function attributes.
1327 static const Attribute::AttrKind ABIAttrs[] = {
1328 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
1329 Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
1330 Attribute::SwiftSelf, Attribute::SwiftError};
1331 AttributeList Attrs = CI.getAttributes();
1332 for (auto AK : ABIAttrs)
1333 if (Attrs.hasParamAttr(0, AK))
1334 return false;
1335
1336 return true;
1337}
1338
1339 // Add musttail to any resume instructions that are immediately followed by a
1340 // suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
1341 // for symmetric coroutine control transfer (C++ Coroutines TS extension).
1342 // This transformation is done only in the resume part of the coroutine, for
1343 // calls whose signature and calling convention match the coro.resume call.
1344static void addMustTailToCoroResumes(Function &F) {
1345 bool changed = false;
1346
1347 // Collect potential resume instructions.
1348 SmallVector<CallInst *, 4> Resumes;
1349 for (auto &I : instructions(F))
1350 if (auto *Call = dyn_cast<CallInst>(&I))
1351 if (shouldBeMustTail(*Call, F))
1352 Resumes.push_back(Call);
1353
1354 // Set musttail on those that are followed by a ret instruction.
1355 for (CallInst *Call : Resumes)
1356 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1357 Call->setTailCallKind(CallInst::TCK_MustTail);
1358 changed = true;
1359 }
1360
1361 if (changed)
1362 removeUnreachableBlocks(F);
1363}
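// Illustrative before/after in the resume clone (names hypothetical):
//   call fastcc void @g.resume(i8* %hdl)
//   ret void
// becomes
//   musttail call fastcc void @g.resume(i8* %hdl)
//   ret void
// enabling symmetric transfer without growing the stack.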
1364
1365// Coroutine has no suspend points. Remove heap allocation for the coroutine
1366// frame if possible.
1367static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1368 auto *CoroBegin = Shape.CoroBegin;
1369 auto *CoroId = CoroBegin->getId();
1370 auto *AllocInst = CoroId->getCoroAlloc();
1371 switch (Shape.ABI) {
1372 case coro::ABI::Switch: {
1373 auto SwitchId = cast<CoroIdInst>(CoroId);
1374 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1375 if (AllocInst) {
1376 IRBuilder<> Builder(AllocInst);
1377 auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1378 Frame->setAlignment(Shape.FrameAlign);
1379 auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1380 AllocInst->replaceAllUsesWith(Builder.getFalse());
1381 AllocInst->eraseFromParent();
1382 CoroBegin->replaceAllUsesWith(VFrame);
1383 } else {
1384 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1385 }
1386
1387 break;
1388 }
1389 case coro::ABI::Async:
1390 case coro::ABI::Retcon:
1391 case coro::ABI::RetconOnce:
1392 CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1393 break;
1394 }
1395
1396 CoroBegin->eraseFromParent();
1397}
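// A sketch of the switch-ABI case above when coro.alloc is present:
//   %need.alloc = call i1 @llvm.coro.alloc(token %id)   ; folded to 'false'
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
// becomes a stack allocation,
//   %frame = alloca %f.frame, align N
//   %vFrame = bitcast %f.frame* %frame to i8*
// with all uses of %hdl rewritten to %vFrame.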
1398
1399 // SimplifySuspendPoint needs to check that there are no calls between
1400 // coro_save and coro_suspend, since any of those calls may potentially resume
1401 // the coroutine; if that is the case, we cannot eliminate the suspend point.
1402static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1403 for (Instruction *I = From; I != To; I = I->getNextNode()) {
1404 // Assume that no intrinsic can resume the coroutine.
1405 if (isa<IntrinsicInst>(I))
1406 continue;
1407
1408 if (isa<CallBase>(I))
1409 return true;
1410 }
1411 return false;
1412}
1413
1414static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1415 SmallPtrSet<BasicBlock *, 8> Set;
1416 SmallVector<BasicBlock *, 8> Worklist;
1417
1418 Set.insert(SaveBB);
1419 Worklist.push_back(ResDesBB);
1420
1421 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1422 // returns a token consumed by the suspend instruction, all blocks in between
1423 // will eventually have to hit SaveBB when going backwards from ResDesBB.
1424 while (!Worklist.empty()) {
1425 auto *BB = Worklist.pop_back_val();
1426 Set.insert(BB);
1427 for (auto *Pred : predecessors(BB))
1428 if (!Set.contains(Pred))
1429 Worklist.push_back(Pred);
1430 }
1431
1432 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1433 Set.erase(SaveBB);
1434 Set.erase(ResDesBB);
1435
1436 for (auto *BB : Set)
1437 if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1438 return true;
1439
1440 return false;
1441}
1442
1443static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1444 auto *SaveBB = Save->getParent();
1445 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1446
1447 if (SaveBB == ResumeOrDestroyBB)
1448 return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1449
1450 // Any calls from Save to the end of the block?
1451 if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1452 return true;
1453
1454 // Any calls from the beginning of the block up to ResumeOrDestroy?
1455 if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1456 ResumeOrDestroy))
1457 return true;
1458
1459 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1460 if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1461 return true;
1462
1463 return false;
1464}
1465
1466 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1467 // suspend point and replace it with normal control flow.
1468static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1469 CoroBeginInst *CoroBegin) {
1470 Instruction *Prev = Suspend->getPrevNode();
1471 if (!Prev) {
1472 auto *Pred = Suspend->getParent()->getSinglePredecessor();
1473 if (!Pred)
1474 return false;
1475 Prev = Pred->getTerminator();
1476 }
1477
1478 CallBase *CB = dyn_cast<CallBase>(Prev);
1479 if (!CB)
1480 return false;
1481
1482 auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1483
1484 // See if the callsite is for resumption or destruction of the coroutine.
1485 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1486 if (!SubFn)
1487 return false;
1488
1489 // If it does not refer to the current coroutine, we cannot do anything with it.
1490 if (SubFn->getFrame() != CoroBegin)
1491 return false;
1492
1493 // See if the transformation is safe. Specifically, see if there are any
1494 // calls in between Save and CallInstr. They can potentially resume the
1495 // coroutine, rendering this optimization unsafe.
1496 auto *Save = Suspend->getCoroSave();
1497 if (hasCallsBetween(Save, CB))
1498 return false;
1499
1500 // Replace llvm.coro.suspend with the value that results in resumption over
1501 // the resume or cleanup path.
1502 Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1503 Suspend->eraseFromParent();
1504 Save->eraseFromParent();
1505
1506 // No longer need a call to coro.resume or coro.destroy.
1507 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1508 BranchInst::Create(Invoke->getNormalDest(), Invoke);
1509 }
1510
1511 // Grab the CalledValue from CB before erasing the CallInstr.
1512 auto *CalledValue = CB->getCalledOperand();
1513 CB->eraseFromParent();
1514
1515 // If there are no more users, remove it. Usually it is a bitcast of SubFn.
1516 if (CalledValue != SubFn && CalledValue->user_empty())
1517 if (auto *I = dyn_cast<Instruction>(CalledValue))
1518 I->eraseFromParent();
1519
1520 // Now we are good to remove SubFn.
1521 if (SubFn->user_empty())
1522 SubFn->eraseFromParent();
1523
1524 return true;
1525}
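// An illustrative instance of this simplification (switch ABI; names
// hypothetical): given
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)   ; 0 == resume
//   %fn = bitcast i8* %addr to void (i8*)*
//   call fastcc void %fn(i8* %hdl)
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
// the resume call and the suspend collapse: %s is replaced with 'i8 0'
// (the subfunction's raw index), steering control straight down the
// resume path with no actual suspension.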
1526
1527// Remove suspend points that are simplified.
1528static void simplifySuspendPoints(coro::Shape &Shape) {
1529 // Currently, the only simplification we do is switch-lowering-specific.
1530 if (Shape.ABI != coro::ABI::Switch)
1531 return;
1532
1533 auto &S = Shape.CoroSuspends;
1534 size_t I = 0, N = S.size();
1535 if (N == 0)
1536 return;
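// The loop below compacts S in place with the unordered-erase idiom:
// each simplified suspend is swapped with the last live element and N is
// shrunk, so the order of the remaining suspends is not preserved.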
1537 while (true) {
1538 auto SI = cast<CoroSuspendInst>(S[I]);
1539 // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1540 // to resume a coroutine suspended at the final suspend point.
1541 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1542 if (--N == I)
1543 break;
1544 std::swap(S[I], S[N]);
1545 continue;
1546 }
1547 if (++I == N)
1548 break;
1549 }
1550 S.resize(N);
1551}
1552
1553static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1554 SmallVectorImpl<Function *> &Clones) {
1555 assert(Shape.ABI == coro::ABI::Switch);
17. '?' condition is true
1556
1557 createResumeEntryBlock(F, Shape);
1558 auto ResumeClone = createClone(F, ".resume", Shape,
18. Calling 'createClone'
1559 CoroCloner::Kind::SwitchResume);
1560 auto DestroyClone = createClone(F, ".destroy", Shape,
1561 CoroCloner::Kind::SwitchUnwind);
1562 auto CleanupClone = createClone(F, ".cleanup", Shape,
1563 CoroCloner::Kind::SwitchCleanup);
1564
1565 postSplitCleanup(*ResumeClone);
1566 postSplitCleanup(*DestroyClone);
1567 postSplitCleanup(*CleanupClone);
1568
1569 addMustTailToCoroResumes(*ResumeClone);
1570
1571 // Store addresses of the resume/destroy/cleanup functions in the coroutine frame.
1572 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1573
1574 assert(Clones.empty());
1575 Clones.push_back(ResumeClone);
1576 Clones.push_back(DestroyClone);
1577 Clones.push_back(CleanupClone);
1578
1579 // Create a constant array referring to the resume/destroy/cleanup functions,
1580 // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1581 // pass can determine the correct function to call.
1582 setCoroInfo(F, Shape, Clones);
1583}
1584
1585static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1586 Value *Continuation) {
1587 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1588 auto &Context = Suspend->getParent()->getParent()->getContext();
1589 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1590
1591 IRBuilder<> Builder(ResumeIntrinsic);
1592 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1593 ResumeIntrinsic->replaceAllUsesWith(Val);
1594 ResumeIntrinsic->eraseFromParent();
1595 Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1596 UndefValue::get(Int8PtrTy));
1597}
1598
1599/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1600static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1601 ArrayRef<Value *> FnArgs,
1602 SmallVectorImpl<Value *> &CallArgs) {
1603 size_t ArgIdx = 0;
1604 for (auto paramTy : FnTy->params()) {
1605 assert(ArgIdx < FnArgs.size());
1606 if (paramTy != FnArgs[ArgIdx]->getType())
1607 CallArgs.push_back(
1608 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1609 else
1610 CallArgs.push_back(FnArgs[ArgIdx]);
1611 ++ArgIdx;
1612 }
1613}
1614
1615CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1616 ArrayRef<Value *> Arguments,
1617 IRBuilder<> &Builder) {
1618 auto *FnTy = MustTailCallFn->getFunctionType();
1619 // Coerce the arguments: LLVM optimizations seem to ignore the types in
1620 // vararg functions and throw away casts in optimized mode.
1621 SmallVector<Value *, 8> CallArgs;
1622 coerceArguments(Builder, FnTy, Arguments, CallArgs);
1623
1624 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1625 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1626 TailCall->setDebugLoc(Loc);
1627 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1628 return TailCall;
1629}
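// For example (types hypothetical), passing an i8* argument where the callee
// parameter is declared as %ctx* inserts
//   %arg.cast = bitcast i8* %arg to %ctx*
// ahead of the musttail call, keeping the call site well-typed.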
1630
1631static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1632 SmallVectorImpl<Function *> &Clones) {
1633 assert(Shape.ABI == coro::ABI::Async);
1634 assert(Clones.empty());
1635 // Reset various things that the optimizer might have decided it
1636 // "knows" about the coroutine function due to not seeing a return.
1637 F.removeFnAttr(Attribute::NoReturn);
1638 F.removeRetAttr(Attribute::NoAlias);
1639 F.removeRetAttr(Attribute::NonNull);
1640
1641 auto &Context = F.getContext();
1642 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1643
1644 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1645 IRBuilder<> Builder(Id);
1646
1647 auto *FramePtr = Id->getStorage();
1648 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1649 FramePtr = Builder.CreateConstInBoundsGEP1_32(
1650 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1651 "async.ctx.frameptr");
1652
1653 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1654 {
1655 // Make sure we don't invalidate Shape.FramePtr.
1656 TrackingVH<Instruction> Handle(Shape.FramePtr);
1657 Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1658 Shape.FramePtr = Handle.getValPtr();
1659 }
1660
1661 // Create all the functions in order after the main function.
1662 auto NextF = std::next(F.getIterator());
1663
1664 // Create a continuation function for each of the suspend points.
1665 Clones.reserve(Shape.CoroSuspends.size());
1666 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1667 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1668
1669 // Create the clone declaration.
1670 auto ResumeNameSuffix = ".resume.";
1671 auto ProjectionFunctionName =
1672 Suspend->getAsyncContextProjectionFunction()->getName();
1673 bool UseSwiftMangling = false;
1674 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1675 ResumeNameSuffix = "TQ";
1676 UseSwiftMangling = true;
1677 } else if (ProjectionFunctionName.equals(
1678 "__swift_async_resume_get_context")) {
1679 ResumeNameSuffix = "TY";
1680 UseSwiftMangling = true;
1681 }
1682 auto *Continuation = createCloneDeclaration(
1683 F, Shape,
1684 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1685 : ResumeNameSuffix + Twine(Idx),
1686 NextF, Suspend);
1687 Clones.push_back(Continuation);
1688
1689 // Insert a branch to a new return block immediately before the suspend
1690 // point.
1691 auto *SuspendBB = Suspend->getParent();
1692 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1693 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1694
1695 // Place it before the first suspend.
1696 auto *ReturnBB =
1697 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1698 Branch->setSuccessor(0, ReturnBB);
1699
1700 IRBuilder<> Builder(ReturnBB);
1701
1702 // Insert the call to the tail call function and inline it.
1703 auto *Fn = Suspend->getMustTailCallFunction();
1704 SmallVector<Value *, 8> Args(Suspend->args());
1705 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1706 CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1707 auto *TailCall =
1708 coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1709 Builder.CreateRetVoid();
1710 InlineFunctionInfo FnInfo;
1711 auto InlineRes = InlineFunction(*TailCall, FnInfo);
1712 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1713 (void)InlineRes;
1714
1715 // Replace the llvm.coro.async.resume intrinsic call.
1716 replaceAsyncResumeFunction(Suspend, Continuation);
1717 }
1718
1719 assert(Clones.size() == Shape.CoroSuspends.size());
1720 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1721 auto *Suspend = Shape.CoroSuspends[Idx];
1722 auto *Clone = Clones[Idx];
1723
1724 CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1725 }
1726}
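// Immediately before inlining, each rewritten async suspend point looks
// roughly like this sketch (names hypothetical; the calling convention is
// copied from the must-tail callee):
// coro.return:
//   musttail call void @mustTailFn(i8* %ctx, ...)
//   ret void
// with the code after the suspend continuing in the new continuation
// function.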
1727
1728static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1729 SmallVectorImpl<Function *> &Clones) {
1730 assert(Shape.ABI == coro::ABI::Retcon ||
1731 Shape.ABI == coro::ABI::RetconOnce);
1732 assert(Clones.empty());
1733
1734 // Reset various things that the optimizer might have decided it
1735 // "knows" about the coroutine function due to not seeing a return.
1736 F.removeFnAttr(Attribute::NoReturn);
1737 F.removeRetAttr(Attribute::NoAlias);
1738 F.removeRetAttr(Attribute::NonNull);
1739
1740 // Allocate the frame.
1741 auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1742 Value *RawFramePtr;
1743 if (Shape.RetconLowering.IsFrameInlineInStorage) {
1744 RawFramePtr = Id->getStorage();
1745 } else {
1746 IRBuilder<> Builder(Id);
1747
1748 // Determine the size of the frame.
1749 const DataLayout &DL = F.getParent()->getDataLayout();
1750 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1751
1752 // Allocate. We don't need to update the call graph node because we're
1753 // going to recompute it from scratch after splitting.
1754 // FIXME: pass the required alignment
1755 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1756 RawFramePtr =
1757 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1758
1759 // Stash the allocated frame pointer in the continuation storage.
1760 auto Dest = Builder.CreateBitCast(Id->getStorage(),
1761 RawFramePtr->getType()->getPointerTo());
1762 Builder.CreateStore(RawFramePtr, Dest);
1763 }
1764
1765 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1766 {
1767 // Make sure we don't invalidate Shape.FramePtr.
1768 TrackingVH<Instruction> Handle(Shape.FramePtr);
1769 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1770 Shape.FramePtr = Handle.getValPtr();
1771 }
1772
1773 // Create a unique return block.
1774 BasicBlock *ReturnBB = nullptr;
1775 SmallVector<PHINode *, 4> ReturnPHIs;
1776
1777 // Create all the functions in order after the main function.
1778 auto NextF = std::next(F.getIterator());
1779
1780 // Create a continuation function for each of the suspend points.
1781 Clones.reserve(Shape.CoroSuspends.size());
1782 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1783 auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1784
1785 // Create the clone declaration.
1786 auto Continuation =
1787 createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1788 Clones.push_back(Continuation);
1789
1790 // Insert a branch to the unified return block immediately before
1791 // the suspend point.
1792 auto SuspendBB = Suspend->getParent();
1793 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1794 auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1795
1796 // Create the unified return block.
1797 if (!ReturnBB) {
1798 // Place it before the first suspend.
1799 ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1800 NewSuspendBB);
1801 Shape.RetconLowering.ReturnBlock = ReturnBB;
1802
1803 IRBuilder<> Builder(ReturnBB);
1804
1805 // Create PHIs for all the return values.
1806 assert(ReturnPHIs.empty());
1807
1808 // First, the continuation.
1809 ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1810 Shape.CoroSuspends.size()));
1811
1812 // Next, all the directly-yielded values.
1813 for (auto ResultTy : Shape.getRetconResultTypes())
1814 ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1815 Shape.CoroSuspends.size()));
1816
1817 // Build the return value.
1818 auto RetTy = F.getReturnType();
1819
1820 // Cast the continuation value if necessary.
1821 // We can't rely on the types matching up because that type would
1822 // have to be infinite.
1823 auto CastedContinuationTy =
1824 (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1825 auto *CastedContinuation =
1826 Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1827
1828 Value *RetV;
1829 if (ReturnPHIs.size() == 1) {
1830 RetV = CastedContinuation;
1831 } else {
1832 RetV = UndefValue::get(RetTy);
1833 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1834 for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1835 RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1836 }
1837
1838 Builder.CreateRet(RetV);
1839 }
1840
1841 // Branch to the return block.
1842 Branch->setSuccessor(0, ReturnBB);
1843 ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1844 size_t NextPHIIndex = 1;
1845 for (auto &VUse : Suspend->value_operands())
1846 ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1847 assert(NextPHIIndex == ReturnPHIs.size());
1848 }
1849
1850 assert(Clones.size() == Shape.CoroSuspends.size());
1851 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1852 auto Suspend = Shape.CoroSuspends[i];
1853 auto Clone = Clones[i];
1854
1855 CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1856 }
1857}
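// A sketch of the unified return block built above, for a retcon coroutine
// yielding one i32 alongside the continuation (names hypothetical):
// coro.return:
//   %cont = phi void (i8*)* [ @f.resume.0, %susp0 ], [ @f.resume.1, %susp1 ]
//   %val = phi i32 [ %x, %susp0 ], [ %y, %susp1 ]
//   %cont.cast = bitcast void (i8*)* %cont to i8*
//   %agg0 = insertvalue { i8*, i32 } undef, i8* %cont.cast, 0
//   %agg1 = insertvalue { i8*, i32 } %agg0, i32 %val, 1
//   ret { i8*, i32 } %agg1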
1858
1859namespace {
1860 class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1861 Function &F;
1862 public:
1863 PrettyStackTraceFunction(Function &F) : F(F) {}
1864 void print(raw_ostream &OS) const override {
1865 OS << "While splitting coroutine ";
1866 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1867 OS << "\n";
1868 }
1869 };
1870}
1871
1872static coro::Shape splitCoroutine(Function &F,
1873 SmallVectorImpl<Function *> &Clones,
1874 bool OptimizeFrame) {
1875 PrettyStackTraceFunction prettyStackTrace(F);
1876
1877 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1878 // up by uses in unreachable blocks, so remove them as a first pass.
1879 removeUnreachableBlocks(F);
1880
1881 coro::Shape Shape(F, OptimizeFrame);
1882 if (!Shape.CoroBegin)
12. Assuming field 'CoroBegin' is non-null
13. Taking false branch
1883 return Shape;
1884
1885 simplifySuspendPoints(Shape);
1886 buildCoroutineFrame(F, Shape);
1887 replaceFrameSize(Shape);
1888
1889 // If there are no suspend points, no split required, just remove
1890 // the allocation and deallocation blocks, they are not needed.
1891 if (Shape.CoroSuspends.empty()) {
14. Taking false branch
1892 handleNoSuspendCoroutine(Shape);
1893 } else {
1894 switch (Shape.ABI) {
15. Control jumps to 'case Switch:' at line 1895
1895 case coro::ABI::Switch:
1896 splitSwitchCoroutine(F, Shape, Clones);
16. Calling 'splitSwitchCoroutine'
1897 break;
1898 case coro::ABI::Async:
1899 splitAsyncCoroutine(F, Shape, Clones);
1900 break;
1901 case coro::ABI::Retcon:
1902 case coro::ABI::RetconOnce:
1903 splitRetconCoroutine(F, Shape, Clones);
1904 break;
1905 }
1906 }
1907
1908 // Replace all the swifterror operations in the original function.
1909 // This invalidates SwiftErrorOps in the Shape.
1910 replaceSwiftErrorOps(F, Shape, nullptr);
1911
1912 return Shape;
1913}
1914
1915static void
1916updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1917 const SmallVectorImpl<Function *> &Clones,
1918 CallGraph &CG, CallGraphSCC &SCC) {
1919 if (!Shape.CoroBegin)
1920 return;
1921
1922 removeCoroEnds(Shape, &CG);
1923 postSplitCleanup(F);
1924
1925 // Update call graph and add the functions we created to the SCC.
1926 coro::updateCallGraph(F, Clones, CG, SCC);
1927}
1928
1929static void updateCallGraphAfterCoroutineSplit(
1930 LazyCallGraph::Node &N, const coro::Shape &Shape,
1931 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1932 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1933 FunctionAnalysisManager &FAM) {
1934 if (!Shape.CoroBegin)
1935 return;
1936
1937 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1938 auto &Context = End->getContext();
1939 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1940 End->eraseFromParent();
1941 }
1942
1943 if (!Clones.empty()) {
1944 switch (Shape.ABI) {
1945 case coro::ABI::Switch:
1946 // Each clone in the Switch lowering is independent of the other clones.
1947 // Let the LazyCallGraph know about each one separately.
1948 for (Function *Clone : Clones)
1949 CG.addSplitFunction(N.getFunction(), *Clone);
1950 break;
1951 case coro::ABI::Async:
1952 case coro::ABI::Retcon:
1953 case coro::ABI::RetconOnce:
1954 // Each clone in the Async/Retcon lowering references the other clones.
1955 // Let the LazyCallGraph know about all of them at once.
1956 if (!Clones.empty())
1957 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1958 break;
1959 }
1960
1961 // Let the CGSCC infra handle the changes to the original function.
1962 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1963 }
1964
1965 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1966 // to the split functions.
1967 postSplitCleanup(N.getFunction());
1968 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1969}
1970
1971 // When we see the coroutine for the first time, we insert an indirect call
1972 // to a devirt trigger function and mark the coroutine as now being ready for
1973 // split.
1974// Async lowering uses this after it has split the function to restart the
1975// pipeline.
1976static void prepareForSplit(Function &F, CallGraph &CG,
1977 bool MarkForAsyncRestart = false) {
1978 Module &M = *F.getParent();
1979 LLVMContext &Context = F.getContext();
1980#ifndef NDEBUG
1981 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1982 assert(DevirtFn && "coro.devirt.trigger function not found");
1983#endif
1984
1985 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1986 ? ASYNC_RESTART_AFTER_SPLIT
1987 : PREPARED_FOR_SPLIT);
1988
1989 // Insert an indirect call sequence that will be devirtualized by CoroElide
1990 // pass:
1991 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1992 // %1 = bitcast i8* %0 to void(i8*)*
1993 // call void %1(i8* null)
1994 coro::LowererBase Lowerer(M);
1995 Instruction *InsertPt =
1996 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1997 : F.getEntryBlock().getTerminator();
1998 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
1999 auto *DevirtFnAddr =
2000 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
2001 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
2002 {Type::getInt8PtrTy(Context)}, false);
2003 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
2004
2005 // Update CG graph with an indirect call we just added.
2006 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
2007}
2008
2009// Make sure that there is a devirtualization trigger function that the
2010// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
2011// trigger function is not found, we will create one and add it to the current
2012// SCC.
2013static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
2014 Module &M = CG.getModule();
2015 if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
2016 return;
2017
2018 LLVMContext &C = M.getContext();
2019 auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
2020 /*isVarArg=*/false);
2021 Function *DevirtFn =
2022 Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
2023 CORO_DEVIRT_TRIGGER_FN, &M);
2024 DevirtFn->addFnAttr(Attribute::AlwaysInline);
2025 auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
2026 ReturnInst::Create(C, Entry);
2027
2028 auto *Node = CG.getOrInsertFunction(DevirtFn);
2029
2030 SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
2031 Nodes.push_back(Node);
2032 SCC.initialize(Nodes);
2033}
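// The trigger built above is deliberately trivial; a sketch of the resulting
// IR:
//   define private void @coro.devirt.trigger(i8*) alwaysinline {
//   entry:
//     ret void
//   }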
2034
2035/// Replace a call to llvm.coro.prepare.retcon.
2036static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2037 LazyCallGraph::SCC &C) {
2038 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2039 auto Fn = CastFn->stripPointerCasts(); // as its original type
2040
2041 // Attempt to peephole this pattern:
2042 // %0 = bitcast [[TYPE]] @some_function to i8*
2043 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2044 // %2 = bitcast %1 to [[TYPE]]
2045 // ==>
2046 // %2 = @some_function
2047 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2048 // Look for bitcasts back to the original function type.
2049 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2050 if (!Cast || Cast->getType() != Fn->getType())
2051 continue;
2052
2053 // Replace and remove the cast.
2054 Cast->replaceAllUsesWith(Fn);
2055 Cast->eraseFromParent();
2056 }
2057
2058 // Replace any remaining uses with the function as an i8*.
2059 // This can never directly be a callee, so we don't need to update CG.
2060 Prepare->replaceAllUsesWith(CastFn);
2061 Prepare->eraseFromParent();
2062
2063 // Kill dead bitcasts.
2064 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2065 if (!Cast->use_empty())
2066 break;
2067 CastFn = Cast->getOperand(0);
2068 Cast->eraseFromParent();
2069 }
2070}
2071/// Replace a call to llvm.coro.prepare.retcon.
2072static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2073 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2074 auto Fn = CastFn->stripPointerCasts(); // as its original type
2075
2076 // Find call graph nodes for the preparation.
2077 CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2078 if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2079 PrepareUserNode = CG[Prepare->getFunction()];
2080 FnNode = CG[ConcreteFn];
2081 }
2082
2083 // Attempt to peephole this pattern:
2084 // %0 = bitcast [[TYPE]] @some_function to i8*
2085 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2086 // %2 = bitcast %1 to [[TYPE]]
2087 // ==>
2088 // %2 = @some_function
2089 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2090 // Look for bitcasts back to the original function type.
2091 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2092 if (!Cast || Cast->getType() != Fn->getType()) continue;
2093
2094 // Check whether the replacement will introduce new direct calls.
2095 // If so, we'll need to update the call graph.
2096 if (PrepareUserNode) {
2097 for (auto &Use : Cast->uses()) {
2098 if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2099 if (!CB->isCallee(&Use))
2100 continue;
2101 PrepareUserNode->removeCallEdgeFor(*CB);
2102 PrepareUserNode->addCalledFunction(CB, FnNode);
2103 }
2104 }
2105 }
2106
2107 // Replace and remove the cast.
2108 Cast->replaceAllUsesWith(Fn);
2109 Cast->eraseFromParent();
2110 }
2111
2112 // Replace any remaining uses with the function as an i8*.
2113 // This can never directly be a callee, so we don't need to update CG.
2114 Prepare->replaceAllUsesWith(CastFn);
2115 Prepare->eraseFromParent();
2116
2117 // Kill dead bitcasts.
2118 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2119 if (!Cast->use_empty()) break;
2120 CastFn = Cast->getOperand(0);
2121 Cast->eraseFromParent();
2122 }
2123}
2124
2125static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2126 LazyCallGraph::SCC &C) {
2127 bool Changed = false;
2128 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2129 // Intrinsics can only be used in calls.
2130 auto *Prepare = cast<CallInst>(P.getUser());
2131 replacePrepare(Prepare, CG, C);
2132 Changed = true;
2133 }
2134
2135 return Changed;
2136}
2137
2138/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2139/// IPO from operating on calls to a retcon coroutine before it's been
2140/// split. This is only safe to do after we've split all retcon
2141 /// coroutines in the module. We can do this in this pass because
2142/// this pass does promise to split all retcon coroutines (as opposed to
2143/// switch coroutines, which are lowered in multiple stages).
2144static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2145 bool Changed = false;
2146 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2147 // Intrinsics can only be used in calls.
2148 auto *Prepare = cast<CallInst>(P.getUser());
2149 replacePrepare(Prepare, CG);
2150 Changed = true;
2151 }
2152
2153 return Changed;
2154}
2155
2156static bool declaresCoroSplitIntrinsics(const Module &M) {
2157 return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2158 "llvm.coro.prepare.retcon",
2159 "llvm.coro.prepare.async"});
2160}
2161
2162static void addPrepareFunction(const Module &M,
2163 SmallVectorImpl<Function *> &Fns,
2164 StringRef Name) {
2165 auto *PrepareFn = M.getFunction(Name);
2166 if (PrepareFn && !PrepareFn->use_empty())
2167 Fns.push_back(PrepareFn);
2168}
2169
2170PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2171 CGSCCAnalysisManager &AM,
2172 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2173 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2174 // non-zero number of nodes, so we assume that here and grab the first
2175 // node's function's module.
2176 Module &M = *C.begin()->getFunction().getParent();
2177 auto &FAM =
2178 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2179
2180 if (!declaresCoroSplitIntrinsics(M))
2181 return PreservedAnalyses::all();
2182
2183 // Check for uses of llvm.coro.prepare.retcon/async.
2184 SmallVector<Function *, 2> PrepareFns;
2185 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2186 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2187
2188 // Find coroutines for processing.
2189 SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2190 for (LazyCallGraph::Node &N : C)
2191 if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2192 Coroutines.push_back(&N);
2193
2194 if (Coroutines.empty() && PrepareFns.empty())
2195 return PreservedAnalyses::all();
2196
2197 if (Coroutines.empty()) {
2198 for (auto *PrepareFn : PrepareFns) {
2199 replaceAllPrepares(PrepareFn, CG, C);
2200 }
2201 }
2202
2203 // Split all the coroutines.
2204 for (LazyCallGraph::Node *N : Coroutines) {
2205 Function &F = N->getFunction();
2206 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2207 << "' state: "
2208 << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2209 << "\n");
2210 F.removeFnAttr(CORO_PRESPLIT_ATTR);
2211
2212 SmallVector<Function *, 4> Clones;
2213 const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
2214 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2215
2216 if (!Shape.CoroSuspends.empty()) {
2217 // Run the CGSCC pipeline on the original and newly split functions.
2218 UR.CWorklist.insert(&C);
2219 for (Function *Clone : Clones)
2220 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2221 }
2222 }
2223
2224 if (!PrepareFns.empty()) {
2225 for (auto *PrepareFn : PrepareFns) {
2226 replaceAllPrepares(PrepareFn, CG, C);
2227 }
2228 }
2229
2230 return PreservedAnalyses::none();
2231}
2232
2233namespace {
2234
2235// We present a coroutine to LLVM as an ordinary function with suspension
2236// points marked up with intrinsics. We let the optimizer party on the coroutine
2237// as a single function for as long as possible. Shortly before the coroutine is
2238// eligible to be inlined into its callers, we split up the coroutine into parts
2239// corresponding to initial, resume and destroy invocations of the coroutine,
2240// add them to the current SCC and restart the IPO pipeline to optimize the
2241// coroutine subfunctions we extracted before proceeding to the caller of the
2242// coroutine.
2243struct CoroSplitLegacy : public CallGraphSCCPass {
2244 static char ID; // Pass identification, replacement for typeid
2245
2246 CoroSplitLegacy(bool OptimizeFrame = false)
2247 : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
2248 initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2249 }
2250
2251 bool Run = false;
2252 bool OptimizeFrame;
2253
2254 // A coroutine is identified by the presence of the coro.begin intrinsic; if
2255 // we don't have any, this pass has nothing to do.
2256 bool doInitialization(CallGraph &CG) override {
2257 Run = declaresCoroSplitIntrinsics(CG.getModule());
2258 return CallGraphSCCPass::doInitialization(CG);
2259 }
2260
2261 bool runOnSCC(CallGraphSCC &SCC) override {
2262 if (!Run)
1. Assuming field 'Run' is true
2. Taking false branch
2263 return false;
2264
2265 // Check for uses of llvm.coro.prepare.retcon.
2266 SmallVector<Function *, 2> PrepareFns;
2267 auto &M = SCC.getCallGraph().getModule();
2268 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2269 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2270
2271 // Find coroutines for processing.
2272 SmallVector<Function *, 4> Coroutines;
2273 for (CallGraphNode *CGN : SCC)
2274 if (auto *F = CGN->getFunction())
2275 if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2276 Coroutines.push_back(F);
2277
2278 if (Coroutines.empty() && PrepareFns.empty())
2279 return false;
2280
2281 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2282
2283 if (Coroutines.empty()) {
3. Taking false branch
2284 bool Changed = false;
2285 for (auto *PrepareFn : PrepareFns)
2286 Changed |= replaceAllPrepares(PrepareFn, CG);
2287 return Changed;
2288 }
2289
2290 createDevirtTriggerFunc(CG, SCC);
2291
2292 // Split all the coroutines.
2293 for (Function *F : Coroutines) {
4. Assuming '__begin2' is not equal to '__end2'
2294 Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2295 StringRef Value = Attr.getValueAsString();
2296 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
5. Assuming 'DebugFlag' is false
6. Loop condition is false. Exiting loop
2297 << "' state: " << Value << "\n");
2298 // Async lowering marks coroutines to trigger a restart of the pipeline
2299 // after it has split them.
2300 if (Value == ASYNC_RESTART_AFTER_SPLIT) {
7. Assuming the condition is false
8. Taking false branch
2301 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2302 continue;
2303 }
2304 if (Value == UNPREPARED_FOR_SPLIT) {
9. Assuming the condition is false
10. Taking false branch
2305 prepareForSplit(*F, CG);
2306 continue;
2307 }
2308 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2309
2310 SmallVector<Function *, 4> Clones;
2311 const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
11. Calling 'splitCoroutine'
2312 updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2313 if (Shape.ABI == coro::ABI::Async) {
2314 // Restart SCC passes.
2315 // Mark function for CoroElide pass. It will devirtualize causing a
2316 // restart of the SCC pipeline.
2317 prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2318 }
2319 }
2320
2321 for (auto *PrepareFn : PrepareFns)
2322 replaceAllPrepares(PrepareFn, CG);
2323
2324 return true;
2325 }
2326
2327 void getAnalysisUsage(AnalysisUsage &AU) const override {
2328 CallGraphSCCPass::getAnalysisUsage(AU);
2329 }
2330
2331 StringRef getPassName() const override { return "Coroutine Splitting"; }
2332};
2333
2334} // end anonymous namespace
2335
2336char CoroSplitLegacy::ID = 0;
2337
2338 INITIALIZE_PASS_BEGIN(
2339 CoroSplitLegacy, "coro-split",
2340 "Split coroutine into a set of functions driving its state machine", false,
2341 false)
2342 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2343 INITIALIZE_PASS_END(
2344 CoroSplitLegacy, "coro-split",
2345 "Split coroutine into a set of functions driving its state machine", false,
2346 false)
2347
2348Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
2349 return new CoroSplitLegacy(OptimizeFrame);
2350}