Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
Warning: line 491, column 19
Called C++ object pointer is null
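
The path below (steps 19 through 34) threads through CoroCloner: the
switch-lowering constructor leaves ActiveSuspend null, create() passes it to
createCloneDeclaration(), and getFunctionTypeFromAsyncSuspend() finally
dereferences it. The following hand-condensed sketch of that flow uses stubbed
hypothetical types for orientation only; it is not the LLVM code itself.

    // Condensed model of the reported path (hypothetical stub types).
    struct AnyCoroSuspendInst {
      int getParentId() { return 0; }        // stands in for getParent()
    };

    struct CoroCloner {
      AnyCoroSuspendInst *ActiveSuspend = nullptr; // step 20: switch-lowering
                                                   // ctor leaves this null
      void create();
    };

    static int typeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
      return Suspend->getParentId();         // step 34: member call on null
    }

    static void cloneDeclaration(bool IsAsync, AnyCoroSuspendInst *Suspend) {
      if (IsAsync)                           // steps 29-30: async branch taken
        typeFromAsyncSuspend(Suspend);       // steps 31-32: null flows onward
    }

    void CoroCloner::create() {              // step 24
      cloneDeclaration(/*IsAsync=*/true, ActiveSuspend); // steps 27-28
    }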

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CoroSplit.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/Coroutines -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
11// We present a coroutine to LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to the initial, resume, and destroy invocations of the coroutine,
16// add them to the current SCC, and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
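
For orientation, this is the kind of input the pass ultimately sees: a
source-level coroutine whose co_await points the frontend lowers to
@llvm.coro.suspend intrinsics. The snippet below is an illustrative C++20
sketch; the `task` type is a minimal stand-in written for this example and is
not part of this file.

    #include <coroutine>
    #include <cstdio>

    struct task {                         // minimal coroutine return type
      struct promise_type {
        task get_return_object() { return {}; }
        std::suspend_never initial_suspend() { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    task example() {
      std::puts("ramp");                  // stays in the initial part
      co_await std::suspend_always{};     // becomes @llvm.coro.suspend, the
                                          // boundary this pass outlines at
      std::puts("resumed");               // lands in the outlined resume part
    }                                     // (nothing resumes it in this sketch)
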
20
21#include "llvm/Transforms/Coroutines/CoroSplit.h"
22#include "CoroInstr.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/PriorityWorklist.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/StringRef.h"
29#include "llvm/ADT/Twine.h"
30#include "llvm/Analysis/CFG.h"
31#include "llvm/Analysis/CallGraph.h"
32#include "llvm/Analysis/CallGraphSCCPass.h"
33#include "llvm/Analysis/ConstantFolding.h"
34#include "llvm/Analysis/LazyCallGraph.h"
35#include "llvm/BinaryFormat/Dwarf.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/CFG.h"
40#include "llvm/IR/CallingConv.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GlobalValue.h"
47#include "llvm/IR/GlobalVariable.h"
48#include "llvm/IR/IRBuilder.h"
49#include "llvm/IR/InstIterator.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/LegacyPassManager.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/Type.h"
58#include "llvm/IR/Value.h"
59#include "llvm/IR/Verifier.h"
60#include "llvm/InitializePasses.h"
61#include "llvm/Pass.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/PrettyStackTrace.h"
65#include "llvm/Support/raw_ostream.h"
66#include "llvm/Transforms/Scalar.h"
67#include "llvm/Transforms/Utils/BasicBlockUtils.h"
68#include "llvm/Transforms/Utils/CallGraphUpdater.h"
69#include "llvm/Transforms/Utils/Cloning.h"
70#include "llvm/Transforms/Utils/Local.h"
71#include "llvm/Transforms/Utils/ValueMapper.h"
72#include <cassert>
73#include <cstddef>
74#include <cstdint>
75#include <initializer_list>
76#include <iterator>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "coro-split"
81
82namespace {
83
84/// A little helper class for building the clones of the coroutine function.
85class CoroCloner {
86public:
87 enum class Kind {
88 /// The shared resume function for a switch lowering.
89 SwitchResume,
90
91 /// The shared unwind function for a switch lowering.
92 SwitchUnwind,
93
94 /// The shared cleanup function for a switch lowering.
95 SwitchCleanup,
96
97 /// An individual continuation function.
98 Continuation,
99
100 /// An async resume function.
101 Async,
102 };
103
104private:
105 Function &OrigF;
106 Function *NewF;
107 const Twine &Suffix;
108 coro::Shape &Shape;
109 Kind FKind;
110 ValueToValueMapTy VMap;
111 IRBuilder<> Builder;
112 Value *NewFramePtr = nullptr;
113
114 /// The active suspend instruction; meaningful only for continuation and async
115 /// ABIs.
116 AnyCoroSuspendInst *ActiveSuspend = nullptr;
Step 20: Null pointer value stored to 'Cloner.ActiveSuspend'
117
118public:
119 /// Create a cloner for a switch lowering.
120 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
121 Kind FKind)
122 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
123 FKind(FKind), Builder(OrigF.getContext()) {
 124 assert(Shape.ABI == coro::ABI::Switch);
Step 21: Assuming field 'ABI' is equal to Switch
Step 22: '?' condition is true
125 }
126
127 /// Create a cloner for a continuation lowering.
128 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
129 Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
130 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
131 FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
132 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
 133 assert(Shape.ABI == coro::ABI::Retcon ||
 134 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
 135 assert(NewF && "need existing function for continuation");
 136 assert(ActiveSuspend && "need active suspend point for continuation");
137 }
138
139 Function *getFunction() const {
 140 assert(NewF != nullptr && "declaration not yet set");
141 return NewF;
142 }
143
144 void create();
145
146private:
147 bool isSwitchDestroyFunction() {
148 switch (FKind) {
149 case Kind::Async:
150 case Kind::Continuation:
151 case Kind::SwitchResume:
152 return false;
153 case Kind::SwitchUnwind:
154 case Kind::SwitchCleanup:
155 return true;
156 }
 157 llvm_unreachable("Unknown CoroCloner::Kind enum");
158 }
159
160 void replaceEntryBlock();
161 Value *deriveNewFramePointer();
162 void replaceRetconOrAsyncSuspendUses();
163 void replaceCoroSuspends();
164 void replaceCoroEnds();
165 void replaceSwiftErrorOps();
166 void salvageDebugInfo();
167 void handleFinalSuspend();
168};
169
170} // end anonymous namespace
171
172static void maybeFreeRetconStorage(IRBuilder<> &Builder,
173 const coro::Shape &Shape, Value *FramePtr,
174 CallGraph *CG) {
 175 assert(Shape.ABI == coro::ABI::Retcon ||
 176 Shape.ABI == coro::ABI::RetconOnce);
177 if (Shape.RetconLowering.IsFrameInlineInStorage)
178 return;
179
180 Shape.emitDealloc(Builder, FramePtr, CG);
181}
182
183/// Replace an llvm.coro.end.async.
184/// Will inline the musttail function call, if there is one.
185/// \returns true if cleanup of the coro.end block is needed, false otherwise.
186static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
187 IRBuilder<> Builder(End);
188
189 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
190 if (!EndAsync) {
191 Builder.CreateRetVoid();
192 return true /*needs cleanup of coro.end block*/;
193 }
194
195 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
196 if (!MustTailCallFunc) {
197 Builder.CreateRetVoid();
198 return true /*needs cleanup of coro.end block*/;
199 }
200
201 // Move the must tail call from the predecessor block into the end block.
202 auto *CoroEndBlock = End->getParent();
203 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
 204 assert(MustTailCallFuncBlock && "Must have a single predecessor block");
205 auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
206 auto *MustTailCall = cast<CallInst>(&*std::prev(It));
207 CoroEndBlock->getInstList().splice(
208 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
209
210 // Insert the return instruction.
211 Builder.SetInsertPoint(End);
212 Builder.CreateRetVoid();
213 InlineFunctionInfo FnInfo;
214
215 // Remove the rest of the block, by splitting it into an unreachable block.
216 auto *BB = End->getParent();
217 BB->splitBasicBlock(End);
218 BB->getTerminator()->eraseFromParent();
219
220 auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
 221 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
222 (void)InlineRes;
223
224 // We have cleaned up the coro.end block above.
225 return false;
226}
227
228/// Replace a non-unwind call to llvm.coro.end.
229static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
230 const coro::Shape &Shape, Value *FramePtr,
231 bool InResume, CallGraph *CG) {
232 // Start inserting right before the coro.end.
233 IRBuilder<> Builder(End);
234
235 // Create the return instruction.
236 switch (Shape.ABI) {
237 // The cloned functions in switch-lowering always return void.
238 case coro::ABI::Switch:
239 // coro.end doesn't immediately end the coroutine in the main function
240 // in this lowering, because we need to deallocate the coroutine.
241 if (!InResume)
242 return;
243 Builder.CreateRetVoid();
244 break;
245
246 // In async lowering this returns.
247 case coro::ABI::Async: {
248 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
249 if (!CoroEndBlockNeedsCleanup)
250 return;
251 break;
252 }
253
254 // In unique continuation lowering, the continuations always return void.
255 // But we may have implicitly allocated storage.
256 case coro::ABI::RetconOnce:
257 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
258 Builder.CreateRetVoid();
259 break;
260
261 // In non-unique continuation lowering, we signal completion by returning
262 // a null continuation.
263 case coro::ABI::Retcon: {
264 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
265 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
266 auto RetStructTy = dyn_cast<StructType>(RetTy);
267 PointerType *ContinuationTy =
268 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
269
270 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
271 if (RetStructTy) {
272 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
273 ReturnValue, 0);
274 }
275 Builder.CreateRet(ReturnValue);
276 break;
277 }
278 }
279
280 // Remove the rest of the block, by splitting it into an unreachable block.
281 auto *BB = End->getParent();
282 BB->splitBasicBlock(End);
283 BB->getTerminator()->eraseFromParent();
284}
285
286// Mark a coroutine as done, which implies that the coroutine is finished and
287// will never be resumed.
288//
289// In the switch-resumed ABI, the done state is represented by storing zero in
290// ResumeFnAddr.
291//
292// NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
293// pointer to the frame in the split functions is not stored in `Shape`.
294static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
295 Value *FramePtr) {
 296 assert(
 297 Shape.ABI == coro::ABI::Switch &&
 298 "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
299 auto *GepIndex = Builder.CreateStructGEP(
300 Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
301 "ResumeFn.addr");
302 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
303 Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
304 Builder.CreateStore(NullPtr, GepIndex);
305}
306
307/// Replace an unwind call to llvm.coro.end.
308static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
309 Value *FramePtr, bool InResume,
310 CallGraph *CG) {
311 IRBuilder<> Builder(End);
312
313 switch (Shape.ABI) {
314 // In switch-lowering, this does nothing in the main function.
315 case coro::ABI::Switch: {
316 // In C++'s specification, the coroutine should be marked as done
317 // if promise.unhandled_exception() throws. The frontend will
318 // call coro.end(true) along this path.
319 //
 320 // FIXME: We should refactor this once another language besides C++
 321 // uses the switch-resumed style.
322 markCoroutineAsDone(Builder, Shape, FramePtr);
323 if (!InResume)
324 return;
325 break;
326 }
327 // In async lowering this does nothing.
328 case coro::ABI::Async:
329 break;
330 // In continuation-lowering, this frees the continuation storage.
331 case coro::ABI::Retcon:
332 case coro::ABI::RetconOnce:
333 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
334 break;
335 }
336
337 // If coro.end has an associated bundle, add cleanupret instruction.
338 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
339 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
340 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
341 End->getParent()->splitBasicBlock(End);
342 CleanupRet->getParent()->getTerminator()->eraseFromParent();
343 }
344}
345
346static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
347 Value *FramePtr, bool InResume, CallGraph *CG) {
348 if (End->isUnwind())
349 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
350 else
351 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
352
353 auto &Context = End->getContext();
354 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
355 : ConstantInt::getFalse(Context));
356 End->eraseFromParent();
357}
358
359// Create an entry block for a resume function with a switch that will jump to
360// suspend points.
361static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
 362 assert(Shape.ABI == coro::ABI::Switch);
363 LLVMContext &C = F.getContext();
364
365 // resume.entry:
366 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
367 // i32 2
368 // % index = load i32, i32* %index.addr
369 // switch i32 %index, label %unreachable [
370 // i32 0, label %resume.0
371 // i32 1, label %resume.1
372 // ...
373 // ]
374
375 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
376 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
377
378 IRBuilder<> Builder(NewEntry);
379 auto *FramePtr = Shape.FramePtr;
380 auto *FrameTy = Shape.FrameTy;
381 auto *GepIndex = Builder.CreateStructGEP(
382 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
383 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
384 auto *Switch =
385 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
386 Shape.SwitchLowering.ResumeSwitch = Switch;
387
388 size_t SuspendIndex = 0;
389 for (auto *AnyS : Shape.CoroSuspends) {
390 auto *S = cast<CoroSuspendInst>(AnyS);
391 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
392
393 // Replace CoroSave with a store to Index:
394 // %index.addr = getelementptr %f.frame... (index field number)
395 // store i32 0, i32* %index.addr1
396 auto *Save = S->getCoroSave();
397 Builder.SetInsertPoint(Save);
398 if (S->isFinal()) {
399 // The coroutine should be marked done if it reaches the final suspend
400 // point.
401 markCoroutineAsDone(Builder, Shape, FramePtr);
402 } else {
403 auto *GepIndex = Builder.CreateStructGEP(
404 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
405 Builder.CreateStore(IndexVal, GepIndex);
406 }
407 Save->replaceAllUsesWith(ConstantTokenNone::get(C));
408 Save->eraseFromParent();
409
410 // Split block before and after coro.suspend and add a jump from an entry
411 // switch:
412 //
413 // whateverBB:
414 // whatever
415 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
416 // switch i8 %0, label %suspend[i8 0, label %resume
417 // i8 1, label %cleanup]
418 // becomes:
419 //
420 // whateverBB:
421 // whatever
422 // br label %resume.0.landing
423 //
424 // resume.0: ; <--- jump from the switch in the resume.entry
425 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
426 // br label %resume.0.landing
427 //
428 // resume.0.landing:
429 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
430 // switch i8 % 1, label %suspend [i8 0, label %resume
431 // i8 1, label %cleanup]
432
433 auto *SuspendBB = S->getParent();
434 auto *ResumeBB =
435 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
436 auto *LandingBB = ResumeBB->splitBasicBlock(
437 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
438 Switch->addCase(IndexVal, ResumeBB);
439
440 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
441 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
442 S->replaceAllUsesWith(PN);
443 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
444 PN->addIncoming(S, ResumeBB);
445
446 ++SuspendIndex;
447 }
448
449 Builder.SetInsertPoint(UnreachBB);
450 Builder.CreateUnreachable();
451
452 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
453}
454
455
456// Rewrite final suspend point handling. We do not use the suspend index to
457// represent the final suspend point. Instead we zero out ResumeFnAddr in the
458// coroutine frame, since it is undefined behavior to resume a coroutine
459// suspended at the final suspend point. Thus, in the resume function, we can
460// simply remove the last case (when coro::Shape is built, the final suspend
461// point (if present) is always the last element of the CoroSuspends array).
462// In the destroy function, we add a code sequence to check if ResumeFnAddr
463// is null, and if so, jump to the appropriate label to handle cleanup from the
464// final suspend point.
465void CoroCloner::handleFinalSuspend() {
 466 assert(Shape.ABI == coro::ABI::Switch &&
 467 Shape.SwitchLowering.HasFinalSuspend);
468 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
469 auto FinalCaseIt = std::prev(Switch->case_end());
470 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
471 Switch->removeCase(FinalCaseIt);
472 if (isSwitchDestroyFunction()) {
473 BasicBlock *OldSwitchBB = Switch->getParent();
474 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
475 Builder.SetInsertPoint(OldSwitchBB->getTerminator());
476 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
477 coro::Shape::SwitchFieldIndex::Resume,
478 "ResumeFn.addr");
479 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
480 GepIndex);
481 auto *Cond = Builder.CreateIsNull(Load);
482 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
483 OldSwitchBB->getTerminator()->eraseFromParent();
484 }
485}
486
487static FunctionType *
488getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
489 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
490 auto *StructTy = cast<StructType>(AsyncSuspend->getType());
Step 33: The object is a 'StructType'
491 auto &Context = Suspend->getParent()->getParent()->getContext();
Step 34: Called C++ object pointer is null
492 auto *VoidTy = Type::getVoidTy(Context);
493 return FunctionType::get(VoidTy, StructTy->elements(), false);
494}
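
The dereference flagged above at line 491 (step 34) executes only when
Shape.ABI is Async, and in that case every CoroCloner is constructed with a
non-null ActiveSuspend (see the assert at line 136). A sketch of how that
invariant could be restated where the analyzer loses track of it; this is an
editor's illustration, not the upstream fix:

    static FunctionType *
    getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
      assert(Suspend && "async lowering requires an active suspend point");
      auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
      auto *StructTy = cast<StructType>(AsyncSuspend->getType());
      auto &Context = Suspend->getParent()->getParent()->getContext();
      return FunctionType::get(Type::getVoidTy(Context), StructTy->elements(),
                               false);
    }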
495
496static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
497 const Twine &Suffix,
498 Module::iterator InsertBefore,
499 AnyCoroSuspendInst *ActiveSuspend) {
500 Module *M = OrigF.getParent();
501 auto *FnTy = (Shape.ABI != coro::ABI::Async)
Step 29: Assuming field 'ABI' is equal to Async
Step 30: '?' condition is false
502 ? Shape.getResumeFunctionType()
503 : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
Step 31: Passing null pointer value via 1st parameter 'Suspend'
Step 32: Calling 'getFunctionTypeFromAsyncSuspend'
504
505 Function *NewF =
506 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
507 OrigF.getName() + Suffix);
508 if (Shape.ABI != coro::ABI::Async)
509 NewF->addParamAttr(0, Attribute::NonNull);
510
 511 // For the async lowering ABI we can't guarantee that the context argument is
 512 // not accessed via a different pointer not based on the argument.
513 if (Shape.ABI != coro::ABI::Async)
514 NewF->addParamAttr(0, Attribute::NoAlias);
515
516 M->getFunctionList().insert(InsertBefore, NewF);
517
518 return NewF;
519}
520
521/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
522/// arguments to the continuation function.
523///
524/// This assumes that the builder has a meaningful insertion point.
525void CoroCloner::replaceRetconOrAsyncSuspendUses() {
 526 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
 527 Shape.ABI == coro::ABI::Async);
528
529 auto NewS = VMap[ActiveSuspend];
530 if (NewS->use_empty()) return;
531
532 // Copy out all the continuation arguments after the buffer pointer into
533 // an easily-indexed data structure for convenience.
534 SmallVector<Value*, 8> Args;
535 // The async ABI includes all arguments -- including the first argument.
536 bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
537 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
538 E = NewF->arg_end();
539 I != E; ++I)
540 Args.push_back(&*I);
541
542 // If the suspend returns a single scalar value, we can just do a simple
543 // replacement.
544 if (!isa<StructType>(NewS->getType())) {
 545 assert(Args.size() == 1);
546 NewS->replaceAllUsesWith(Args.front());
547 return;
548 }
549
550 // Try to peephole extracts of an aggregate return.
551 for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
552 auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
553 if (!EVI || EVI->getNumIndices() != 1)
554 continue;
555
556 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
557 EVI->eraseFromParent();
558 }
559
560 // If we have no remaining uses, we're done.
561 if (NewS->use_empty()) return;
562
563 // Otherwise, we need to create an aggregate.
564 Value *Agg = UndefValue::get(NewS->getType());
565 for (size_t I = 0, E = Args.size(); I != E; ++I)
566 Agg = Builder.CreateInsertValue(Agg, Args[I], I);
567
568 NewS->replaceAllUsesWith(Agg);
569}
570
571void CoroCloner::replaceCoroSuspends() {
572 Value *SuspendResult;
573
574 switch (Shape.ABI) {
575 // In switch lowering, replace coro.suspend with the appropriate value
576 // for the type of function we're extracting.
577 // Replacing coro.suspend with (0) will result in control flow proceeding to
 578 // a resume label associated with a suspend point; replacing it with (1) will
579 // result in control flow proceeding to a cleanup label associated with this
580 // suspend point.
581 case coro::ABI::Switch:
582 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
583 break;
584
585 // In async lowering there are no uses of the result.
586 case coro::ABI::Async:
587 return;
588
589 // In returned-continuation lowering, the arguments from earlier
590 // continuations are theoretically arbitrary, and they should have been
591 // spilled.
592 case coro::ABI::RetconOnce:
593 case coro::ABI::Retcon:
594 return;
595 }
596
597 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
598 // The active suspend was handled earlier.
599 if (CS == ActiveSuspend) continue;
600
601 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
602 MappedCS->replaceAllUsesWith(SuspendResult);
603 MappedCS->eraseFromParent();
604 }
605}
606
607void CoroCloner::replaceCoroEnds() {
608 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
609 // We use a null call graph because there's no call graph node for
610 // the cloned function yet. We'll just be rebuilding that later.
611 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
612 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
613 }
614}
615
616static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
617 ValueToValueMapTy *VMap) {
618 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
619 return;
620 Value *CachedSlot = nullptr;
621 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
622 if (CachedSlot) {
 623 assert(cast<PointerType>(CachedSlot->getType())
 624 ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
 625 "multiple swifterror slots in function with different types");
626 return CachedSlot;
627 }
628
629 // Check if the function has a swifterror argument.
630 for (auto &Arg : F.args()) {
631 if (Arg.isSwiftError()) {
632 CachedSlot = &Arg;
 633 assert(cast<PointerType>(Arg.getType())
 634 ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
 635 "swifterror argument does not have expected type");
636 return &Arg;
637 }
638 }
639
640 // Create a swifterror alloca.
641 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
642 auto Alloca = Builder.CreateAlloca(ValueTy);
643 Alloca->setSwiftError(true);
644
645 CachedSlot = Alloca;
646 return Alloca;
647 };
648
649 for (CallInst *Op : Shape.SwiftErrorOps) {
650 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
651 IRBuilder<> Builder(MappedOp);
652
653 // If there are no arguments, this is a 'get' operation.
654 Value *MappedResult;
655 if (Op->arg_empty()) {
656 auto ValueTy = Op->getType();
657 auto Slot = getSwiftErrorSlot(ValueTy);
658 MappedResult = Builder.CreateLoad(ValueTy, Slot);
659 } else {
 660 assert(Op->arg_size() == 1);
661 auto Value = MappedOp->getArgOperand(0);
662 auto ValueTy = Value->getType();
663 auto Slot = getSwiftErrorSlot(ValueTy);
664 Builder.CreateStore(Value, Slot);
665 MappedResult = Slot;
666 }
667
668 MappedOp->replaceAllUsesWith(MappedResult);
669 MappedOp->eraseFromParent();
670 }
671
672 // If we're updating the original function, we've invalidated SwiftErrorOps.
673 if (VMap == nullptr) {
674 Shape.SwiftErrorOps.clear();
675 }
676}
677
678void CoroCloner::replaceSwiftErrorOps() {
679 ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
680}
681
682void CoroCloner::salvageDebugInfo() {
683 SmallVector<DbgVariableIntrinsic *, 8> Worklist;
684 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
685 for (auto &BB : *NewF)
686 for (auto &I : BB)
687 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
688 Worklist.push_back(DVI);
689 for (DbgVariableIntrinsic *DVI : Worklist)
690 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
691
692 // Remove all salvaged dbg.declare intrinsics that became
693 // either unreachable or stale due to the CoroSplit transformation.
694 DominatorTree DomTree(*NewF);
695 auto IsUnreachableBlock = [&](BasicBlock *BB) {
696 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
697 &DomTree);
698 };
699 for (DbgVariableIntrinsic *DVI : Worklist) {
700 if (IsUnreachableBlock(DVI->getParent()))
701 DVI->eraseFromParent();
702 else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
703 // Count all non-debuginfo uses in reachable blocks.
704 unsigned Uses = 0;
705 for (auto *User : DVI->getVariableLocationOp(0)->users())
706 if (auto *I = dyn_cast<Instruction>(User))
707 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
708 ++Uses;
709 if (!Uses)
710 DVI->eraseFromParent();
711 }
712 }
713}
714
715void CoroCloner::replaceEntryBlock() {
716 // In the original function, the AllocaSpillBlock is a block immediately
717 // following the allocation of the frame object which defines GEPs for
718 // all the allocas that have been moved into the frame, and it ends by
719 // branching to the original beginning of the coroutine. Make this
720 // the entry block of the cloned function.
721 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
722 auto *OldEntry = &NewF->getEntryBlock();
723 Entry->setName("entry" + Suffix);
724 Entry->moveBefore(OldEntry);
725 Entry->getTerminator()->eraseFromParent();
726
727 // Clear all predecessors of the new entry block. There should be
728 // exactly one predecessor, which we created when splitting out
729 // AllocaSpillBlock to begin with.
 730 assert(Entry->hasOneUse());
731 auto BranchToEntry = cast<BranchInst>(Entry->user_back());
 732 assert(BranchToEntry->isUnconditional());
733 Builder.SetInsertPoint(BranchToEntry);
734 Builder.CreateUnreachable();
735 BranchToEntry->eraseFromParent();
736
737 // Branch from the entry to the appropriate place.
738 Builder.SetInsertPoint(Entry);
739 switch (Shape.ABI) {
740 case coro::ABI::Switch: {
741 // In switch-lowering, we built a resume-entry block in the original
742 // function. Make the entry block branch to this.
743 auto *SwitchBB =
744 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
745 Builder.CreateBr(SwitchBB);
746 break;
747 }
748 case coro::ABI::Async:
749 case coro::ABI::Retcon:
750 case coro::ABI::RetconOnce: {
751 // In continuation ABIs, we want to branch to immediately after the
752 // active suspend point. Earlier phases will have put the suspend in its
753 // own basic block, so just thread our jump directly to its successor.
 754 assert((Shape.ABI == coro::ABI::Async &&
 755 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
 756 ((Shape.ABI == coro::ABI::Retcon ||
 757 Shape.ABI == coro::ABI::RetconOnce) &&
 758 isa<CoroSuspendRetconInst>(ActiveSuspend)));
759 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
760 auto Branch = cast<BranchInst>(MappedCS->getNextNode());
 761 assert(Branch->isUnconditional());
762 Builder.CreateBr(Branch->getSuccessor(0));
763 break;
764 }
765 }
766
767 // Any static alloca that's still being used but not reachable from the new
768 // entry needs to be moved to the new entry.
769 Function *F = OldEntry->getParent();
770 DominatorTree DT{*F};
771 for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
772 auto *Alloca = dyn_cast<AllocaInst>(&I);
773 if (!Alloca || I.use_empty())
774 continue;
775 if (DT.isReachableFromEntry(I.getParent()) ||
776 !isa<ConstantInt>(Alloca->getArraySize()))
777 continue;
778 I.moveBefore(*Entry, Entry->getFirstInsertionPt());
779 }
780}
781
782/// Derive the value of the new frame pointer.
783Value *CoroCloner::deriveNewFramePointer() {
784 // Builder should be inserting to the front of the new entry block.
785
786 switch (Shape.ABI) {
787 // In switch-lowering, the argument is the frame pointer.
788 case coro::ABI::Switch:
789 return &*NewF->arg_begin();
790 // In async-lowering, one of the arguments is an async context as determined
791 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
792 // the resume function from the async context projection function associated
793 // with the active suspend. The frame is located as a tail to the async
794 // context header.
795 case coro::ABI::Async: {
796 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
797 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
798 auto *CalleeContext = NewF->getArg(ContextIdx);
799 auto *FramePtrTy = Shape.FrameTy->getPointerTo();
800 auto *ProjectionFunc =
801 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
802 auto DbgLoc =
803 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
804 // Calling i8* (i8*)
805 auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
806 ProjectionFunc, CalleeContext);
807 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
808 CallerContext->setDebugLoc(DbgLoc);
809 // The frame is located after the async_context header.
810 auto &Context = Builder.getContext();
811 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
812 Type::getInt8Ty(Context), CallerContext,
813 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
814 // Inline the projection function.
815 InlineFunctionInfo InlineInfo;
816 auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
 817 assert(InlineRes.isSuccess());
818 (void)InlineRes;
819 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
820 }
821 // In continuation-lowering, the argument is the opaque storage.
822 case coro::ABI::Retcon:
823 case coro::ABI::RetconOnce: {
824 Argument *NewStorage = &*NewF->arg_begin();
825 auto FramePtrTy = Shape.FrameTy->getPointerTo();
826
827 // If the storage is inline, just bitcast to the storage to the frame type.
828 if (Shape.RetconLowering.IsFrameInlineInStorage)
829 return Builder.CreateBitCast(NewStorage, FramePtrTy);
830
831 // Otherwise, load the real frame from the opaque storage.
832 auto FramePtrPtr =
833 Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
834 return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
835 }
836 }
 837 llvm_unreachable("bad ABI");
838}
839
840static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
841 unsigned ParamIndex,
842 uint64_t Size, Align Alignment) {
843 AttrBuilder ParamAttrs(Context);
844 ParamAttrs.addAttribute(Attribute::NonNull);
845 ParamAttrs.addAttribute(Attribute::NoAlias);
846 ParamAttrs.addAlignmentAttr(Alignment);
847 ParamAttrs.addDereferenceableAttr(Size);
848 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
849}
850
851static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
852 unsigned ParamIndex) {
853 AttrBuilder ParamAttrs(Context);
854 ParamAttrs.addAttribute(Attribute::SwiftAsync);
855 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
856}
857
858static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
859 unsigned ParamIndex) {
860 AttrBuilder ParamAttrs(Context);
861 ParamAttrs.addAttribute(Attribute::SwiftSelf);
862 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
863}
864
865/// Clone the body of the original function into a resume function of
866/// some sort.
867void CoroCloner::create() {
868 // Create the new function if we don't already have one.
869 if (!NewF) {
Step 25: Assuming field 'NewF' is null
Step 26: Taking true branch
870 NewF = createCloneDeclaration(OrigF, Shape, Suffix,
Step 28: Calling 'createCloneDeclaration'
871 OrigF.getParent()->end(), ActiveSuspend);
Step 27: Passing null pointer value via 5th parameter 'ActiveSuspend'
872 }
873
874 // Replace all args with dummy instructions. If an argument is the old frame
875 // pointer, the dummy will be replaced by the new frame pointer once it is
876 // computed below. Uses of all other arguments should have already been
877 // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
878 // frame.
879 SmallVector<Instruction *> DummyArgs;
880 for (Argument &A : OrigF.args()) {
881 DummyArgs.push_back(new FreezeInst(UndefValue::get(A.getType())));
882 VMap[&A] = DummyArgs.back();
883 }
884
885 SmallVector<ReturnInst *, 4> Returns;
886
887 // Ignore attempts to change certain attributes of the function.
888 // TODO: maybe there should be a way to suppress this during cloning?
889 auto savedVisibility = NewF->getVisibility();
890 auto savedUnnamedAddr = NewF->getUnnamedAddr();
891 auto savedDLLStorageClass = NewF->getDLLStorageClass();
892
893 // NewF's linkage (which CloneFunctionInto does *not* change) might not
894 // be compatible with the visibility of OrigF (which it *does* change),
895 // so protect against that.
896 auto savedLinkage = NewF->getLinkage();
897 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
898
899 CloneFunctionInto(NewF, &OrigF, VMap,
900 CloneFunctionChangeType::LocalChangesOnly, Returns);
901
902 auto &Context = NewF->getContext();
903
904 // For async functions / continuations, adjust the scope line of the
905 // clone to the line number of the suspend point. However, only
906 // adjust the scope line when the files are the same. This ensures
907 // line number and file name belong together. The scope line is
908 // associated with all pre-prologue instructions. This avoids a jump
909 // in the linetable from the function declaration to the suspend point.
910 if (DISubprogram *SP = NewF->getSubprogram()) {
 911 assert(SP != OrigF.getSubprogram() && SP->isDistinct());
912 if (ActiveSuspend)
913 if (auto DL = ActiveSuspend->getDebugLoc())
914 if (SP->getFile() == DL->getFile())
915 SP->setScopeLine(DL->getLine());
916 // Update the linkage name to reflect the modified symbol name. It
917 // is necessary to update the linkage name in Swift, since the
918 // mangling changes for resume functions. It might also be the
919 // right thing to do in C++, but due to a limitation in LLVM's
920 // AsmPrinter we can only do this if the function doesn't have an
921 // abstract specification, since the DWARF backend expects the
922 // abstract specification to contain the linkage name and asserts
923 // that they are identical.
924 if (!SP->getDeclaration() && SP->getUnit() &&
925 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
926 SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
927 }
928
929 NewF->setLinkage(savedLinkage);
930 NewF->setVisibility(savedVisibility);
931 NewF->setUnnamedAddr(savedUnnamedAddr);
932 NewF->setDLLStorageClass(savedDLLStorageClass);
933
934 // Replace the attributes of the new function:
935 auto OrigAttrs = NewF->getAttributes();
936 auto NewAttrs = AttributeList();
937
938 switch (Shape.ABI) {
939 case coro::ABI::Switch:
940 // Bootstrap attributes by copying function attributes from the
941 // original function. This should include optimization settings and so on.
942 NewAttrs = NewAttrs.addFnAttributes(
943 Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
944
945 addFramePointerAttrs(NewAttrs, Context, 0,
946 Shape.FrameSize, Shape.FrameAlign);
947 break;
948 case coro::ABI::Async: {
949 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
950 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
951 Attribute::SwiftAsync)) {
952 uint32_t ArgAttributeIndices =
953 ActiveAsyncSuspend->getStorageArgumentIndex();
954 auto ContextArgIndex = ArgAttributeIndices & 0xff;
955 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
956
 957 // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
958 // `swiftself`.
959 auto SwiftSelfIndex = ArgAttributeIndices >> 8;
960 if (SwiftSelfIndex)
961 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
962 }
963
964 // Transfer the original function's attributes.
965 auto FnAttrs = OrigF.getAttributes().getFnAttrs();
966 NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
967 break;
968 }
969 case coro::ABI::Retcon:
970 case coro::ABI::RetconOnce:
971 // If we have a continuation prototype, just use its attributes,
972 // full-stop.
973 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
974
975 addFramePointerAttrs(NewAttrs, Context, 0,
976 Shape.getRetconCoroId()->getStorageSize(),
977 Shape.getRetconCoroId()->getStorageAlignment());
978 break;
979 }
980
981 switch (Shape.ABI) {
982 // In these ABIs, the cloned functions always return 'void', and the
983 // existing return sites are meaningless. Note that for unique
984 // continuations, this includes the returns associated with suspends;
985 // this is fine because we can't suspend twice.
986 case coro::ABI::Switch:
987 case coro::ABI::RetconOnce:
988 // Remove old returns.
989 for (ReturnInst *Return : Returns)
990 changeToUnreachable(Return);
991 break;
992
993 // With multi-suspend continuations, we'll already have eliminated the
994 // original returns and inserted returns before all the suspend points,
995 // so we want to leave any returns in place.
996 case coro::ABI::Retcon:
997 break;
998 // Async lowering will insert musttail call functions at all suspend points
999 // followed by a return.
1000 // Don't change returns to unreachable because that will trip up the verifier.
1001 // These returns should be unreachable from the clone.
1002 case coro::ABI::Async:
1003 break;
1004 }
1005
1006 NewF->setAttributes(NewAttrs);
1007 NewF->setCallingConv(Shape.getResumeFunctionCC());
1008
1009 // Set up the new entry block.
1010 replaceEntryBlock();
1011
1012 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1013 NewFramePtr = deriveNewFramePointer();
1014
1015 // Remap frame pointer.
1016 Value *OldFramePtr = VMap[Shape.FramePtr];
1017 NewFramePtr->takeName(OldFramePtr);
1018 OldFramePtr->replaceAllUsesWith(NewFramePtr);
1019
1020 // Remap vFrame pointer.
1021 auto *NewVFrame = Builder.CreateBitCast(
1022 NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1023 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1024 if (OldVFrame != NewVFrame)
1025 OldVFrame->replaceAllUsesWith(NewVFrame);
1026
1027 // All uses of the arguments should have been resolved by this point,
1028 // so we can safely remove the dummy values.
1029 for (Instruction *DummyArg : DummyArgs) {
1030 DummyArg->replaceAllUsesWith(UndefValue::get(DummyArg->getType()));
1031 DummyArg->deleteValue();
1032 }
1033
1034 switch (Shape.ABI) {
1035 case coro::ABI::Switch:
 1036 // Rewrite final suspend handling as it is not done via the switch (this
 1037 // allows us to remove the final case from the switch, since it is undefined
 1038 // behavior to resume a coroutine suspended at the final suspend point).
1039 if (Shape.SwitchLowering.HasFinalSuspend)
1040 handleFinalSuspend();
1041 break;
1042 case coro::ABI::Async:
1043 case coro::ABI::Retcon:
1044 case coro::ABI::RetconOnce:
1045 // Replace uses of the active suspend with the corresponding
1046 // continuation-function arguments.
 1047 assert(ActiveSuspend != nullptr &&
 1048 "no active suspend when lowering a continuation-style coroutine");
1049 replaceRetconOrAsyncSuspendUses();
1050 break;
1051 }
1052
1053 // Handle suspends.
1054 replaceCoroSuspends();
1055
1056 // Handle swifterror.
1057 replaceSwiftErrorOps();
1058
1059 // Remove coro.end intrinsics.
1060 replaceCoroEnds();
1061
1062 // Salvage debug info that points into the coroutine frame.
1063 salvageDebugInfo();
1064
1065 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1066 // to suppress deallocation code.
1067 if (Shape.ABI == coro::ABI::Switch)
1068 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1069 /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1070}
1071
1072 // Create a resume clone by cloning the body of the original function,
1073 // setting a new entry block, and replacing coro.suspend with an appropriate
1074 // value to force resumption or cleanup at every suspend point.
1075static Function *createClone(Function &F, const Twine &Suffix,
1076 coro::Shape &Shape, CoroCloner::Kind FKind) {
1077 CoroCloner Cloner(F, Suffix, Shape, FKind);
19: Calling constructor for 'CoroCloner'
23: Returning from constructor for 'CoroCloner'
1078 Cloner.create();
24: Calling 'CoroCloner::create'
1079 return Cloner.getFunction();
1080}
1081
1082/// Remove calls to llvm.coro.end in the original function.
1083static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1084 for (auto End : Shape.CoroEnds) {
1085 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1086 }
1087}
1088
1089static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1090 assert(Shape.ABI == coro::ABI::Async);
1091
1092 auto *FuncPtrStruct = cast<ConstantStruct>(
1093 Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1094 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1095 auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1096 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1097 Shape.AsyncLowering.ContextSize);
1098 auto *NewFuncPtrStruct = ConstantStruct::get(
1099 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1100
1101 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1102}
1103
1104static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1105 if (Shape.ABI == coro::ABI::Async)
1106 updateAsyncFuncPointerContextSize(Shape);
1107
1108 for (CoroAlignInst *CA : Shape.CoroAligns) {
1109 CA->replaceAllUsesWith(
1110 ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1111 CA->eraseFromParent();
1112 }
1113
1114 if (Shape.CoroSizes.empty())
1115 return;
1116
1117 // Within the same function, all coro.size calls should have the same result type.
1118 auto *SizeIntrin = Shape.CoroSizes.back();
1119 Module *M = SizeIntrin->getModule();
1120 const DataLayout &DL = M->getDataLayout();
1121 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1122 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1123
1124 for (CoroSizeInst *CS : Shape.CoroSizes) {
1125 CS->replaceAllUsesWith(SizeConstant);
1126 CS->eraseFromParent();
1127 }
1128}
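A minimal IR sketch of the rewrite above (illustrative only; the frame type %f.Frame and its 48-byte alloc size are assumed, not taken from this file):

    ; before
    %size = call i64 @llvm.coro.size.i64()
    %mem  = call i8* @malloc(i64 %size)
    ; after: the intrinsic is erased and its uses see the DataLayout
    ; alloc size of %f.Frame as a plain constant
    %mem  = call i8* @malloc(i64 48)

coro.align calls are folded to the constant Shape.FrameAlign in the same way.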
1129
1130 // Create a global constant array containing pointers to the functions provided,
1131 // and set the Info parameter of CoroBegin to point at this constant. Example:
1132//
1133// @f.resumers = internal constant [2 x void(%f.frame*)*]
1134// [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1135// define void @f() {
1136// ...
1137// call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1138// i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1139 // }
1140// Assumes that all the functions have the same signature.
1141static void setCoroInfo(Function &F, coro::Shape &Shape,
1142 ArrayRef<Function *> Fns) {
1143 // This only works under the switch-lowering ABI because coro elision
1144 // only works on the switch-lowering ABI.
1145 assert(Shape.ABI == coro::ABI::Switch);
1146
1147 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1148 assert(!Args.empty());
1149 Function *Part = *Fns.begin();
1150 Module *M = Part->getParent();
1151 auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1152
1153 auto *ConstVal = ConstantArray::get(ArrTy, Args);
1154 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1155 GlobalVariable::PrivateLinkage, ConstVal,
1156 F.getName() + Twine(".resumers"));
1157
1158 // Update coro.begin instruction to refer to this constant.
1159 LLVMContext &C = F.getContext();
1160 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1161 Shape.getSwitchCoroId()->setInfo(BC);
1162}
1163
1164// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1165static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1166 Function *DestroyFn, Function *CleanupFn) {
1167 assert(Shape.ABI == coro::ABI::Switch);
1168
1169 IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());
1170
1171 auto *ResumeAddr = Builder.CreateStructGEP(
1172 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1173 "resume.addr");
1174 Builder.CreateStore(ResumeFn, ResumeAddr);
1175
1176 Value *DestroyOrCleanupFn = DestroyFn;
1177
1178 CoroIdInst *CoroId = Shape.getSwitchCoroId();
1179 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1180 // If there is a CoroAlloc and it returns false (meaning we elide the
1181 // allocation), use CleanupFn instead of DestroyFn.
1182 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1183 }
1184
1185 auto *DestroyAddr = Builder.CreateStructGEP(
1186 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1187 "destroy.addr");
1188 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1189}
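The stores emitted above look roughly like this in IR (a sketch; the frame type and function names are illustrative, the field indices come from coro::Shape::SwitchFieldIndex):

    %resume.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 0
    store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
    %destroy.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 1
    store void (%f.Frame*)* @f.destroy, void (%f.Frame*)** %destroy.addr

When a coro.alloc is present, the destroy slot instead receives a select between @f.destroy and @f.cleanup, keyed on whether the frame allocation was elided.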
1190
1191static void postSplitCleanup(Function &F) {
1192 removeUnreachableBlocks(F);
1193
1194#ifndef NDEBUG
1195 // For now, we do a mandatory verification step because we don't
1196 // entirely trust this pass. Note that we don't want to add a verifier
1197 // pass to FPM below because it will also verify all the global data.
1198 if (verifyFunction(F, &errs()))
1199 report_fatal_error("Broken function");
1200#endif
1201}
1202
1203 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1204 // the PHIs' incoming values in the ResolvedValues map.
1205static void
1206scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1207 DenseMap<Value *, Value *> &ResolvedValues) {
1208 auto *PrevBB = Prev->getParent();
1209 for (PHINode &PN : NewBlock->phis()) {
1210 auto V = PN.getIncomingValueForBlock(PrevBB);
1211 // See if we already resolved it.
1212 auto VI = ResolvedValues.find(V);
1213 if (VI != ResolvedValues.end())
1214 V = VI->second;
1215 // Remember the value.
1216 ResolvedValues[&PN] = V;
1217 }
1218}
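For illustration (names and values assumed): if control arrived at block %new from block %prev, and %new contains

    %v = phi i8 [ 0, %prev ], [ 1, %other ]

then ResolvedValues[%v] becomes i8 0, after first mapping the incoming value through ResolvedValues in case it had itself been resolved earlier.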
1219
1220 // Replace a sequence of branches leading to a ret with a clone of that ret
1221 // instruction. The suspend point is represented by a switch; track the PHI
1222 // values and select the correct case successor when possible.
1223static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1224 DenseMap<Value *, Value *> ResolvedValues;
1225 BasicBlock *UnconditionalSucc = nullptr;
1226 assert(InitialInst->getModule());
1227 const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1228
1229 auto GetFirstValidInstruction = [](Instruction *I) {
1230 while (I) {
1231 // A BitCastInst wouldn't generate actual code, so we can skip it.
1232 if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1233 I->isLifetimeStartOrEnd())
1234 I = I->getNextNode();
1235 else if (isInstructionTriviallyDead(I))
1236 // Since we are in the middle of the transformation, we need to erase
1237 // the dead instruction manually.
1238 I = &*I->eraseFromParent();
1239 else
1240 break;
1241 }
1242 return I;
1243 };
1244
1245 auto TryResolveConstant = [&ResolvedValues](Value *V) {
1246 auto It = ResolvedValues.find(V);
1247 if (It != ResolvedValues.end())
1248 V = It->second;
1249 return dyn_cast<ConstantInt>(V);
1250 };
1251
1252 Instruction *I = InitialInst;
1253 while (I->isTerminator() || isa<CmpInst>(I)) {
1254 if (isa<ReturnInst>(I)) {
1255 if (I != InitialInst) {
1256 // If InitialInst is an unconditional branch, remove the PHI values
1257 // that come from the basic block of InitialInst.
1258 if (UnconditionalSucc)
1259 UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1260 ReplaceInstWithInst(InitialInst, I->clone());
1261 }
1262 return true;
1263 }
1264 if (auto *BR = dyn_cast<BranchInst>(I)) {
1265 if (BR->isUnconditional()) {
1266 BasicBlock *Succ = BR->getSuccessor(0);
1267 if (I == InitialInst)
1268 UnconditionalSucc = Succ;
1269 scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1270 I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1271 continue;
1272 }
1273
1274 BasicBlock *BB = BR->getParent();
1275 // Handle the case where the condition of the conditional branch is
1276 // constant, e.g.,
1277 //
1278 //     br i1 false, label %cleanup, label %CoroEnd
1279 //
1280 // This is possible during the transformation; we can continue
1281 // simplifying in this case.
1282 if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1283 // Handle this branch in next iteration.
1284 I = BB->getTerminator();
1285 continue;
1286 }
1287 } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1288 // If the number of cases in the suspend switch instruction is reduced to
1289 // 1, it is simplified to a CmpInst by llvm::ConstantFoldTerminator.
1290 auto *BR = dyn_cast<BranchInst>(
1291 GetFirstValidInstruction(CondCmp->getNextNode()));
1292 if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1293 return false;
1294
1295 // The comparison looks like: %cond = icmp eq i8 %V, constant.
1296 // So we try to resolve a constant for the first operand only, since the
1297 // second operand should be a literal constant by design.
1298 ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1299 auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1300 if (!Cond0 || !Cond1)
1301 return false;
1302
1303 // Both operands of the CmpInst are constant, so we can evaluate it
1304 // immediately to get the destination.
1305 auto *ConstResult =
1306 dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1307 CondCmp->getPredicate(), Cond0, Cond1, DL));
1308 if (!ConstResult)
1309 return false;
1310
1311 CondCmp->replaceAllUsesWith(ConstResult);
1312 CondCmp->eraseFromParent();
1313
1314 // Handle this branch in next iteration.
1315 I = BR;
1316 continue;
1317 } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1318 ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1319 if (!Cond)
1320 return false;
1321
1322 BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1323 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1324 I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1325 continue;
1326 }
1327
1328 return false;
1329 }
1330 return false;
1331}
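An illustrative example of a chain this folds (names and values assumed), starting from the unconditional branch as InitialInst:

    br label %check                       ; InitialInst
  check:
    %v = phi i8 [ 0, %entry ], [ %idx, %loop ]
    %cmp = icmp eq i8 %v, 0
    br i1 %cmp, label %exit, label %other
  exit:
    ret void

Arriving from %entry resolves %v to 0, the icmp folds to true, ConstantFoldTerminator turns the conditional branch into a jump to %exit, and the walk reaches the ret, so InitialInst is replaced with a clone of that ret.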
1332
1333 // Check whether CI obeys the rules of the musttail attribute.
1334static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1335 if (CI.isInlineAsm())
1336 return false;
1337
1338 // Match the prototype and calling convention of the resume function.
1339 FunctionType *CalleeTy = CI.getFunctionType();
1340 if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1341 return false;
1342
1343 Type *CalleeParmTy = CalleeTy->getParamType(0);
1344 if (!CalleeParmTy->isPointerTy() ||
1345 (CalleeParmTy->getPointerAddressSpace() != 0))
1346 return false;
1347
1348 if (CI.getCallingConv() != F.getCallingConv())
1349 return false;
1350
1351 // CI must not have any ABI-impacting function attributes.
1352 static const Attribute::AttrKind ABIAttrs[] = {
1353 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
1354 Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
1355 Attribute::SwiftSelf, Attribute::SwiftError};
1356 AttributeList Attrs = CI.getAttributes();
1357 for (auto AK : ABIAttrs)
1358 if (Attrs.hasParamAttr(0, AK))
1359 return false;
1360
1361 return true;
1362}
1363
1364 // Add musttail to any resume instructions that are immediately followed by a
1365 // suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
1366 // for symmetrical coroutine control transfer (C++ Coroutines TS extension).
1367 // This transformation is done only in the resume part of the coroutine that
1368 // has an identical signature and calling convention to the coro.resume call.
1369static void addMustTailToCoroResumes(Function &F) {
1370 bool changed = false;
1371
1372 // Collect potential resume instructions.
1373 SmallVector<CallInst *, 4> Resumes;
1374 for (auto &I : instructions(F))
1375 if (auto *Call = dyn_cast<CallInst>(&I))
1376 if (shouldBeMustTail(*Call, F))
1377 Resumes.push_back(Call);
1378
1379 // Set musttail on those that are followed by a ret instruction.
1380 for (CallInst *Call : Resumes)
1381 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1382 Call->setTailCallKind(CallInst::TCK_MustTail);
1383 changed = true;
1384 }
1385
1386 if (changed)
1387 removeUnreachableBlocks(F);
1388}
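The net effect, sketched in IR (calling convention and names illustrative):

    ; before: a symmetric-transfer resume reaching a ret through a branch
    %fn = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)
    %cast = bitcast i8* %fn to void (i8*)*
    call fastcc void %cast(i8* %hdl)
    br label %exit
  exit:
    ret void

    ; after
    musttail call fastcc void %cast(i8* %hdl)
    ret void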
1389
1390 // The coroutine has no suspend points. Remove the heap allocation for the
1391 // coroutine frame if possible.
1392static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1393 auto *CoroBegin = Shape.CoroBegin;
1394 auto *CoroId = CoroBegin->getId();
1395 auto *AllocInst = CoroId->getCoroAlloc();
1396 switch (Shape.ABI) {
1397 case coro::ABI::Switch: {
1398 auto SwitchId = cast<CoroIdInst>(CoroId);
1399 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1400 if (AllocInst) {
1401 IRBuilder<> Builder(AllocInst);
1402 auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1403 Frame->setAlignment(Shape.FrameAlign);
1404 auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1405 AllocInst->replaceAllUsesWith(Builder.getFalse());
1406 AllocInst->eraseFromParent();
1407 CoroBegin->replaceAllUsesWith(VFrame);
1408 } else {
1409 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1410 }
1411
1412 break;
1413 }
1414 case coro::ABI::Async:
1415 case coro::ABI::Retcon:
1416 case coro::ABI::RetconOnce:
1417 CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1418 break;
1419 }
1420
1421 CoroBegin->eraseFromParent();
1422}
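A sketch of the switch-ABI case when a coro.alloc is present (illustrative; the frame type and alignment are assumed):

    ; before
    %need = call i1 @llvm.coro.alloc(token %id)
    ...
    %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)

    ; after: coro.alloc is folded to false, so the heap-allocation path is
    ; dead, and all coro.begin uses see a stack frame instead
    %frame = alloca %f.Frame, align 8
    %vFrame = bitcast %f.Frame* %frame to i8*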
1423
1424 // SimplifySuspendPoint needs to check that there are no calls between
1425 // coro_save and coro_suspend, since any of those calls may potentially resume
1426 // the coroutine, in which case we cannot eliminate the suspend point.
1427static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1428 for (Instruction *I = From; I != To; I = I->getNextNode()) {
1429 // Assume that no intrinsic can resume the coroutine.
1430 if (isa<IntrinsicInst>(I))
1431 continue;
1432
1433 if (isa<CallBase>(I))
1434 return true;
1435 }
1436 return false;
1437}
1438
1439static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1440 SmallPtrSet<BasicBlock *, 8> Set;
1441 SmallVector<BasicBlock *, 8> Worklist;
1442
1443 Set.insert(SaveBB);
1444 Worklist.push_back(ResDesBB);
1445
1446 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1447 // returns a token consumed by the suspend instruction, all blocks in between
1448 // will eventually have to hit SaveBB when going backwards from ResDesBB.
1449 while (!Worklist.empty()) {
1450 auto *BB = Worklist.pop_back_val();
1451 Set.insert(BB);
1452 for (auto *Pred : predecessors(BB))
1453 if (!Set.contains(Pred))
1454 Worklist.push_back(Pred);
1455 }
1456
1457 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1458 Set.erase(SaveBB);
1459 Set.erase(ResDesBB);
1460
1461 for (auto *BB : Set)
1462 if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1463 return true;
1464
1465 return false;
1466}
1467
1468static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1469 auto *SaveBB = Save->getParent();
1470 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1471
1472 if (SaveBB == ResumeOrDestroyBB)
1473 return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1474
1475 // Any calls from Save to the end of the block?
1476 if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1477 return true;
1478
1479 // Any calls from the beginning of the block up to ResumeOrDestroy?
1480 if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1481 ResumeOrDestroy))
1482 return true;
1483
1484 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1485 if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1486 return true;
1487
1488 return false;
1489}
1490
1491 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1492 // suspend point and replace it with normal control flow.
1493static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1494 CoroBeginInst *CoroBegin) {
1495 Instruction *Prev = Suspend->getPrevNode();
1496 if (!Prev) {
1497 auto *Pred = Suspend->getParent()->getSinglePredecessor();
1498 if (!Pred)
1499 return false;
1500 Prev = Pred->getTerminator();
1501 }
1502
1503 CallBase *CB = dyn_cast<CallBase>(Prev);
1504 if (!CB)
1505 return false;
1506
1507 auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1508
1509 // See if the callsite is for resumption or destruction of the coroutine.
1510 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1511 if (!SubFn)
1512 return false;
1513
1514 // If it does not refer to the current coroutine, we cannot do anything with it.
1515 if (SubFn->getFrame() != CoroBegin)
1516 return false;
1517
1518 // See if the transformation is safe. Specifically, see if there are any
1519 // calls in between Save and CallInstr. They can potentially resume the
1520 // coroutine, rendering this optimization unsafe.
1521 auto *Save = Suspend->getCoroSave();
1522 if (hasCallsBetween(Save, CB))
1523 return false;
1524
1525 // Replace llvm.coro.suspend with the value that results in resumption over
1526 // the resume or cleanup path.
1527 Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1528 Suspend->eraseFromParent();
1529 Save->eraseFromParent();
1530
1531 // No longer need a call to coro.resume or coro.destroy.
1532 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1533 BranchInst::Create(Invoke->getNormalDest(), Invoke);
1534 }
1535
1536 // Grab the CalledValue from CB before erasing the CallInstr.
1537 auto *CalledValue = CB->getCalledOperand();
1538 CB->eraseFromParent();
1539
1540 // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1541 if (CalledValue != SubFn && CalledValue->user_empty())
1542 if (auto *I = dyn_cast<Instruction>(CalledValue))
1543 I->eraseFromParent();
1544
1545 // Now we are good to remove SubFn.
1546 if (SubFn->user_empty())
1547 SubFn->eraseFromParent();
1548
1549 return true;
1550}
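Sketched before/after (illustrative): a resume of the same coroutine sitting directly in front of its own suspend collapses into straight-line code:

    %save = call token @llvm.coro.save(i8* %hdl)
    %fn = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)  ; index 0 = resume
    %cast = bitcast i8* %fn to void (i8*)*
    call fastcc void %cast(i8* %hdl)
    %sp = call i8 @llvm.coro.suspend(token %save, i1 false)

After the rewrite, the save/call/suspend are gone and %sp's uses see the constant index i8 0, so control falls through to the resume path without an actual suspension.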
1551
1552// Remove suspend points that are simplified.
1553static void simplifySuspendPoints(coro::Shape &Shape) {
1554 // Currently, the only simplification we do is switch-lowering-specific.
1555 if (Shape.ABI != coro::ABI::Switch)
1556 return;
1557
1558 auto &S = Shape.CoroSuspends;
1559 size_t I = 0, N = S.size();
1560 if (N == 0)
1561 return;
1562 while (true) {
1563 auto SI = cast<CoroSuspendInst>(S[I]);
1564 // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1565 // to resume a coroutine suspended at the final suspend point.
1566 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1567 if (--N == I)
1568 break;
1569 std::swap(S[I], S[N]);
1570 continue;
1571 }
1572 if (++I == N)
1573 break;
1574 }
1575 S.resize(N);
1576}
1577
1578static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1579 SmallVectorImpl<Function *> &Clones) {
1580 assert(Shape.ABI == coro::ABI::Switch);
17: '?' condition is true
1581
1582 createResumeEntryBlock(F, Shape);
1583 auto ResumeClone = createClone(F, ".resume", Shape,
18: Calling 'createClone'
1584 CoroCloner::Kind::SwitchResume);
1585 auto DestroyClone = createClone(F, ".destroy", Shape,
1586 CoroCloner::Kind::SwitchUnwind);
1587 auto CleanupClone = createClone(F, ".cleanup", Shape,
1588 CoroCloner::Kind::SwitchCleanup);
1589
1590 postSplitCleanup(*ResumeClone);
1591 postSplitCleanup(*DestroyClone);
1592 postSplitCleanup(*CleanupClone);
1593
1594 addMustTailToCoroResumes(*ResumeClone);
1595
1596 // Store the addresses of the resume/destroy/cleanup functions in the coroutine frame.
1597 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1598
1599 assert(Clones.empty());
1600 Clones.push_back(ResumeClone);
1601 Clones.push_back(DestroyClone);
1602 Clones.push_back(CleanupClone);
1603
1604 // Create a constant array referring to the resume/destroy/cleanup functions,
1605 // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1606 // pass can determine the correct function to call.
1607 setCoroInfo(F, Shape, Clones);
1608}
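After this runs, a coroutine @f has been split roughly as follows (a sketch; linkage, calling convention, and exact types are illustrative):

    define internal fastcc void @f.resume(%f.Frame* %frame)   ; resume path
    define internal fastcc void @f.destroy(%f.Frame* %frame)  ; destroy, heap frame
    define internal fastcc void @f.cleanup(%f.Frame* %frame)  ; destroy, elided frame
    @f.resumers = private constant [3 x void (%f.Frame*)*]
                      [void (%f.Frame*)* @f.resume, void (%f.Frame*)* @f.destroy,
                       void (%f.Frame*)* @f.cleanup]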
1609
1610static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1611 Value *Continuation) {
1612 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1613 auto &Context = Suspend->getParent()->getParent()->getContext();
1614 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1615
1616 IRBuilder<> Builder(ResumeIntrinsic);
1617 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1618 ResumeIntrinsic->replaceAllUsesWith(Val);
1619 ResumeIntrinsic->eraseFromParent();
1620 Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1621 UndefValue::get(Int8PtrTy));
1622}
1623
1624 /// Coerce the arguments in \p FnArgs according to \p FnTy, into \p CallArgs.
1625static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1626 ArrayRef<Value *> FnArgs,
1627 SmallVectorImpl<Value *> &CallArgs) {
1628 size_t ArgIdx = 0;
1629 for (auto paramTy : FnTy->params()) {
1630 assert(ArgIdx < FnArgs.size());
1631 if (paramTy != FnArgs[ArgIdx]->getType())
1632 CallArgs.push_back(
1633 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1634 else
1635 CallArgs.push_back(FnArgs[ArgIdx]);
1636 ++ArgIdx;
1637 }
1638}
1639
1640CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1641 ArrayRef<Value *> Arguments,
1642 IRBuilder<> &Builder) {
1643 auto *FnTy = MustTailCallFn->getFunctionType();
1644 // Coerce the arguments; LLVM optimizations seem to ignore the types in
1645 // vararg functions and throw away casts in optimized mode.
1646 SmallVector<Value *, 8> CallArgs;
1647 coerceArguments(Builder, FnTy, Arguments, CallArgs);
1648
1649 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1650 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1651 TailCall->setDebugLoc(Loc);
1652 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1653 return TailCall;
1654}
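The result is a musttail call whose arguments have been coerced to the callee's parameter types; the caller is responsible for emitting the mandatory ret immediately after it, e.g. (a sketch, names assumed):

    musttail call swiftcc void @continuation(i8* %async.ctx)
    ret void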
1655
1656static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1657 SmallVectorImpl<Function *> &Clones) {
1658 assert(Shape.ABI == coro::ABI::Async);
1659 assert(Clones.empty());
1660 // Reset various things that the optimizer might have decided it
1661 // "knows" about the coroutine function due to not seeing a return.
1662 F.removeFnAttr(Attribute::NoReturn);
1663 F.removeRetAttr(Attribute::NoAlias);
1664 F.removeRetAttr(Attribute::NonNull);
1665
1666 auto &Context = F.getContext();
1667 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1668
1669 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1670 IRBuilder<> Builder(Id);
1671
1672 auto *FramePtr = Id->getStorage();
1673 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1674 FramePtr = Builder.CreateConstInBoundsGEP1_32(
1675 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1676 "async.ctx.frameptr");
1677
1678 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1679 {
1680 // Make sure we don't invalidate Shape.FramePtr.
1681 TrackingVH<Value> Handle(Shape.FramePtr);
1682 Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1683 Shape.FramePtr = Handle.getValPtr();
1684 }
1685
1686 // Create all the functions in order after the main function.
1687 auto NextF = std::next(F.getIterator());
1688
1689 // Create a continuation function for each of the suspend points.
1690 Clones.reserve(Shape.CoroSuspends.size());
1691 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1692 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1693
1694 // Create the clone declaration.
1695 auto ResumeNameSuffix = ".resume.";
1696 auto ProjectionFunctionName =
1697 Suspend->getAsyncContextProjectionFunction()->getName();
1698 bool UseSwiftMangling = false;
1699 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1700 ResumeNameSuffix = "TQ";
1701 UseSwiftMangling = true;
1702 } else if (ProjectionFunctionName.equals(
1703 "__swift_async_resume_get_context")) {
1704 ResumeNameSuffix = "TY";
1705 UseSwiftMangling = true;
1706 }
1707 auto *Continuation = createCloneDeclaration(
1708 F, Shape,
1709 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1710 : ResumeNameSuffix + Twine(Idx),
1711 NextF, Suspend);
1712 Clones.push_back(Continuation);
1713
1714 // Insert a branch to a new return block immediately before the suspend
1715 // point.
1716 auto *SuspendBB = Suspend->getParent();
1717 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1718 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1719
1720 // Place it before the first suspend.
1721 auto *ReturnBB =
1722 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1723 Branch->setSuccessor(0, ReturnBB);
1724
1725 IRBuilder<> Builder(ReturnBB);
1726
1727 // Insert the call to the tail call function and inline it.
1728 auto *Fn = Suspend->getMustTailCallFunction();
1729 SmallVector<Value *, 8> Args(Suspend->args());
1730 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1731 CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1732 auto *TailCall =
1733 coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1734 Builder.CreateRetVoid();
1735 InlineFunctionInfo FnInfo;
1736 auto InlineRes = InlineFunction(*TailCall, FnInfo);
1737 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1738 (void)InlineRes;
1739
1740 // Replace the llvm.coro.async.resume intrinsic call.
1741 replaceAsyncResumeFunction(Suspend, Continuation);
1742 }
1743
1744 assert(Clones.size() == Shape.CoroSuspends.size());
1745 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1746 auto *Suspend = Shape.CoroSuspends[Idx];
1747 auto *Clone = Clones[Idx];
1748
1749 CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1750 }
1751}
1752
1753static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1754 SmallVectorImpl<Function *> &Clones) {
1755 assert(Shape.ABI == coro::ABI::Retcon ||
1756        Shape.ABI == coro::ABI::RetconOnce);
1757 assert(Clones.empty());
1758
1759 // Reset various things that the optimizer might have decided it
1760 // "knows" about the coroutine function due to not seeing a return.
1761 F.removeFnAttr(Attribute::NoReturn);
1762 F.removeRetAttr(Attribute::NoAlias);
1763 F.removeRetAttr(Attribute::NonNull);
1764
1765 // Allocate the frame.
1766 auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1767 Value *RawFramePtr;
1768 if (Shape.RetconLowering.IsFrameInlineInStorage) {
1769 RawFramePtr = Id->getStorage();
1770 } else {
1771 IRBuilder<> Builder(Id);
1772
1773 // Determine the size of the frame.
1774 const DataLayout &DL = F.getParent()->getDataLayout();
1775 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1776
1777 // Allocate. We don't need to update the call graph node because we're
1778 // going to recompute it from scratch after splitting.
1779 // FIXME: pass the required alignment
1780 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1781 RawFramePtr =
1782 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1783
1784 // Stash the allocated frame pointer in the continuation storage.
1785 auto Dest = Builder.CreateBitCast(Id->getStorage(),
1786 RawFramePtr->getType()->getPointerTo());
1787 Builder.CreateStore(RawFramePtr, Dest);
1788 }
1789
1790 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1791 {
1792 // Make sure we don't invalidate Shape.FramePtr.
1793 TrackingVH<Value> Handle(Shape.FramePtr);
1794 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1795 Shape.FramePtr = Handle.getValPtr();
1796 }
1797
1798 // Create a unique return block.
1799 BasicBlock *ReturnBB = nullptr;
1800 SmallVector<PHINode *, 4> ReturnPHIs;
1801
1802 // Create all the functions in order after the main function.
1803 auto NextF = std::next(F.getIterator());
1804
1805 // Create a continuation function for each of the suspend points.
1806 Clones.reserve(Shape.CoroSuspends.size());
1807 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1808 auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1809
1810 // Create the clone declaration.
1811 auto Continuation =
1812 createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1813 Clones.push_back(Continuation);
1814
1815 // Insert a branch to the unified return block immediately before
1816 // the suspend point.
1817 auto SuspendBB = Suspend->getParent();
1818 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1819 auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1820
1821 // Create the unified return block.
1822 if (!ReturnBB) {
1823 // Place it before the first suspend.
1824 ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1825 NewSuspendBB);
1826 Shape.RetconLowering.ReturnBlock = ReturnBB;
1827
1828 IRBuilder<> Builder(ReturnBB);
1829
1830 // Create PHIs for all the return values.
1831 assert(ReturnPHIs.empty());
1832
1833 // First, the continuation.
1834 ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1835 Shape.CoroSuspends.size()));
1836
1837 // Next, all the directly-yielded values.
1838 for (auto ResultTy : Shape.getRetconResultTypes())
1839 ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1840 Shape.CoroSuspends.size()));
1841
1842 // Build the return value.
1843 auto RetTy = F.getReturnType();
1844
1845 // Cast the continuation value if necessary.
1846 // We can't rely on the types matching up because that type would
1847 // have to be infinite.
1848 auto CastedContinuationTy =
1849 (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1850 auto *CastedContinuation =
1851 Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1852
1853 Value *RetV;
1854 if (ReturnPHIs.size() == 1) {
1855 RetV = CastedContinuation;
1856 } else {
1857 RetV = UndefValue::get(RetTy);
1858 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1859 for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1860 RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1861 }
1862
1863 Builder.CreateRet(RetV);
1864 }
1865
1866 // Branch to the return block.
1867 Branch->setSuccessor(0, ReturnBB);
1868 ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1869 size_t NextPHIIndex = 1;
1870 for (auto &VUse : Suspend->value_operands())
1871 ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1872 assert(NextPHIIndex == ReturnPHIs.size());
1873 }
1874
1875 assert(Clones.size() == Shape.CoroSuspends.size());
1876 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1877 auto Suspend = Shape.CoroSuspends[i];
1878 auto Clone = Clones[i];
1879
1880 CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1881 }
1882}
1883
1884namespace {
1885 class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1886 Function &F;
1887 public:
1888 PrettyStackTraceFunction(Function &F) : F(F) {}
1889 void print(raw_ostream &OS) const override {
1890 OS << "While splitting coroutine ";
1891 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1892 OS << "\n";
1893 }
1894 };
1895}
1896
1897static coro::Shape splitCoroutine(Function &F,
1898 SmallVectorImpl<Function *> &Clones,
1899 bool OptimizeFrame) {
1900 PrettyStackTraceFunction prettyStackTrace(F);
1901
1902 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1903 // up by uses in unreachable blocks, so remove them as a first pass.
1904 removeUnreachableBlocks(F);
1905
1906 coro::Shape Shape(F, OptimizeFrame);
1907 if (!Shape.CoroBegin)
12: Assuming field 'CoroBegin' is non-null
13: Taking false branch
1908 return Shape;
1909
1910 simplifySuspendPoints(Shape);
1911 buildCoroutineFrame(F, Shape);
1912 replaceFrameSizeAndAlignment(Shape);
1913
1914 // If there are no suspend points, no split is required; just remove
1915 // the allocation and deallocation blocks, as they are not needed.
1916 if (Shape.CoroSuspends.empty()) {
14: Taking false branch
1917 handleNoSuspendCoroutine(Shape);
1918 } else {
1919 switch (Shape.ABI) {
15: Control jumps to 'case Switch:' at line 1920
1920 case coro::ABI::Switch:
1921 splitSwitchCoroutine(F, Shape, Clones);
16: Calling 'splitSwitchCoroutine'
1922 break;
1923 case coro::ABI::Async:
1924 splitAsyncCoroutine(F, Shape, Clones);
1925 break;
1926 case coro::ABI::Retcon:
1927 case coro::ABI::RetconOnce:
1928 splitRetconCoroutine(F, Shape, Clones);
1929 break;
1930 }
1931 }
1932
1933 // Replace all the swifterror operations in the original function.
1934 // This invalidates SwiftErrorOps in the Shape.
1935 replaceSwiftErrorOps(F, Shape, nullptr);
1936
1937 // Finally, salvage the llvm.dbg.{declare,addr} in our original function that
1938 // point into the coroutine frame. We only do this for the current function
1939 // since the Cloner salvaged debug info for us in the new coroutine funclets.
1940 SmallVector<DbgVariableIntrinsic *, 8> Worklist;
1941 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1942 for (auto &BB : F) {
1943 for (auto &I : BB) {
1944 if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
1945 Worklist.push_back(DDI);
1946 continue;
1947 }
1948 if (auto *DDI = dyn_cast<DbgAddrIntrinsic>(&I)) {
1949 Worklist.push_back(DDI);
1950 continue;
1951 }
1952 }
1953 }
1954 for (auto *DDI : Worklist)
1955 coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1956
1957 return Shape;
1958}
1959
1960static void
1961updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1962 const SmallVectorImpl<Function *> &Clones,
1963 CallGraph &CG, CallGraphSCC &SCC) {
1964 if (!Shape.CoroBegin)
1965 return;
1966
1967 removeCoroEnds(Shape, &CG);
1968 postSplitCleanup(F);
1969
1970 // Update call graph and add the functions we created to the SCC.
1971 coro::updateCallGraph(F, Clones, CG, SCC);
1972}
1973
1974static void updateCallGraphAfterCoroutineSplit(
1975 LazyCallGraph::Node &N, const coro::Shape &Shape,
1976 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1977 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1978 FunctionAnalysisManager &FAM) {
1979 if (!Shape.CoroBegin)
1980 return;
1981
1982 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1983 auto &Context = End->getContext();
1984 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1985 End->eraseFromParent();
1986 }
1987
1988 if (!Clones.empty()) {
1989 switch (Shape.ABI) {
1990 case coro::ABI::Switch:
1991 // Each clone in the Switch lowering is independent of the other clones.
1992 // Let the LazyCallGraph know about each one separately.
1993 for (Function *Clone : Clones)
1994 CG.addSplitFunction(N.getFunction(), *Clone);
1995 break;
1996 case coro::ABI::Async:
1997 case coro::ABI::Retcon:
1998 case coro::ABI::RetconOnce:
1999 // Each clone in the Async/Retcon lowering references each of the other clones.
2000 // Let the LazyCallGraph know about all of them at once.
2001 if (!Clones.empty())
2002 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
2003 break;
2004 }
2005
2006 // Let the CGSCC infra handle the changes to the original function.
2007 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
2008 }
2009
2010 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
2011 // to the split functions.
2012 postSplitCleanup(N.getFunction());
2013 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
2014}
2015
2016 // When we see the coroutine for the first time, we insert an indirect call
2017 // to a devirt trigger function and mark the coroutine as now being ready
2018 // for split.
2019// Async lowering uses this after it has split the function to restart the
2020// pipeline.
2021static void prepareForSplit(Function &F, CallGraph &CG,
2022 bool MarkForAsyncRestart = false) {
2023 Module &M = *F.getParent();
2024 LLVMContext &Context = F.getContext();
2025#ifndef NDEBUG
2026 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
2027 assert(DevirtFn && "coro.devirt.trigger function not found");
2028#endif
2029
2030 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
2031                                     ? ASYNC_RESTART_AFTER_SPLIT
2032                                     : PREPARED_FOR_SPLIT);
2033
2034 // Insert an indirect call sequence that will be devirtualized by CoroElide
2035 // pass:
2036 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
2037 // %1 = bitcast i8* %0 to void(i8*)*
2038 // call void %1(i8* null)
2039 coro::LowererBase Lowerer(M);
2040 Instruction *InsertPt =
2041 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
2042 : F.getEntryBlock().getTerminator();
2043 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
2044 auto *DevirtFnAddr =
2045 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
2046 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
2047 {Type::getInt8PtrTy(Context)}, false);
2048 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
2049
2050 // Update the call graph with the indirect call we just added.
2051 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
2052}
2053
2054// Make sure that there is a devirtualization trigger function that the
2055// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
2056// trigger function is not found, we will create one and add it to the current
2057// SCC.
2058static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
2059 Module &M = CG.getModule();
2060 if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
2061 return;
2062
2063 LLVMContext &C = M.getContext();
2064 auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
2065 /*isVarArg=*/false);
2066 Function *DevirtFn =
2067 Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
2068 CORO_DEVIRT_TRIGGER_FN, &M);
2069 DevirtFn->addFnAttr(Attribute::AlwaysInline);
2070 auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
2071 ReturnInst::Create(C, Entry);
2072
2073 auto *Node = CG.getOrInsertFunction(DevirtFn);
2074
2075 SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
2076 Nodes.push_back(Node);
2077 SCC.initialize(Nodes);
2078}
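The trigger function built here is trivial; in IR it comes out roughly as (a sketch of what the calls above produce):

    define private void @coro.devirt.trigger(i8* %0) #0 {
    entry:
      ret void
    }
    attributes #0 = { alwaysinline }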
2079
2080/// Replace a call to llvm.coro.prepare.retcon.
2081static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2082 LazyCallGraph::SCC &C) {
2083 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2084 auto Fn = CastFn->stripPointerCasts(); // as its original type
2085
2086 // Attempt to peephole this pattern:
2087 // %0 = bitcast [[TYPE]] @some_function to i8*
2088 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2089 // %2 = bitcast %1 to [[TYPE]]
2090 // ==>
2091 // %2 = @some_function
2092 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2093 // Look for bitcasts back to the original function type.
2094 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2095 if (!Cast || Cast->getType() != Fn->getType())
2096 continue;
2097
2098 // Replace and remove the cast.
2099 Cast->replaceAllUsesWith(Fn);
2100 Cast->eraseFromParent();
2101 }
2102
2103 // Replace any remaining uses with the function as an i8*.
2104 // This can never directly be a callee, so we don't need to update CG.
2105 Prepare->replaceAllUsesWith(CastFn);
2106 Prepare->eraseFromParent();
2107
2108 // Kill dead bitcasts.
2109 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2110 if (!Cast->use_empty())
2111 break;
2112 CastFn = Cast->getOperand(0);
2113 Cast->eraseFromParent();
2114 }
2115}
2116/// Replace a call to llvm.coro.prepare.retcon.
2117static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2118 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2119 auto Fn = CastFn->stripPointerCasts(); // as its original type
2120
2121 // Find call graph nodes for the preparation.
2122 CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2123 if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2124 PrepareUserNode = CG[Prepare->getFunction()];
2125 FnNode = CG[ConcreteFn];
2126 }
2127
2128 // Attempt to peephole this pattern:
2129 // %0 = bitcast [[TYPE]] @some_function to i8*
2130 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2131 // %2 = bitcast %1 to [[TYPE]]
2132 // ==>
2133 // %2 = @some_function
2134 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2135 // Look for bitcasts back to the original function type.
2136 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2137 if (!Cast || Cast->getType() != Fn->getType()) continue;
2138
2139 // Check whether the replacement will introduce new direct calls.
2140 // If so, we'll need to update the call graph.
2141 if (PrepareUserNode) {
2142 for (auto &Use : Cast->uses()) {
2143 if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2144 if (!CB->isCallee(&Use))
2145 continue;
2146 PrepareUserNode->removeCallEdgeFor(*CB);
2147 PrepareUserNode->addCalledFunction(CB, FnNode);
2148 }
2149 }
2150 }
2151
2152 // Replace and remove the cast.
2153 Cast->replaceAllUsesWith(Fn);
2154 Cast->eraseFromParent();
2155 }
2156
2157 // Replace any remaining uses with the function as an i8*.
2158 // This can never directly be a callee, so we don't need to update CG.
2159 Prepare->replaceAllUsesWith(CastFn);
2160 Prepare->eraseFromParent();
2161
2162 // Kill dead bitcasts.
2163 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2164 if (!Cast->use_empty()) break;
2165 CastFn = Cast->getOperand(0);
2166 Cast->eraseFromParent();
2167 }
2168}
2169
2170static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2171 LazyCallGraph::SCC &C) {
2172 bool Changed = false;
2173 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2174 // Intrinsics can only be used in calls.
2175 auto *Prepare = cast<CallInst>(P.getUser());
2176 replacePrepare(Prepare, CG, C);
2177 Changed = true;
2178 }
2179
2180 return Changed;
2181}
2182
2183/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2184/// IPO from operating on calls to a retcon coroutine before it's been
2185/// split. This is only safe to do after we've split all retcon
2186 /// coroutines in the module. We can do this in this pass because
2187/// this pass does promise to split all retcon coroutines (as opposed to
2188/// switch coroutines, which are lowered in multiple stages).
2189static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2190 bool Changed = false;
2191 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2192 // Intrinsics can only be used in calls.
2193 auto *Prepare = cast<CallInst>(P.getUser());
2194 replacePrepare(Prepare, CG);
2195 Changed = true;
2196 }
2197
2198 return Changed;
2199}
2200
2201static bool declaresCoroSplitIntrinsics(const Module &M) {
2202 return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2203 "llvm.coro.prepare.retcon",
2204 "llvm.coro.prepare.async"});
2205}
2206
2207static void addPrepareFunction(const Module &M,
2208 SmallVectorImpl<Function *> &Fns,
2209 StringRef Name) {
2210 auto *PrepareFn = M.getFunction(Name);
2211 if (PrepareFn && !PrepareFn->use_empty())
2212 Fns.push_back(PrepareFn);
2213}
2214
2215PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2216 CGSCCAnalysisManager &AM,
2217 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2218 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2219 // non-zero number of nodes, so we assume that here and grab the first
2220 // node's function's module.
2221 Module &M = *C.begin()->getFunction().getParent();
2222 auto &FAM =
2223 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2224
2225 // Check for uses of llvm.coro.prepare.retcon/async.
2226 SmallVector<Function *, 2> PrepareFns;
2227 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2228 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2229
2230 // Find coroutines for processing.
2231 SmallVector<LazyCallGraph::Node *> Coroutines;
2232 for (LazyCallGraph::Node &N : C)
2233 if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2234 Coroutines.push_back(&N);
2235
2236 if (Coroutines.empty() && PrepareFns.empty())
2237 return PreservedAnalyses::all();
2238
2239 if (Coroutines.empty()) {
2240 for (auto *PrepareFn : PrepareFns) {
2241 replaceAllPrepares(PrepareFn, CG, C);
2242 }
2243 }
2244
2245 // Split all the coroutines.
2246 for (LazyCallGraph::Node *N : Coroutines) {
2247 Function &F = N->getFunction();
2248 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2249                   << "' state: "
2250                   << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2251                   << "\n");
2252 F.removeFnAttr(CORO_PRESPLIT_ATTR);
2253
2254 SmallVector<Function *, 4> Clones;
2255 const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
2256 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2257
2258 if (!Shape.CoroSuspends.empty()) {
2259 // Run the CGSCC pipeline on the original and newly split functions.
2260 UR.CWorklist.insert(&C);
2261 for (Function *Clone : Clones)
2262 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2263 }
2264 }
2265
2266 if (!PrepareFns.empty()) {
2267 for (auto *PrepareFn : PrepareFns) {
2268 replaceAllPrepares(PrepareFn, CG, C);
2269 }
2270 }
2271
2272 return PreservedAnalyses::none();
2273}
2274
2275namespace {
2276
2277// We present a coroutine to LLVM as an ordinary function with suspension
2278// points marked up with intrinsics. We let the optimizer party on the coroutine
2279// as a single function for as long as possible. Shortly before the coroutine is
2280// eligible to be inlined into its callers, we split up the coroutine into parts
2281// corresponding to initial, resume and destroy invocations of the coroutine,
2282// add them to the current SCC and restart the IPO pipeline to optimize the
2283// coroutine subfunctions we extracted before proceeding to the caller of the
2284// coroutine.
2285struct CoroSplitLegacy : public CallGraphSCCPass {
2286 static char ID; // Pass identification, replacement for typeid
2287
2288 CoroSplitLegacy(bool OptimizeFrame = false)
2289 : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
2290 initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2291 }
2292
2293 bool Run = false;
2294 bool OptimizeFrame;
2295
2296 // A coroutine is identified by the presence of the coro.begin intrinsic; if
2297 // we don't have any, this pass has nothing to do.
2298 bool doInitialization(CallGraph &CG) override {
2299 Run = declaresCoroSplitIntrinsics(CG.getModule());
2300 return CallGraphSCCPass::doInitialization(CG);
2301 }
2302
2303 bool runOnSCC(CallGraphSCC &SCC) override {
2304 if (!Run)
1: Assuming field 'Run' is true
2: Taking false branch
2305 return false;
2306
2307 // Check for uses of llvm.coro.prepare.retcon.
2308 SmallVector<Function *, 2> PrepareFns;
2309 auto &M = SCC.getCallGraph().getModule();
2310 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2311 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2312
2313 // Find coroutines for processing.
2314 SmallVector<Function *, 4> Coroutines;
2315 for (CallGraphNode *CGN : SCC)
2316 if (auto *F = CGN->getFunction())
2317 if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2318 Coroutines.push_back(F);
2319
2320 if (Coroutines.empty() && PrepareFns.empty())
2321 return false;
2322
2323 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2324
2325 if (Coroutines.empty()) {
3: Taking false branch
2326 bool Changed = false;
2327 for (auto *PrepareFn : PrepareFns)
2328 Changed |= replaceAllPrepares(PrepareFn, CG);
2329 return Changed;
2330 }
2331
2332 createDevirtTriggerFunc(CG, SCC);
2333
2334 // Split all the coroutines.
2335 for (Function *F : Coroutines) {
4: Assuming '__begin2' is not equal to '__end2'
2336 Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2337 StringRef Value = Attr.getValueAsString();
2338 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2339                   << "' state: " << Value << "\n");
5: Assuming 'DebugFlag' is false
6: Loop condition is false. Exiting loop
2340 // Async lowering marks coroutines to trigger a restart of the pipeline
2341 // after it has split them.
2342 if (Value == ASYNC_RESTART_AFTER_SPLIT) {
7: Assuming the condition is false
8: Taking false branch
2343 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2344 continue;
2345 }
2346 if (Value == UNPREPARED_FOR_SPLIT) {
9: Assuming the condition is false
10: Taking false branch
2347 prepareForSplit(*F, CG);
2348 continue;
2349 }
2350 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2351
2352 SmallVector<Function *, 4> Clones;
2353 const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
11: Calling 'splitCoroutine'
2354 updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2355 if (Shape.ABI == coro::ABI::Async) {
2356 // Restart SCC passes.
2357 // Mark the function for the CoroElide pass. It will devirtualize, causing
2358 // a restart of the SCC pipeline.
2359 prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2360 }
2361 }
2362
2363 for (auto *PrepareFn : PrepareFns)
2364 replaceAllPrepares(PrepareFn, CG);
2365
2366 return true;
2367 }
2368
2369 void getAnalysisUsage(AnalysisUsage &AU) const override {
2370 CallGraphSCCPass::getAnalysisUsage(AU);
2371 }
2372
2373 StringRef getPassName() const override { return "Coroutine Splitting"; }
2374};
2375
2376} // end anonymous namespace
2377
2378char CoroSplitLegacy::ID = 0;
2379
2380 INITIALIZE_PASS_BEGIN(
2381     CoroSplitLegacy, "coro-split",
2382     "Split coroutine into a set of functions driving its state machine", false,
2383     false)
2384 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2385 INITIALIZE_PASS_END(
2386     CoroSplitLegacy, "coro-split",
2387     "Split coroutine into a set of functions driving its state machine", false,
2388     false)
2390Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
2391 return new CoroSplitLegacy(OptimizeFrame);
2392}