Bug Summary

File: llvm/lib/Transforms/Coroutines/CoroSplit.cpp
Warning: line 463, column 19
Called C++ object pointer is null
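
The path the checker reports, condensed (a sketch of the flow; the numbered steps refer to the annotated source below): createClone() constructs a CoroCloner with the switch-lowering constructor, which leaves ActiveSuspend as nullptr (step 20). CoroCloner::create() then passes that null ActiveSuspend to createCloneDeclaration() (step 27). There the checker assumes Shape.ABI == coro::ABI::Async (step 29), so getFunctionTypeFromAsyncSuspend() is called with a null Suspend (steps 31-32) and dereferences it at line 463 (step 34):

    CoroCloner Cloner(F, Suffix, Shape, FKind); // switch ctor: ActiveSuspend stays nullptr
    Cloner.create();                            // createCloneDeclaration(..., /*ActiveSuspend=*/nullptr)
    //   if (Shape.ABI == coro::ABI::Async)     // assumed true by the checker
    //     getFunctionTypeFromAsyncSuspend(nullptr)
    //       -> Suspend->getParent()            // null dereference reported here (line 463)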

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CoroSplit.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Coroutines -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Coroutines -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
11// We present a coroutine to LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to the initial, resume, and destroy invocations of the coroutine,
16// add them to the current SCC and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
20
21#include "llvm/Transforms/Coroutines/CoroSplit.h"
22#include "CoroInstr.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/Analysis/CFG.h"
30#include "llvm/Analysis/CallGraph.h"
31#include "llvm/Analysis/CallGraphSCCPass.h"
32#include "llvm/Analysis/LazyCallGraph.h"
33#include "llvm/IR/Argument.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/CFG.h"
37#include "llvm/IR/CallingConv.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DerivedTypes.h"
41#include "llvm/IR/Dominators.h"
42#include "llvm/IR/Function.h"
43#include "llvm/IR/GlobalValue.h"
44#include "llvm/IR/GlobalVariable.h"
45#include "llvm/IR/IRBuilder.h"
46#include "llvm/IR/InstIterator.h"
47#include "llvm/IR/InstrTypes.h"
48#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Instructions.h"
50#include "llvm/IR/IntrinsicInst.h"
51#include "llvm/IR/LLVMContext.h"
52#include "llvm/IR/LegacyPassManager.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Value.h"
56#include "llvm/IR/Verifier.h"
57#include "llvm/InitializePasses.h"
58#include "llvm/Pass.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/PrettyStackTrace.h"
62#include "llvm/Support/raw_ostream.h"
63#include "llvm/Transforms/Scalar.h"
64#include "llvm/Transforms/Utils/BasicBlockUtils.h"
65#include "llvm/Transforms/Utils/CallGraphUpdater.h"
66#include "llvm/Transforms/Utils/Cloning.h"
67#include "llvm/Transforms/Utils/Local.h"
68#include "llvm/Transforms/Utils/ValueMapper.h"
69#include <cassert>
70#include <cstddef>
71#include <cstdint>
72#include <initializer_list>
73#include <iterator>
74
75using namespace llvm;
76
77#define DEBUG_TYPE "coro-split"
78
79namespace {
80
81/// A little helper class for building the cloned functions of a coroutine.
82class CoroCloner {
83public:
84 enum class Kind {
85 /// The shared resume function for a switch lowering.
86 SwitchResume,
87
88 /// The shared unwind function for a switch lowering.
89 SwitchUnwind,
90
91 /// The shared cleanup function for a switch lowering.
92 SwitchCleanup,
93
94 /// An individual continuation function.
95 Continuation,
96
97 /// An async resume function.
98 Async,
99 };
100
101private:
102 Function &OrigF;
103 Function *NewF;
104 const Twine &Suffix;
105 coro::Shape &Shape;
106 Kind FKind;
107 ValueToValueMapTy VMap;
108 IRBuilder<> Builder;
109 Value *NewFramePtr = nullptr;
110
111 /// The active suspend instruction; meaningful only for continuation and async
112 /// ABIs.
113 AnyCoroSuspendInst *ActiveSuspend = nullptr;
Step 20: Null pointer value stored to 'Cloner.ActiveSuspend'
114
115public:
116 /// Create a cloner for a switch lowering.
117 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
118 Kind FKind)
119 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
120 FKind(FKind), Builder(OrigF.getContext()) {
121 assert(Shape.ABI == coro::ABI::Switch);
Step 21: Assuming field 'ABI' is equal to Switch
Step 22: '?' condition is true
122 }
123
124 /// Create a cloner for a continuation lowering.
125 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
126 Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
127 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
128 FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
129 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
130 assert(Shape.ABI == coro::ABI::Retcon ||
131 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
132 assert(NewF && "need existing function for continuation");
133 assert(ActiveSuspend && "need active suspend point for continuation");
134 }
135
136 Function *getFunction() const {
137 assert(NewF != nullptr && "declaration not yet set");
138 return NewF;
139 }
140
141 void create();
142
143private:
144 bool isSwitchDestroyFunction() {
145 switch (FKind) {
146 case Kind::Async:
147 case Kind::Continuation:
148 case Kind::SwitchResume:
149 return false;
150 case Kind::SwitchUnwind:
151 case Kind::SwitchCleanup:
152 return true;
153 }
154 llvm_unreachable("Unknown CoroCloner::Kind enum");
155 }
156
157 void replaceEntryBlock();
158 Value *deriveNewFramePointer();
159 void replaceRetconOrAsyncSuspendUses();
160 void replaceCoroSuspends();
161 void replaceCoroEnds();
162 void replaceSwiftErrorOps();
163 void salvageDebugInfo();
164 void handleFinalSuspend();
165};
166
167} // end anonymous namespace
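
For orientation, the two construction modes above look like this in use (a sketch; ContFn and Suspend are illustrative names, not from this file):

    // Switch lowering: NewF and ActiveSuspend both remain nullptr.
    CoroCloner SwitchCloner(OrigF, ".resume", Shape, CoroCloner::Kind::SwitchResume);

    // Continuation/async lowering: both must be supplied (asserted non-null).
    CoroCloner ContCloner(OrigF, ".resume.0", Shape, ContFn, Suspend);

The reported bug path goes through the first form, which is the only one that can leave ActiveSuspend null.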
168
169static void maybeFreeRetconStorage(IRBuilder<> &Builder,
170 const coro::Shape &Shape, Value *FramePtr,
171 CallGraph *CG) {
172 assert(Shape.ABI == coro::ABI::Retcon ||
173 Shape.ABI == coro::ABI::RetconOnce);
174 if (Shape.RetconLowering.IsFrameInlineInStorage)
175 return;
176
177 Shape.emitDealloc(Builder, FramePtr, CG);
178}
179
180/// Replace an llvm.coro.end.async.
181/// Will inline the musttail-marked call, if there is one.
182/// \returns true if cleanup of the coro.end block is needed, false otherwise.
183static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
184 IRBuilder<> Builder(End);
185
186 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
187 if (!EndAsync) {
188 Builder.CreateRetVoid();
189 return true /*needs cleanup of coro.end block*/;
190 }
191
192 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
193 if (!MustTailCallFunc) {
194 Builder.CreateRetVoid();
195 return true /*needs cleanup of coro.end block*/;
196 }
197
198 // Move the must tail call from the predecessor block into the end block.
199 auto *CoroEndBlock = End->getParent();
200 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
201 assert(MustTailCallFuncBlock && "Must have a single predecessor block");
202 auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
203 auto *MustTailCall = cast<CallInst>(&*std::prev(It));
204 CoroEndBlock->getInstList().splice(
205 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
206
207 // Insert the return instruction.
208 Builder.SetInsertPoint(End);
209 Builder.CreateRetVoid();
210 InlineFunctionInfo FnInfo;
211
212 // Remove the rest of the block, by splitting it into an unreachable block.
213 auto *BB = End->getParent();
214 BB->splitBasicBlock(End);
215 BB->getTerminator()->eraseFromParent();
216
217 auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
218 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
219 (void)InlineRes;
220
221 // We have cleaned up the coro.end block above.
222 return false;
223}
224
225/// Replace a non-unwind call to llvm.coro.end.
226static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
227 const coro::Shape &Shape, Value *FramePtr,
228 bool InResume, CallGraph *CG) {
229 // Start inserting right before the coro.end.
230 IRBuilder<> Builder(End);
231
232 // Create the return instruction.
233 switch (Shape.ABI) {
234 // The cloned functions in switch-lowering always return void.
235 case coro::ABI::Switch:
236 // coro.end doesn't immediately end the coroutine in the main function
237 // in this lowering, because we need to deallocate the coroutine.
238 if (!InResume)
239 return;
240 Builder.CreateRetVoid();
241 break;
242
243 // In async lowering this returns.
244 case coro::ABI::Async: {
245 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
246 if (!CoroEndBlockNeedsCleanup)
247 return;
248 break;
249 }
250
251 // In unique continuation lowering, the continuations always return void.
252 // But we may have implicitly allocated storage.
253 case coro::ABI::RetconOnce:
254 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
255 Builder.CreateRetVoid();
256 break;
257
258 // In non-unique continuation lowering, we signal completion by returning
259 // a null continuation.
260 case coro::ABI::Retcon: {
261 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
262 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
263 auto RetStructTy = dyn_cast<StructType>(RetTy);
264 PointerType *ContinuationTy =
265 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
266
267 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
268 if (RetStructTy) {
269 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
270 ReturnValue, 0);
271 }
272 Builder.CreateRet(ReturnValue);
273 break;
274 }
275 }
276
277 // Remove the rest of the block, by splitting it into an unreachable block.
278 auto *BB = End->getParent();
279 BB->splitBasicBlock(End);
280 BB->getTerminator()->eraseFromParent();
281}
282
283/// Replace an unwind call to llvm.coro.end.
284static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
285 Value *FramePtr, bool InResume,
286 CallGraph *CG) {
287 IRBuilder<> Builder(End);
288
289 switch (Shape.ABI) {
290 // In switch-lowering, this does nothing in the main function.
291 case coro::ABI::Switch:
292 if (!InResume)
293 return;
294 break;
295 // In async lowering this does nothing.
296 case coro::ABI::Async:
297 break;
298 // In continuation-lowering, this frees the continuation storage.
299 case coro::ABI::Retcon:
300 case coro::ABI::RetconOnce:
301 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
302 break;
303 }
304
305 // If coro.end has an associated bundle, add cleanupret instruction.
306 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
307 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
308 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
309 End->getParent()->splitBasicBlock(End);
310 CleanupRet->getParent()->getTerminator()->eraseFromParent();
311 }
312}
313
314static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
315 Value *FramePtr, bool InResume, CallGraph *CG) {
316 if (End->isUnwind())
317 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
318 else
319 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
320
321 auto &Context = End->getContext();
322 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
323 : ConstantInt::getFalse(Context));
324 End->eraseFromParent();
325}
326
327// Create an entry block for a resume function with a switch that will jump to
328// suspend points.
329static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
330 assert(Shape.ABI == coro::ABI::Switch);
331 LLVMContext &C = F.getContext();
332
333 // resume.entry:
334 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
335 // i32 2
336 // %index = load i32, i32* %index.addr
337 // switch i32 %index, label %unreachable [
338 // i32 0, label %resume.0
339 // i32 1, label %resume.1
340 // ...
341 // ]
342
343 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
344 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
345
346 IRBuilder<> Builder(NewEntry);
347 auto *FramePtr = Shape.FramePtr;
348 auto *FrameTy = Shape.FrameTy;
349 auto *GepIndex = Builder.CreateStructGEP(
350 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
351 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
352 auto *Switch =
353 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
354 Shape.SwitchLowering.ResumeSwitch = Switch;
355
356 size_t SuspendIndex = 0;
357 for (auto *AnyS : Shape.CoroSuspends) {
358 auto *S = cast<CoroSuspendInst>(AnyS);
359 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
360
361 // Replace CoroSave with a store to Index:
362 // %index.addr = getelementptr %f.frame... (index field number)
363 // store i32 0, i32* %index.addr1
364 auto *Save = S->getCoroSave();
365 Builder.SetInsertPoint(Save);
366 if (S->isFinal()) {
367 // Final suspend point is represented by storing zero in ResumeFnAddr.
368 auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
369 coro::Shape::SwitchFieldIndex::Resume,
370 "ResumeFn.addr");
371 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
372 FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
373 Builder.CreateStore(NullPtr, GepIndex);
374 } else {
375 auto *GepIndex = Builder.CreateStructGEP(
376 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
377 Builder.CreateStore(IndexVal, GepIndex);
378 }
379 Save->replaceAllUsesWith(ConstantTokenNone::get(C));
380 Save->eraseFromParent();
381
382 // Split block before and after coro.suspend and add a jump from an entry
383 // switch:
384 //
385 // whateverBB:
386 // whatever
387 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
388 // switch i8 %0, label %suspend [i8 0, label %resume
389 // i8 1, label %cleanup]
390 // becomes:
391 //
392 // whateverBB:
393 // whatever
394 // br label %resume.0.landing
395 //
396 // resume.0: ; <--- jump from the switch in the resume.entry
397 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
398 // br label %resume.0.landing
399 //
400 // resume.0.landing:
401 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
402 // switch i8 %1, label %suspend [i8 0, label %resume
403 // i8 1, label %cleanup]
404
405 auto *SuspendBB = S->getParent();
406 auto *ResumeBB =
407 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
408 auto *LandingBB = ResumeBB->splitBasicBlock(
409 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
410 Switch->addCase(IndexVal, ResumeBB);
411
412 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
413 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
414 S->replaceAllUsesWith(PN);
415 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
416 PN->addIncoming(S, ResumeBB);
417
418 ++SuspendIndex;
419 }
420
421 Builder.SetInsertPoint(UnreachBB);
422 Builder.CreateUnreachable();
423
424 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
425}
426
427
428// Rewrite final suspend point handling. We do not use the suspend index to
429// represent the final suspend point. Instead we zero out ResumeFnAddr in the
430// coroutine frame, since it is undefined behavior to resume a coroutine
431// suspended at the final suspend point. Thus, in the resume function, we can
432// simply remove the last case (when coro::Shape is built, the final suspend
433// point (if present) is always the last element of the CoroSuspends array).
434// In the destroy function, we add a code sequence to check if ResumeFnAddr
435// is null, and if so, jump to the appropriate label to handle cleanup from
436// the final suspend point.
437void CoroCloner::handleFinalSuspend() {
438 assert(Shape.ABI == coro::ABI::Switch &&
439 Shape.SwitchLowering.HasFinalSuspend);
440 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
441 auto FinalCaseIt = std::prev(Switch->case_end());
442 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
443 Switch->removeCase(FinalCaseIt);
444 if (isSwitchDestroyFunction()) {
445 BasicBlock *OldSwitchBB = Switch->getParent();
446 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
447 Builder.SetInsertPoint(OldSwitchBB->getTerminator());
448 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
449 coro::Shape::SwitchFieldIndex::Resume,
450 "ResumeFn.addr");
451 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
452 GepIndex);
453 auto *Cond = Builder.CreateIsNull(Load);
454 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
455 OldSwitchBB->getTerminator()->eraseFromParent();
456 }
457}
458
459static FunctionType *
460getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
461 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
462 auto *StructTy = cast<StructType>(AsyncSuspend->getType());
Step 33: The object is a 'StructType'
463 auto &Context = Suspend->getParent()->getParent()->getContext();
Step 34: Called C++ object pointer is null
464 auto *VoidTy = Type::getVoidTy(Context);
465 return FunctionType::get(VoidTy, StructTy->elements(), false);
466}
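
One way to make the non-null precondition explicit here (a sketch only, not the upstream fix) is to assert on entry; this documents the invariant and gives the analyzer the constraint it is currently missing:

    static FunctionType *
    getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
      assert(Suspend && "async lowering requires an active suspend point");
      auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
      auto *StructTy = cast<StructType>(AsyncSuspend->getType());
      // Suspend is known non-null past the assert, so this chain is safe.
      auto &Context = Suspend->getParent()->getParent()->getContext();
      return FunctionType::get(Type::getVoidTy(Context), StructTy->elements(),
                               false);
    }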
467
468static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
469 const Twine &Suffix,
470 Module::iterator InsertBefore,
471 AnyCoroSuspendInst *ActiveSuspend) {
472 Module *M = OrigF.getParent();
473 auto *FnTy = (Shape.ABI != coro::ABI::Async)
Step 29: Assuming field 'ABI' is equal to Async
Step 30: '?' condition is false
474 ? Shape.getResumeFunctionType()
475 : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
Step 31: Passing null pointer value via 1st parameter 'Suspend'
Step 32: Calling 'getFunctionTypeFromAsyncSuspend'
476
477 Function *NewF =
478 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
479 OrigF.getName() + Suffix);
480 if (Shape.ABI != coro::ABI::Async)
481 NewF->addParamAttr(0, Attribute::NonNull);
482
483 // For the async lowering ABI we can't guarantee that the context argument is
484 // not accessed via a different pointer not based on the argument.
485 if (Shape.ABI != coro::ABI::Async)
486 NewF->addParamAttr(0, Attribute::NoAlias);
487
488 M->getFunctionList().insert(InsertBefore, NewF);
489
490 return NewF;
491}
492
493/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
494/// arguments to the continuation function.
495///
496/// This assumes that the builder has a meaningful insertion point.
497void CoroCloner::replaceRetconOrAsyncSuspendUses() {
498 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
499 Shape.ABI == coro::ABI::Async);
500
501 auto NewS = VMap[ActiveSuspend];
502 if (NewS->use_empty()) return;
503
504 // Copy out all the continuation arguments after the buffer pointer into
505 // an easily-indexed data structure for convenience.
506 SmallVector<Value*, 8> Args;
507 // The async ABI includes all arguments -- including the first argument.
508 bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
509 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
510 E = NewF->arg_end();
511 I != E; ++I)
512 Args.push_back(&*I);
513
514 // If the suspend returns a single scalar value, we can just do a simple
515 // replacement.
516 if (!isa<StructType>(NewS->getType())) {
517 assert(Args.size() == 1);
518 NewS->replaceAllUsesWith(Args.front());
519 return;
520 }
521
522 // Try to peephole extracts of an aggregate return.
523 for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
524 auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
525 if (!EVI || EVI->getNumIndices() != 1)
526 continue;
527
528 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
529 EVI->eraseFromParent();
530 }
531
532 // If we have no remaining uses, we're done.
533 if (NewS->use_empty()) return;
534
535 // Otherwise, we need to create an aggregate.
536 Value *Agg = UndefValue::get(NewS->getType());
537 for (size_t I = 0, E = Args.size(); I != E; ++I)
538 Agg = Builder.CreateInsertValue(Agg, Args[I], I);
539
540 NewS->replaceAllUsesWith(Agg);
541}
542
543void CoroCloner::replaceCoroSuspends() {
544 Value *SuspendResult;
545
546 switch (Shape.ABI) {
547 // In switch lowering, replace coro.suspend with the appropriate value
548 // for the type of function we're extracting.
549 // Replacing coro.suspend with (0) will result in control flow proceeding to
550 // a resume label associated with a suspend point, replacing it with (1) will
551 // result in control flow proceeding to a cleanup label associated with this
552 // suspend point.
553 case coro::ABI::Switch:
554 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
555 break;
556
557 // In async lowering there are no uses of the result.
558 case coro::ABI::Async:
559 return;
560
561 // In returned-continuation lowering, the arguments from earlier
562 // continuations are theoretically arbitrary, and they should have been
563 // spilled.
564 case coro::ABI::RetconOnce:
565 case coro::ABI::Retcon:
566 return;
567 }
568
569 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
570 // The active suspend was handled earlier.
571 if (CS == ActiveSuspend) continue;
572
573 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
574 MappedCS->replaceAllUsesWith(SuspendResult);
575 MappedCS->eraseFromParent();
576 }
577}
578
579void CoroCloner::replaceCoroEnds() {
580 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
581 // We use a null call graph because there's no call graph node for
582 // the cloned function yet. We'll just be rebuilding that later.
583 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
584 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
585 }
586}
587
588static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
589 ValueToValueMapTy *VMap) {
590 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
591 return;
592 Value *CachedSlot = nullptr;
593 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
594 if (CachedSlot) {
595 assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
596 "multiple swifterror slots in function with different types");
597 return CachedSlot;
598 }
599
600 // Check if the function has a swifterror argument.
601 for (auto &Arg : F.args()) {
602 if (Arg.isSwiftError()) {
603 CachedSlot = &Arg;
604 assert(Arg.getType()->getPointerElementType() == ValueTy &&
605 "swifterror argument does not have expected type");
606 return &Arg;
607 }
608 }
609
610 // Create a swifterror alloca.
611 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
612 auto Alloca = Builder.CreateAlloca(ValueTy);
613 Alloca->setSwiftError(true);
614
615 CachedSlot = Alloca;
616 return Alloca;
617 };
618
619 for (CallInst *Op : Shape.SwiftErrorOps) {
620 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
621 IRBuilder<> Builder(MappedOp);
622
623 // If there are no arguments, this is a 'get' operation.
624 Value *MappedResult;
625 if (Op->getNumArgOperands() == 0) {
626 auto ValueTy = Op->getType();
627 auto Slot = getSwiftErrorSlot(ValueTy);
628 MappedResult = Builder.CreateLoad(ValueTy, Slot);
629 } else {
630 assert(Op->getNumArgOperands() == 1);
631 auto Value = MappedOp->getArgOperand(0);
632 auto ValueTy = Value->getType();
633 auto Slot = getSwiftErrorSlot(ValueTy);
634 Builder.CreateStore(Value, Slot);
635 MappedResult = Slot;
636 }
637
638 MappedOp->replaceAllUsesWith(MappedResult);
639 MappedOp->eraseFromParent();
640 }
641
642 // If we're updating the original function, we've invalidated SwiftErrorOps.
643 if (VMap == nullptr) {
644 Shape.SwiftErrorOps.clear();
645 }
646}
647
648void CoroCloner::replaceSwiftErrorOps() {
649 ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
650}
651
652void CoroCloner::salvageDebugInfo() {
653 SmallVector<DbgVariableIntrinsic *, 8> Worklist;
654 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
655 for (auto &BB : *NewF)
656 for (auto &I : BB)
657 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
658 Worklist.push_back(DVI);
659 for (DbgVariableIntrinsic *DVI : Worklist)
660 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.ReuseFrameSlot);
661
662 // Remove all salvaged dbg.declare intrinsics that became
663 // either unreachable or stale due to the CoroSplit transformation.
664 DominatorTree DomTree(*NewF);
665 auto IsUnreachableBlock = [&](BasicBlock *BB) {
666 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
667 &DomTree);
668 };
669 for (DbgVariableIntrinsic *DVI : Worklist) {
670 if (IsUnreachableBlock(DVI->getParent()))
671 DVI->eraseFromParent();
672 else if (dyn_cast_or_null<AllocaInst>(DVI->getVariableLocationOp(0))) {
673 // Count all non-debuginfo uses in reachable blocks.
674 unsigned Uses = 0;
675 for (auto *User : DVI->getVariableLocationOp(0)->users())
676 if (auto *I = dyn_cast<Instruction>(User))
677 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
678 ++Uses;
679 if (!Uses)
680 DVI->eraseFromParent();
681 }
682 }
683}
684
685void CoroCloner::replaceEntryBlock() {
686 // In the original function, the AllocaSpillBlock is a block immediately
687 // following the allocation of the frame object which defines GEPs for
688 // all the allocas that have been moved into the frame, and it ends by
689 // branching to the original beginning of the coroutine. Make this
690 // the entry block of the cloned function.
691 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
692 auto *OldEntry = &NewF->getEntryBlock();
693 Entry->setName("entry" + Suffix);
694 Entry->moveBefore(OldEntry);
695 Entry->getTerminator()->eraseFromParent();
696
697 // Clear all predecessors of the new entry block. There should be
698 // exactly one predecessor, which we created when splitting out
699 // AllocaSpillBlock to begin with.
700 assert(Entry->hasOneUse());
701 auto BranchToEntry = cast<BranchInst>(Entry->user_back());
702 assert(BranchToEntry->isUnconditional());
703 Builder.SetInsertPoint(BranchToEntry);
704 Builder.CreateUnreachable();
705 BranchToEntry->eraseFromParent();
706
707 // Branch from the entry to the appropriate place.
708 Builder.SetInsertPoint(Entry);
709 switch (Shape.ABI) {
710 case coro::ABI::Switch: {
711 // In switch-lowering, we built a resume-entry block in the original
712 // function. Make the entry block branch to this.
713 auto *SwitchBB =
714 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
715 Builder.CreateBr(SwitchBB);
716 break;
717 }
718 case coro::ABI::Async:
719 case coro::ABI::Retcon:
720 case coro::ABI::RetconOnce: {
721 // In continuation ABIs, we want to branch to immediately after the
722 // active suspend point. Earlier phases will have put the suspend in its
723 // own basic block, so just thread our jump directly to its successor.
724 assert((Shape.ABI == coro::ABI::Async &&
725 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
726 ((Shape.ABI == coro::ABI::Retcon ||
727 Shape.ABI == coro::ABI::RetconOnce) &&
728 isa<CoroSuspendRetconInst>(ActiveSuspend)));
729 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
730 auto Branch = cast<BranchInst>(MappedCS->getNextNode());
731 assert(Branch->isUnconditional());
732 Builder.CreateBr(Branch->getSuccessor(0));
733 break;
734 }
735 }
736
737 // Any static alloca that's still being used but not reachable from the new
738 // entry needs to be moved to the new entry.
739 Function *F = OldEntry->getParent();
740 DominatorTree DT{*F};
741 for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
742 Instruction &I = *IT++;
743 auto *Alloca = dyn_cast<AllocaInst>(&I);
744 if (!Alloca || I.use_empty())
745 continue;
746 if (DT.isReachableFromEntry(I.getParent()) ||
747 !isa<ConstantInt>(Alloca->getArraySize()))
748 continue;
749 I.moveBefore(*Entry, Entry->getFirstInsertionPt());
750 }
751}
752
753/// Derive the value of the new frame pointer.
754Value *CoroCloner::deriveNewFramePointer() {
755 // Builder should be inserting to the front of the new entry block.
756
757 switch (Shape.ABI) {
758 // In switch-lowering, the argument is the frame pointer.
759 case coro::ABI::Switch:
760 return &*NewF->arg_begin();
761 // In async-lowering, one of the arguments is an async context as determined
762 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
763 // the resume function from the async context projection function associated
764 // with the active suspend. The frame is located as a tail to the async
765 // context header.
766 case coro::ABI::Async: {
767 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
768 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
769 auto *CalleeContext = NewF->getArg(ContextIdx);
770 auto *FramePtrTy = Shape.FrameTy->getPointerTo();
771 auto *ProjectionFunc =
772 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
773 auto DbgLoc =
774 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
775 // Calling i8* (i8*)
776 auto *CallerContext = Builder.CreateCall(
777 cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
778 ProjectionFunc, CalleeContext);
779 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
780 CallerContext->setDebugLoc(DbgLoc);
781 // The frame is located after the async_context header.
782 auto &Context = Builder.getContext();
783 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
784 Type::getInt8Ty(Context), CallerContext,
785 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
786 // Inline the projection function.
787 InlineFunctionInfo InlineInfo;
788 auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
789 assert(InlineRes.isSuccess());
790 (void)InlineRes;
791 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
792 }
793 // In continuation-lowering, the argument is the opaque storage.
794 case coro::ABI::Retcon:
795 case coro::ABI::RetconOnce: {
796 Argument *NewStorage = &*NewF->arg_begin();
797 auto FramePtrTy = Shape.FrameTy->getPointerTo();
798
799 // If the storage is inline, just bitcast the storage to the frame type.
800 if (Shape.RetconLowering.IsFrameInlineInStorage)
801 return Builder.CreateBitCast(NewStorage, FramePtrTy);
802
803 // Otherwise, load the real frame from the opaque storage.
804 auto FramePtrPtr =
805 Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
806 return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
807 }
808 }
809 llvm_unreachable("bad ABI");
810}
811
812static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
813 unsigned ParamIndex,
814 uint64_t Size, Align Alignment) {
815 AttrBuilder ParamAttrs;
816 ParamAttrs.addAttribute(Attribute::NonNull);
817 ParamAttrs.addAttribute(Attribute::NoAlias);
818 ParamAttrs.addAlignmentAttr(Alignment);
819 ParamAttrs.addDereferenceableAttr(Size);
820 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
821}
822
823static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
824 unsigned ParamIndex) {
825 AttrBuilder ParamAttrs;
826 ParamAttrs.addAttribute(Attribute::SwiftAsync);
827 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
828}
829
830static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
831 unsigned ParamIndex) {
832 AttrBuilder ParamAttrs;
833 ParamAttrs.addAttribute(Attribute::SwiftSelf);
834 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
835}
836
837/// Clone the body of the original function into a resume function of
838/// some sort.
839void CoroCloner::create() {
840 // Create the new function if we don't already have one.
841 if (!NewF) {
Step 25: Assuming field 'NewF' is null
Step 26: Taking true branch
842 NewF = createCloneDeclaration(OrigF, Shape, Suffix,
Step 28: Calling 'createCloneDeclaration'
843 OrigF.getParent()->end(), ActiveSuspend);
Step 27: Passing null pointer value via 5th parameter 'ActiveSuspend'
844 }
845
846 // Replace all args with undefs. The buildCoroutineFrame algorithm has already
847 // rewritten accesses to the args that occur after suspend points with loads
848 // and stores to/from the coroutine frame.
849 for (Argument &A : OrigF.args())
850 VMap[&A] = UndefValue::get(A.getType());
851
852 SmallVector<ReturnInst *, 4> Returns;
853
854 // Ignore attempts to change certain attributes of the function.
855 // TODO: maybe there should be a way to suppress this during cloning?
856 auto savedVisibility = NewF->getVisibility();
857 auto savedUnnamedAddr = NewF->getUnnamedAddr();
858 auto savedDLLStorageClass = NewF->getDLLStorageClass();
859
860 // NewF's linkage (which CloneFunctionInto does *not* change) might not
861 // be compatible with the visibility of OrigF (which it *does* change),
862 // so protect against that.
863 auto savedLinkage = NewF->getLinkage();
864 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
865
866 CloneFunctionInto(NewF, &OrigF, VMap,
867 CloneFunctionChangeType::LocalChangesOnly, Returns);
868
869 auto &Context = NewF->getContext();
870
871 // For async functions / continuations, adjust the scope line of the
872 // clone to the line number of the suspend point. However, only
873 // adjust the scope line when the files are the same. This ensures
874 // line number and file name belong together. The scope line is
875 // associated with all pre-prologue instructions. This avoids a jump
876 // in the linetable from the function declaration to the suspend point.
877 if (DISubprogram *SP = NewF->getSubprogram()) {
878 assert(SP != OrigF.getSubprogram() && SP->isDistinct());
879 if (ActiveSuspend)
880 if (auto DL = ActiveSuspend->getDebugLoc())
881 if (SP->getFile() == DL->getFile())
882 SP->setScopeLine(DL->getLine());
883 // Update the linkage name to reflect the modified symbol name. It
884 // is necessary to update the linkage name in Swift, since the
885 // mangling changes for resume functions. It might also be the
886 // right thing to do in C++, but due to a limitation in LLVM's
887 // AsmPrinter we can only do this if the function doesn't have an
888 // abstract specification, since the DWARF backend expects the
889 // abstract specification to contain the linkage name and asserts
890 // that they are identical.
891 if (!SP->getDeclaration() && SP->getUnit() &&
892 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
893 SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
894 }
895
896 NewF->setLinkage(savedLinkage);
897 NewF->setVisibility(savedVisibility);
898 NewF->setUnnamedAddr(savedUnnamedAddr);
899 NewF->setDLLStorageClass(savedDLLStorageClass);
900
901 // Replace the attributes of the new function:
902 auto OrigAttrs = NewF->getAttributes();
903 auto NewAttrs = AttributeList();
904
905 switch (Shape.ABI) {
906 case coro::ABI::Switch:
907 // Bootstrap attributes by copying function attributes from the
908 // original function. This should include optimization settings and so on.
909 NewAttrs = NewAttrs.addFnAttributes(Context, OrigAttrs.getFnAttrs());
910
911 addFramePointerAttrs(NewAttrs, Context, 0,
912 Shape.FrameSize, Shape.FrameAlign);
913 break;
914 case coro::ABI::Async: {
915 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
916 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
917 Attribute::SwiftAsync)) {
918 uint32_t ArgAttributeIndices =
919 ActiveAsyncSuspend->getStorageArgumentIndex();
920 auto ContextArgIndex = ArgAttributeIndices & 0xff;
921 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
922
923 // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
924 // `swiftself`.
925 auto SwiftSelfIndex = ArgAttributeIndices >> 8;
926 if (SwiftSelfIndex)
927 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
928 }
929
930 // Transfer the original function's attributes.
931 auto FnAttrs = OrigF.getAttributes().getFnAttrs();
932 NewAttrs = NewAttrs.addFnAttributes(Context, FnAttrs);
933 break;
934 }
935 case coro::ABI::Retcon:
936 case coro::ABI::RetconOnce:
937 // If we have a continuation prototype, just use its attributes,
938 // full-stop.
939 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
940
941 addFramePointerAttrs(NewAttrs, Context, 0,
942 Shape.getRetconCoroId()->getStorageSize(),
943 Shape.getRetconCoroId()->getStorageAlignment());
944 break;
945 }
946
947 switch (Shape.ABI) {
948 // In these ABIs, the cloned functions always return 'void', and the
949 // existing return sites are meaningless. Note that for unique
950 // continuations, this includes the returns associated with suspends;
951 // this is fine because we can't suspend twice.
952 case coro::ABI::Switch:
953 case coro::ABI::RetconOnce:
954 // Remove old returns.
955 for (ReturnInst *Return : Returns)
956 changeToUnreachable(Return);
957 break;
958
959 // With multi-suspend continuations, we'll already have eliminated the
960 // original returns and inserted returns before all the suspend points,
961 // so we want to leave any returns in place.
962 case coro::ABI::Retcon:
963 break;
964 // Async lowering will insert musttail call functions at all suspend points
965 // followed by a return.
966 // Don't change returns to unreachable because that will trip up the verifier.
967 // These returns should be unreachable from the clone.
968 case coro::ABI::Async:
969 break;
970 }
971
972 NewF->setAttributes(NewAttrs);
973 NewF->setCallingConv(Shape.getResumeFunctionCC());
974
975 // Set up the new entry block.
976 replaceEntryBlock();
977
978 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
979 NewFramePtr = deriveNewFramePointer();
980
981 // Remap frame pointer.
982 Value *OldFramePtr = VMap[Shape.FramePtr];
983 NewFramePtr->takeName(OldFramePtr);
984 OldFramePtr->replaceAllUsesWith(NewFramePtr);
985
986 // Remap vFrame pointer.
987 auto *NewVFrame = Builder.CreateBitCast(
988 NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
989 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
990 OldVFrame->replaceAllUsesWith(NewVFrame);
991
992 switch (Shape.ABI) {
993 case coro::ABI::Switch:
994 // Rewrite final suspend handling, as it is not done via the switch (this
995 // allows us to remove the final case from the switch, since it is undefined
996 // behavior to resume a coroutine suspended at the final suspend point).
997 if (Shape.SwitchLowering.HasFinalSuspend)
998 handleFinalSuspend();
999 break;
1000 case coro::ABI::Async:
1001 case coro::ABI::Retcon:
1002 case coro::ABI::RetconOnce:
1003 // Replace uses of the active suspend with the corresponding
1004 // continuation-function arguments.
1005 assert(ActiveSuspend != nullptr &&
1006 "no active suspend when lowering a continuation-style coroutine");
1007 replaceRetconOrAsyncSuspendUses();
1008 break;
1009 }
1010
1011 // Handle suspends.
1012 replaceCoroSuspends();
1013
1014 // Handle swifterror.
1015 replaceSwiftErrorOps();
1016
1017 // Remove coro.end intrinsics.
1018 replaceCoroEnds();
1019
1020 // Salvage debug info that points into the coroutine frame.
1021 salvageDebugInfo();
1022
1023 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1024 // to suppress deallocation code.
1025 if (Shape.ABI == coro::ABI::Switch)
1026 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1027 /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1028}
1029
1030// Create a resume clone by cloning the body of the original function, setting
1031// a new entry block and replacing coro.suspend with an appropriate value to
1032// force a resume or cleanup path for every suspend point.
1033static Function *createClone(Function &F, const Twine &Suffix,
1034 coro::Shape &Shape, CoroCloner::Kind FKind) {
1035 CoroCloner Cloner(F, Suffix, Shape, FKind);
Step 19: Calling constructor for 'CoroCloner'
Step 23: Returning from constructor for 'CoroCloner'
1036 Cloner.create();
Step 24: Calling 'CoroCloner::create'
1037 return Cloner.getFunction();
1038}
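
Note the tension in the reported path at this call site: the switch-lowering constructor asserts Shape.ABI == coro::ABI::Switch (steps 21-22), yet step 29 later assumes the same, unmodified Shape.ABI equals Async inside createCloneDeclaration. This suggests the diagnostic is a false positive for this particular chain. A defensive restatement of the invariant (a sketch, redundant with the constructor's assert but analyzer-friendly) would be:

    static Function *createClone(Function &F, const Twine &Suffix,
                                 coro::Shape &Shape, CoroCloner::Kind FKind) {
      // Only the switch ABI reaches this helper, so the async branch in
      // createCloneDeclaration (which would dereference the null
      // ActiveSuspend) is never taken.
      assert(Shape.ABI == coro::ABI::Switch);
      CoroCloner Cloner(F, Suffix, Shape, FKind);
      Cloner.create();
      return Cloner.getFunction();
    }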
1039
1040/// Remove calls to llvm.coro.end in the original function.
1041static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1042 for (auto End : Shape.CoroEnds) {
1043 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1044 }
1045}
1046
1047static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1048 assert(Shape.ABI == coro::ABI::Async);
1049
1050 auto *FuncPtrStruct = cast<ConstantStruct>(
1051 Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1052 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1053 auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1054 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1055 Shape.AsyncLowering.ContextSize);
1056 auto *NewFuncPtrStruct = ConstantStruct::get(
1057 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1058
1059 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1060}
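// Illustrative sketch (hypothetical IR; field widths and sizes are assumed,
// not taken from the original source): the async function pointer is a
// two-field constant struct, and only its second field (the context size) is
// rewritten above:
//
//   @f.fp = constant <{ i32, i32 }> <{ i32 %reloffset, i32 64 }>
// ==>
//   @f.fp = constant <{ i32, i32 }> <{ i32 %reloffset, i32 128 }>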
1061
1062static void replaceFrameSize(coro::Shape &Shape) {
1063 if (Shape.ABI == coro::ABI::Async)
1064 updateAsyncFuncPointerContextSize(Shape);
1065
1066 if (Shape.CoroSizes.empty())
1067 return;
1068
1069 // In the same function all coro.sizes should have the same result type.
1070 auto *SizeIntrin = Shape.CoroSizes.back();
1071 Module *M = SizeIntrin->getModule();
1072 const DataLayout &DL = M->getDataLayout();
1073 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1074 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1075
1076 for (CoroSizeInst *CS : Shape.CoroSizes) {
1077 CS->replaceAllUsesWith(SizeConstant);
1078 CS->eraseFromParent();
1079 }
1080}
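// Illustrative sketch (hypothetical IR and frame size): after replaceFrameSize,
// every coro.size use becomes the concrete allocation size of FrameTy, e.g.
//
//   %size = call i64 @llvm.coro.size.i64()
//   %mem  = call i8* @malloc(i64 %size)
// ==>
//   %mem  = call i8* @malloc(i64 24)        ; 24 == assumed alloc size of FrameTy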
1081
1082// Create a global constant array containing pointers to functions provided and
1083// set Info parameter of CoroBegin to point at this constant. Example:
1084//
1085// @f.resumers = internal constant [2 x void(%f.frame*)*]
1086// [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1087// define void @f() {
1088// ...
1089// call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1090// i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1091//
1092// Assumes that all the functions have the same signature.
1093static void setCoroInfo(Function &F, coro::Shape &Shape,
1094 ArrayRef<Function *> Fns) {
1095 // This only works under the switch-lowering ABI because coro elision
1096 // only works on the switch-lowering ABI.
1097 assert(Shape.ABI == coro::ABI::Switch);
1098
1099 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1100 assert(!Args.empty());
1101 Function *Part = *Fns.begin();
1102 Module *M = Part->getParent();
1103 auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1104
1105 auto *ConstVal = ConstantArray::get(ArrTy, Args);
1106 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1107 GlobalVariable::PrivateLinkage, ConstVal,
1108 F.getName() + Twine(".resumers"));
1109
1110 // Update coro.begin instruction to refer to this constant.
1111 LLVMContext &C = F.getContext();
1112 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1113 Shape.getSwitchCoroId()->setInfo(BC);
1114}
1115
1116// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1117static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1118 Function *DestroyFn, Function *CleanupFn) {
1119 assert(Shape.ABI == coro::ABI::Switch);
1120
1121 IRBuilder<> Builder(Shape.FramePtr->getNextNode());
1122 auto *ResumeAddr = Builder.CreateStructGEP(
1123 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1124 "resume.addr");
1125 Builder.CreateStore(ResumeFn, ResumeAddr);
1126
1127 Value *DestroyOrCleanupFn = DestroyFn;
1128
1129 CoroIdInst *CoroId = Shape.getSwitchCoroId();
1130 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1131 // If there is a CoroAlloc and it returns false (meaning we elide the
1132 // allocation), use CleanupFn instead of DestroyFn.
1133 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1134 }
1135
1136 auto *DestroyAddr = Builder.CreateStructGEP(
1137 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1138 "destroy.addr");
1139 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1140}
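// Illustrative sketch (hypothetical IR; assumes Resume and Destroy are the
// first two frame fields) of the stores emitted by updateCoroFrame:
//
//   %resume.addr  = getelementptr %f.Frame, %f.Frame* %frame, i32 0, i32 0
//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
//   %sel          = select i1 %coro.alloc, void (%f.Frame*)* @f.destroy,
//                                          void (%f.Frame*)* @f.cleanup
//   %destroy.addr = getelementptr %f.Frame, %f.Frame* %frame, i32 0, i32 1
//   store void (%f.Frame*)* %sel, void (%f.Frame*)** %destroy.addr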
1141
1142static void postSplitCleanup(Function &F) {
1143 removeUnreachableBlocks(F);
1144
1145 // For now, we do a mandatory verification step because we don't
1146 // entirely trust this pass. Note that we don't want to add a verifier
1147 // pass to FPM below because it will also verify all the global data.
1148 if (verifyFunction(F, &errs()))
1149 report_fatal_error("Broken function");
1150}
1151
1152// Assuming we arrived at the block NewBlock from the Prev instruction, store
1153// the PHIs' incoming values in the ResolvedValues map.
1154static void
1155scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1156 DenseMap<Value *, Value *> &ResolvedValues) {
1157 auto *PrevBB = Prev->getParent();
1158 for (PHINode &PN : NewBlock->phis()) {
1159 auto V = PN.getIncomingValueForBlock(PrevBB);
1160 // See if we already resolved it.
1161 auto VI = ResolvedValues.find(V);
1162 if (VI != ResolvedValues.end())
1163 V = VI->second;
1164 // Remember the value.
1165 ResolvedValues[&PN] = V;
1166 }
1167}
1168
1169// Replace a sequence of branches leading to a ret with a clone of that ret
1170// instruction. A suspend point is represented by a switch; track the PHI
1171// values and select the correct case successor when possible.
1172static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1173 DenseMap<Value *, Value *> ResolvedValues;
1174 BasicBlock *UnconditionalSucc = nullptr;
1175
1176 Instruction *I = InitialInst;
1177 while (I->isTerminator() ||
1178 (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
1179 if (isa<ReturnInst>(I)) {
1180 if (I != InitialInst) {
1181 // If InitialInst is an unconditional branch,
1182 // remove PHI values that come from basic block of InitialInst
1183 if (UnconditionalSucc)
1184 UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1185 ReplaceInstWithInst(InitialInst, I->clone());
1186 }
1187 return true;
1188 }
1189 if (auto *BR = dyn_cast<BranchInst>(I)) {
1190 if (BR->isUnconditional()) {
1191 BasicBlock *BB = BR->getSuccessor(0);
1192 if (I == InitialInst)
1193 UnconditionalSucc = BB;
1194 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1195 I = BB->getFirstNonPHIOrDbgOrLifetime();
1196 continue;
1197 }
1198 } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1199 auto *BR = dyn_cast<BranchInst>(I->getNextNode());
1200 if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
1201 // If the case count of a suspended switch instruction is reduced to
1202 // one, it is simplified to a CmpInst by llvm::ConstantFoldTerminator,
1203 // and the comparison looks like: %cond = icmp eq i8 %V, constant.
1204 ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1205 if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
1206 Value *V = CondCmp->getOperand(0);
1207 auto it = ResolvedValues.find(V);
1208 if (it != ResolvedValues.end())
1209 V = it->second;
1210
1211 if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
1212 BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
1213 ? BR->getSuccessor(0)
1214 : BR->getSuccessor(1);
1215 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1216 I = BB->getFirstNonPHIOrDbgOrLifetime();
1217 continue;
1218 }
1219 }
1220 }
1221 } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1222 Value *V = SI->getCondition();
1223 auto it = ResolvedValues.find(V);
1224 if (it != ResolvedValues.end())
1225 V = it->second;
1226 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1227 BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1228 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1229 I = BB->getFirstNonPHIOrDbgOrLifetime();
1230 continue;
1231 }
1232 }
1233 return false;
1234 }
1235 return false;
1236}
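// Illustrative sketch (hypothetical IR, not from the original source):
// starting from an unconditional branch, the walk above resolves PHIs and
// constant conditions until it reaches a ret:
//
//   br label %bb1                      ; InitialInst
// bb1:
//   %v = phi i8 [ 0, %entry ]          ; resolved to 0 via ResolvedValues
//   %cond = icmp eq i8 %v, 0
//   br i1 %cond, label %exit, label %other
// exit:
//   ret void                           ; InitialInst is replaced by this clone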
1237
1238// Check whether CI obeys the rules of musttail attribute.
1239static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1240 if (CI.isInlineAsm())
1241 return false;
1242
1243 // Match prototypes and calling conventions of resume function.
1244 FunctionType *CalleeTy = CI.getFunctionType();
1245 if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1246 return false;
1247
1248 Type *CalleeParmTy = CalleeTy->getParamType(0);
1249 if (!CalleeParmTy->isPointerTy() ||
1250 (CalleeParmTy->getPointerAddressSpace() != 0))
1251 return false;
1252
1253 if (CI.getCallingConv() != F.getCallingConv())
1254 return false;
1255
1256 // CI should not have any ABI-impacting function attributes.
1257 static const Attribute::AttrKind ABIAttrs[] = {
1258 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
1259 Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
1260 Attribute::SwiftSelf, Attribute::SwiftError};
1261 AttributeList Attrs = CI.getAttributes();
1262 for (auto AK : ABIAttrs)
1263 if (Attrs.hasParamAttr(0, AK))
1264 return false;
1265
1266 return true;
1267}
1268
1269// Add musttail to any resume instructions that are immediately followed by a
1270// suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
1271// for symmetric coroutine control transfer (C++ Coroutines TS extension).
1272// This transformation is done only in the resume part of the coroutine, which
1273// has a signature and calling convention identical to the coro.resume call.
1274static void addMustTailToCoroResumes(Function &F) {
1275 bool changed = false;
1276
1277 // Collect potential resume instructions.
1278 SmallVector<CallInst *, 4> Resumes;
1279 for (auto &I : instructions(F))
1280 if (auto *Call = dyn_cast<CallInst>(&I))
1281 if (shouldBeMustTail(*Call, F))
1282 Resumes.push_back(Call);
1283
1284 // Set musttail on those that are followed by a ret instruction.
1285 for (CallInst *Call : Resumes)
1286 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1287 Call->setTailCallKind(CallInst::TCK_MustTail);
1288 changed = true;
1289 }
1290
1291 if (changed)
1292 removeUnreachableBlocks(F);
1293}
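// Illustrative sketch (hypothetical IR): a resume call that passes
// shouldBeMustTail, and whose following terminators fold into a ret, ends up as
//
//   musttail call void %resume.fn(i8* %frame)
//   ret void
//
// which guarantees tail-call codegen for symmetric transfer even at -O0.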
1294
1295// The coroutine has no suspend points. Remove the heap allocation for the
1296// coroutine frame if possible.
1297static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1298 auto *CoroBegin = Shape.CoroBegin;
1299 auto *CoroId = CoroBegin->getId();
1300 auto *AllocInst = CoroId->getCoroAlloc();
1301 switch (Shape.ABI) {
1302 case coro::ABI::Switch: {
1303 auto SwitchId = cast<CoroIdInst>(CoroId);
1304 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1305 if (AllocInst) {
1306 IRBuilder<> Builder(AllocInst);
1307 auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1308 Frame->setAlignment(Shape.FrameAlign);
1309 auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1310 AllocInst->replaceAllUsesWith(Builder.getFalse());
1311 AllocInst->eraseFromParent();
1312 CoroBegin->replaceAllUsesWith(VFrame);
1313 } else {
1314 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1315 }
1316
1317 break;
1318 }
1319 case coro::ABI::Async:
1320 case coro::ABI::Retcon:
1321 case coro::ABI::RetconOnce:
1322 CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1323 break;
1324 }
1325
1326 CoroBegin->eraseFromParent();
1327}
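// Illustrative sketch (hypothetical IR): for a switch-ABI coroutine with no
// suspend points and a coro.alloc present, the frame degrades to a stack slot:
//
//   %frame  = alloca %f.Frame, align 8
//   %vframe = bitcast %f.Frame* %frame to i8*
//
// coro.alloc is folded to false (so the heap-allocation path becomes dead)
// and all uses of coro.begin are rewritten to %vframe.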
1328
1329// SimplifySuspendPoint needs to check that there are no calls between
1330// coro_save and coro_suspend, since any of those calls may potentially resume
1331// the coroutine; if that is the case we cannot eliminate the suspend point.
1332static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1333 for (Instruction *I = From; I != To; I = I->getNextNode()) {
1334 // Assume that no intrinsic can resume the coroutine.
1335 if (isa<IntrinsicInst>(I))
1336 continue;
1337
1338 if (isa<CallBase>(I))
1339 return true;
1340 }
1341 return false;
1342}
1343
1344static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1345 SmallPtrSet<BasicBlock *, 8> Set;
1346 SmallVector<BasicBlock *, 8> Worklist;
1347
1348 Set.insert(SaveBB);
1349 Worklist.push_back(ResDesBB);
1350
1351 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1352 // returns a token consumed by the suspend instruction, all blocks in between
1353 // will have to eventually hit SaveBB when going backwards from ResDesBB.
1354 while (!Worklist.empty()) {
1355 auto *BB = Worklist.pop_back_val();
1356 Set.insert(BB);
1357 for (auto *Pred : predecessors(BB))
1358 if (Set.count(Pred) == 0)
1359 Worklist.push_back(Pred);
1360 }
1361
1362 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1363 Set.erase(SaveBB);
1364 Set.erase(ResDesBB);
1365
1366 for (auto *BB : Set)
1367 if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1368 return true;
1369
1370 return false;
1371}
1372
1373static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1374 auto *SaveBB = Save->getParent();
1375 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1376
1377 if (SaveBB == ResumeOrDestroyBB)
1378 return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1379
1380 // Any calls from Save to the end of the block?
1381 if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1382 return true;
1383
1384 // Any calls from the beginning of the block up to ResumeOrDestroy?
1385 if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1386 ResumeOrDestroy))
1387 return true;
1388
1389 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1390 if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1391 return true;
1392
1393 return false;
1394}
1395
1396// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1397// suspend point and replace it with normal control flow.
1398static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1399 CoroBeginInst *CoroBegin) {
1400 Instruction *Prev = Suspend->getPrevNode();
1401 if (!Prev) {
1402 auto *Pred = Suspend->getParent()->getSinglePredecessor();
1403 if (!Pred)
1404 return false;
1405 Prev = Pred->getTerminator();
1406 }
1407
1408 CallBase *CB = dyn_cast<CallBase>(Prev);
1409 if (!CB)
1410 return false;
1411
1412 auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1413
1414 // See if the callsite is for resumption or destruction of the coroutine.
1415 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1416 if (!SubFn)
1417 return false;
1418
1419 // Does not refer to the current coroutine, we cannot do anything with it.
1420 if (SubFn->getFrame() != CoroBegin)
1421 return false;
1422
1423 // See if the transformation is safe. Specifically, see if there are any
1424 // calls in between Save and CallInstr. They can potentially resume the
1425 // coroutine, rendering this optimization unsafe.
1426 auto *Save = Suspend->getCoroSave();
1427 if (hasCallsBetween(Save, CB))
1428 return false;
1429
1430 // Replace llvm.coro.suspend with the value that results in resumption over
1431 // the resume or cleanup path.
1432 Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1433 Suspend->eraseFromParent();
1434 Save->eraseFromParent();
1435
1436 // No longer need a call to coro.resume or coro.destroy.
1437 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1438 BranchInst::Create(Invoke->getNormalDest(), Invoke);
1439 }
1440
1441 // Grab the CalledValue from CB before erasing the CallInstr.
1442 auto *CalledValue = CB->getCalledOperand();
1443 CB->eraseFromParent();
1444
1445 // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1446 if (CalledValue != SubFn && CalledValue->user_empty())
1447 if (auto *I = dyn_cast<Instruction>(CalledValue))
1448 I->eraseFromParent();
1449
1450 // Now we are good to remove SubFn.
1451 if (SubFn->user_empty())
1452 SubFn->eraseFromParent();
1453
1454 return true;
1455}
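// Illustrative sketch (hypothetical IR): a suspend point directly preceded by
// a devirtualized resume of the same coroutine, e.g.
//
//   %f   = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)   ; 0 == resume slot
//   %f.c = bitcast i8* %f to void (i8*)*
//   call fastcc void %f.c(i8* %hdl)
//   %s   = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// collapses: %s's uses are replaced by the raw index (0 selects the resume
// path), and the call, coro.save, and coro.suspend are all erased.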
1456
1457// Remove suspend points that are simplified.
1458static void simplifySuspendPoints(coro::Shape &Shape) {
1459 // Currently, the only simplification we do is switch-lowering-specific.
1460 if (Shape.ABI != coro::ABI::Switch)
1461 return;
1462
1463 auto &S = Shape.CoroSuspends;
1464 size_t I = 0, N = S.size();
1465 if (N == 0)
1466 return;
1467 while (true) {
1468 auto SI = cast<CoroSuspendInst>(S[I]);
1469 // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1470 // to resume a coroutine suspended at the final suspend point.
1471 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1472 if (--N == I)
1473 break;
1474 std::swap(S[I], S[N]);
1475 continue;
1476 }
1477 if (++I == N)
1478 break;
1479 }
1480 S.resize(N);
1481}
1482
1483static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1484 SmallVectorImpl<Function *> &Clones) {
1485 assert(Shape.ABI == coro::ABI::Switch);
17. '?' condition is true
1486
1487 createResumeEntryBlock(F, Shape);
1488 auto ResumeClone = createClone(F, ".resume", Shape,
18. Calling 'createClone'
1489 CoroCloner::Kind::SwitchResume);
1490 auto DestroyClone = createClone(F, ".destroy", Shape,
1491 CoroCloner::Kind::SwitchUnwind);
1492 auto CleanupClone = createClone(F, ".cleanup", Shape,
1493 CoroCloner::Kind::SwitchCleanup);
1494
1495 postSplitCleanup(*ResumeClone);
1496 postSplitCleanup(*DestroyClone);
1497 postSplitCleanup(*CleanupClone);
1498
1499 addMustTailToCoroResumes(*ResumeClone);
1500
1501 // Store the addresses of the resume/destroy/cleanup functions in the coroutine frame.
1502 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1503
1504 assert(Clones.empty());
1505 Clones.push_back(ResumeClone);
1506 Clones.push_back(DestroyClone);
1507 Clones.push_back(CleanupClone);
1508
1509 // Create a constant array referring to the resume/destroy/cleanup functions,
1510 // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1511 // pass can determine the correct function to call.
1512 setCoroInfo(F, Shape, Clones);
1513}
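// Net effect (names follow the suffixes used above): a switch-ABI coroutine
// @f is split into @f.resume, @f.destroy, and @f.cleanup; their addresses are
// stored into the frame, and @f.resumers (see setCoroInfo) is attached to
// coro.id for the benefit of CoroElide.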
1514
1515static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1516 Value *Continuation) {
1517 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1518 auto &Context = Suspend->getParent()->getParent()->getContext();
1519 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1520
1521 IRBuilder<> Builder(ResumeIntrinsic);
1522 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1523 ResumeIntrinsic->replaceAllUsesWith(Val);
1524 ResumeIntrinsic->eraseFromParent();
1525 Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1526 UndefValue::get(Int8PtrTy));
1527}
1528
1529/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1530static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1531 ArrayRef<Value *> FnArgs,
1532 SmallVectorImpl<Value *> &CallArgs) {
1533 size_t ArgIdx = 0;
1534 for (auto paramTy : FnTy->params()) {
1535 assert(ArgIdx < FnArgs.size());
1536 if (paramTy != FnArgs[ArgIdx]->getType())
1537 CallArgs.push_back(
1538 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1539 else
1540 CallArgs.push_back(FnArgs[ArgIdx]);
1541 ++ArgIdx;
1542 }
1543}
1544
1545CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1546 ArrayRef<Value *> Arguments,
1547 IRBuilder<> &Builder) {
1548 auto *FnTy =
1549 cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
1550 // Coerce the arguments; LLVM optimizations seem to ignore the types in
1551 // vararg functions and throw away casts in optimized mode.
1552 SmallVector<Value *, 8> CallArgs;
1553 coerceArguments(Builder, FnTy, Arguments, CallArgs);
1554
1555 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1556 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1557 TailCall->setDebugLoc(Loc);
1558 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1559 return TailCall;
1560}
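// Minimal usage sketch (hypothetical caller; mirrors the use in
// splitAsyncCoroutine below). A musttail call must be immediately followed by
// a ret:
//
//   IRBuilder<> Builder(ReturnBB);
//   CallInst *TailCall =
//       coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
//   Builder.CreateRetVoid();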
1561
1562static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1563 SmallVectorImpl<Function *> &Clones) {
1564 assert(Shape.ABI == coro::ABI::Async);
1565 assert(Clones.empty());
1566 // Reset various things that the optimizer might have decided it
1567 // "knows" about the coroutine function due to not seeing a return.
1568 F.removeFnAttr(Attribute::NoReturn);
1569 F.removeRetAttr(Attribute::NoAlias);
1570 F.removeRetAttr(Attribute::NonNull);
1571
1572 auto &Context = F.getContext();
1573 auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1574
1575 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1576 IRBuilder<> Builder(Id);
1577
1578 auto *FramePtr = Id->getStorage();
1579 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1580 FramePtr = Builder.CreateConstInBoundsGEP1_32(
1581 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1582 "async.ctx.frameptr");
1583
1584 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1585 {
1586 // Make sure we don't invalidate Shape.FramePtr.
1587 TrackingVH<Instruction> Handle(Shape.FramePtr);
1588 Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1589 Shape.FramePtr = Handle.getValPtr();
1590 }
1591
1592 // Create all the functions in order after the main function.
1593 auto NextF = std::next(F.getIterator());
1594
1595 // Create a continuation function for each of the suspend points.
1596 Clones.reserve(Shape.CoroSuspends.size());
1597 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1598 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1599
1600 // Create the clone declaration.
1601 auto ResumeNameSuffix = ".resume.";
1602 auto ProjectionFunctionName =
1603 Suspend->getAsyncContextProjectionFunction()->getName();
1604 bool UseSwiftMangling = false;
1605 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1606 ResumeNameSuffix = "TQ";
1607 UseSwiftMangling = true;
1608 } else if (ProjectionFunctionName.equals(
1609 "__swift_async_resume_get_context")) {
1610 ResumeNameSuffix = "TY";
1611 UseSwiftMangling = true;
1612 }
1613 auto *Continuation = createCloneDeclaration(
1614 F, Shape,
1615 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1616 : ResumeNameSuffix + Twine(Idx),
1617 NextF, Suspend);
1618 Clones.push_back(Continuation);
1619
1620 // Insert a branch to a new return block immediately before the suspend
1621 // point.
1622 auto *SuspendBB = Suspend->getParent();
1623 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1624 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1625
1626 // Place it before the first suspend.
1627 auto *ReturnBB =
1628 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1629 Branch->setSuccessor(0, ReturnBB);
1630
1631 IRBuilder<> Builder(ReturnBB);
1632
1633 // Insert the call to the tail call function and inline it.
1634 auto *Fn = Suspend->getMustTailCallFunction();
1635 SmallVector<Value *, 8> Args(Suspend->args());
1636 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1637 CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1638 auto *TailCall =
1639 coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1640 Builder.CreateRetVoid();
1641 InlineFunctionInfo FnInfo;
1642 auto InlineRes = InlineFunction(*TailCall, FnInfo);
1643 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1644 (void)InlineRes;
1645
1646 // Replace the llvm.coro.async.resume intrinsic call.
1647 replaceAsyncResumeFunction(Suspend, Continuation);
1648 }
1649
1650 assert(Clones.size() == Shape.CoroSuspends.size());
1651 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1652 auto *Suspend = Shape.CoroSuspends[Idx];
1653 auto *Clone = Clones[Idx];
1654
1655 CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1656 }
1657}
1658
1659static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1660 SmallVectorImpl<Function *> &Clones) {
1661 assert(Shape.ABI == coro::ABI::Retcon ||
1662 Shape.ABI == coro::ABI::RetconOnce);
1663 assert(Clones.empty());
1664
1665 // Reset various things that the optimizer might have decided it
1666 // "knows" about the coroutine function due to not seeing a return.
1667 F.removeFnAttr(Attribute::NoReturn);
1668 F.removeRetAttr(Attribute::NoAlias);
1669 F.removeRetAttr(Attribute::NonNull);
1670
1671 // Allocate the frame.
1672 auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1673 Value *RawFramePtr;
1674 if (Shape.RetconLowering.IsFrameInlineInStorage) {
1675 RawFramePtr = Id->getStorage();
1676 } else {
1677 IRBuilder<> Builder(Id);
1678
1679 // Determine the size of the frame.
1680 const DataLayout &DL = F.getParent()->getDataLayout();
1681 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1682
1683 // Allocate. We don't need to update the call graph node because we're
1684 // going to recompute it from scratch after splitting.
1685 // FIXME: pass the required alignment
1686 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1687 RawFramePtr =
1688 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1689
1690 // Stash the allocated frame pointer in the continuation storage.
1691 auto Dest = Builder.CreateBitCast(Id->getStorage(),
1692 RawFramePtr->getType()->getPointerTo());
1693 Builder.CreateStore(RawFramePtr, Dest);
1694 }
1695
1696 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1697 {
1698 // Make sure we don't invalidate Shape.FramePtr.
1699 TrackingVH<Instruction> Handle(Shape.FramePtr);
1700 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1701 Shape.FramePtr = Handle.getValPtr();
1702 }
1703
1704 // Create a unique return block.
1705 BasicBlock *ReturnBB = nullptr;
1706 SmallVector<PHINode *, 4> ReturnPHIs;
1707
1708 // Create all the functions in order after the main function.
1709 auto NextF = std::next(F.getIterator());
1710
1711 // Create a continuation function for each of the suspend points.
1712 Clones.reserve(Shape.CoroSuspends.size());
1713 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1714 auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1715
1716 // Create the clone declaration.
1717 auto Continuation =
1718 createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1719 Clones.push_back(Continuation);
1720
1721 // Insert a branch to the unified return block immediately before
1722 // the suspend point.
1723 auto SuspendBB = Suspend->getParent();
1724 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1725 auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1726
1727 // Create the unified return block.
1728 if (!ReturnBB) {
1729 // Place it before the first suspend.
1730 ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1731 NewSuspendBB);
1732 Shape.RetconLowering.ReturnBlock = ReturnBB;
1733
1734 IRBuilder<> Builder(ReturnBB);
1735
1736 // Create PHIs for all the return values.
1737 assert(ReturnPHIs.empty());
1738
1739 // First, the continuation.
1740 ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1741 Shape.CoroSuspends.size()));
1742
1743 // Next, all the directly-yielded values.
1744 for (auto ResultTy : Shape.getRetconResultTypes())
1745 ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1746 Shape.CoroSuspends.size()));
1747
1748 // Build the return value.
1749 auto RetTy = F.getReturnType();
1750
1751 // Cast the continuation value if necessary.
1752 // We can't rely on the types matching up because that type would
1753 // have to be infinite.
1754 auto CastedContinuationTy =
1755 (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1756 auto *CastedContinuation =
1757 Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1758
1759 Value *RetV;
1760 if (ReturnPHIs.size() == 1) {
1761 RetV = CastedContinuation;
1762 } else {
1763 RetV = UndefValue::get(RetTy);
1764 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1765 for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1766 RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1767 }
1768
1769 Builder.CreateRet(RetV);
1770 }
1771
1772 // Branch to the return block.
1773 Branch->setSuccessor(0, ReturnBB);
1774 ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1775 size_t NextPHIIndex = 1;
1776 for (auto &VUse : Suspend->value_operands())
1777 ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1778 assert(NextPHIIndex == ReturnPHIs.size());
1779 }
1780
1781 assert(Clones.size() == Shape.CoroSuspends.size());
1782 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1783 auto Suspend = Shape.CoroSuspends[i];
1784 auto Clone = Clones[i];
1785
1786 CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1787 }
1788}
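// Illustrative sketch (hypothetical IR): for a retcon coroutine whose return
// type is { i8*, i32 }, the unified return block built above looks roughly like
//
// coro.return:
//   %cont = phi void (...)* [ @f.resume.0, %bb0 ], [ @f.resume.1, %bb1 ]
//   %val  = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]
//   %c    = bitcast void (...)* %cont to i8*
//   %r0   = insertvalue { i8*, i32 } undef, i8* %c, 0
//   %r1   = insertvalue { i8*, i32 } %r0, i32 %val, 1
//   ret { i8*, i32 } %r1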
1789
1790namespace {
1791 class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1792 Function &F;
1793 public:
1794 PrettyStackTraceFunction(Function &F) : F(F) {}
1795 void print(raw_ostream &OS) const override {
1796 OS << "While splitting coroutine ";
1797 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1798 OS << "\n";
1799 }
1800 };
1801}
1802
1803static coro::Shape splitCoroutine(Function &F,
1804 SmallVectorImpl<Function *> &Clones,
1805 bool ReuseFrameSlot) {
1806 PrettyStackTraceFunction prettyStackTrace(F);
1807
1808 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1809 // up by uses in unreachable blocks, so remove them as a first pass.
1810 removeUnreachableBlocks(F);
1811
1812 coro::Shape Shape(F, ReuseFrameSlot);
1813 if (!Shape.CoroBegin)
12. Assuming field 'CoroBegin' is non-null
13. Taking false branch
1814 return Shape;
1815
1816 simplifySuspendPoints(Shape);
1817 buildCoroutineFrame(F, Shape);
1818 replaceFrameSize(Shape);
1819
1820 // If there are no suspend points, no split is required; just remove
1821 // the allocation and deallocation blocks, they are not needed.
1822 if (Shape.CoroSuspends.empty()) {
14. Taking false branch
1823 handleNoSuspendCoroutine(Shape);
1824 } else {
1825 switch (Shape.ABI) {
15. Control jumps to 'case Switch:' at line 1826
1826 case coro::ABI::Switch:
1827 splitSwitchCoroutine(F, Shape, Clones);
16. Calling 'splitSwitchCoroutine'
1828 break;
1829 case coro::ABI::Async:
1830 splitAsyncCoroutine(F, Shape, Clones);
1831 break;
1832 case coro::ABI::Retcon:
1833 case coro::ABI::RetconOnce:
1834 splitRetconCoroutine(F, Shape, Clones);
1835 break;
1836 }
1837 }
1838
1839 // Replace all the swifterror operations in the original function.
1840 // This invalidates SwiftErrorOps in the Shape.
1841 replaceSwiftErrorOps(F, Shape, nullptr);
1842
1843 return Shape;
1844}
1845
1846static void
1847updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1848 const SmallVectorImpl<Function *> &Clones,
1849 CallGraph &CG, CallGraphSCC &SCC) {
1850 if (!Shape.CoroBegin)
1851 return;
1852
1853 removeCoroEnds(Shape, &CG);
1854 postSplitCleanup(F);
1855
1856 // Update call graph and add the functions we created to the SCC.
1857 coro::updateCallGraph(F, Clones, CG, SCC);
1858}
1859
1860static void updateCallGraphAfterCoroutineSplit(
1861 LazyCallGraph::Node &N, const coro::Shape &Shape,
1862 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1863 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1864 FunctionAnalysisManager &FAM) {
1865 if (!Shape.CoroBegin)
1866 return;
1867
1868 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1869 auto &Context = End->getContext();
1870 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1871 End->eraseFromParent();
1872 }
1873
1874 if (!Clones.empty()) {
1875 switch (Shape.ABI) {
1876 case coro::ABI::Switch:
1877 // Each clone in the Switch lowering is independent of the other clones.
1878 // Let the LazyCallGraph know about each one separately.
1879 for (Function *Clone : Clones)
1880 CG.addSplitFunction(N.getFunction(), *Clone);
1881 break;
1882 case coro::ABI::Async:
1883 case coro::ABI::Retcon:
1884 case coro::ABI::RetconOnce:
1885 // Each clone in the Async/Retcon lowering references the other clones.
1886 // Let the LazyCallGraph know about all of them at once.
1887 if (!Clones.empty())
1888 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1889 break;
1890 }
1891
1892 // Let the CGSCC infra handle the changes to the original function.
1893 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1894 }
1895
1896 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1897 // to the split functions.
1898 postSplitCleanup(N.getFunction());
1899 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1900}
1901
1902// When we see a coroutine for the first time, we insert an indirect call to a
1903// devirt trigger function and mark the coroutine as ready for
1904// split.
1905// Async lowering uses this after it has split the function to restart the
1906// pipeline.
1907static void prepareForSplit(Function &F, CallGraph &CG,
1908 bool MarkForAsyncRestart = false) {
1909 Module &M = *F.getParent();
1910 LLVMContext &Context = F.getContext();
1911#ifndef NDEBUG
1912 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1913 assert(DevirtFn && "coro.devirt.trigger function not found");
1914#endif
1915
1916 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1917 ? ASYNC_RESTART_AFTER_SPLIT
1918 : PREPARED_FOR_SPLIT);
1919
1920 // Insert an indirect call sequence that will be devirtualized by CoroElide
1921 // pass:
1922 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1923 // %1 = bitcast i8* %0 to void(i8*)*
1924 // call void %1(i8* null)
1925 coro::LowererBase Lowerer(M);
1926 Instruction *InsertPt =
1927 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1928 : F.getEntryBlock().getTerminator();
1929 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
1930 auto *DevirtFnAddr =
1931 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
1932 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
1933 {Type::getInt8PtrTy(Context)}, false);
1934 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
1935
1936 // Update CG graph with an indirect call we just added.
1937 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
1938}
1939
1940// Make sure that there is a devirtualization trigger function that the
1941// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
1942// trigger function is not found, we will create one and add it to the current
1943// SCC.
1944static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
1945 Module &M = CG.getModule();
1946 if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
1947 return;
1948
1949 LLVMContext &C = M.getContext();
1950 auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
1951 /*isVarArg=*/false);
1952 Function *DevirtFn =
1953 Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
1954 CORO_DEVIRT_TRIGGER_FN, &M);
1955 DevirtFn->addFnAttr(Attribute::AlwaysInline);
1956 auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
1957 ReturnInst::Create(C, Entry);
1958
1959 auto *Node = CG.getOrInsertFunction(DevirtFn);
1960
1961 SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
1962 Nodes.push_back(Node);
1963 SCC.initialize(Nodes);
1964}
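// Sketch of the trigger function created above (IR rendering is approximate;
// derived from the code, not copied from any output):
//
//   define private void @coro.devirt.trigger(i8*) alwaysinline {
//   entry:
//     ret void
//   }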
1965
1966/// Replace a call to llvm.coro.prepare.retcon.
1967static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1968 LazyCallGraph::SCC &C) {
1969 auto CastFn = Prepare->getArgOperand(0); // as an i8*
1970 auto Fn = CastFn->stripPointerCasts(); // as its original type
1971
1972 // Attempt to peephole this pattern:
1973 // %0 = bitcast [[TYPE]] @some_function to i8*
1974 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
1975 // %2 = bitcast %1 to [[TYPE]]
1976 // ==>
1977 // %2 = @some_function
1978 for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
1979 // Look for bitcasts back to the original function type.
1980 auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1981 if (!Cast || Cast->getType() != Fn->getType())
1982 continue;
1983
1984 // Replace and remove the cast.
1985 Cast->replaceAllUsesWith(Fn);
1986 Cast->eraseFromParent();
1987 }
1988
1989 // Replace any remaining uses with the function as an i8*.
1990 // This can never directly be a callee, so we don't need to update CG.
1991 Prepare->replaceAllUsesWith(CastFn);
1992 Prepare->eraseFromParent();
1993
1994 // Kill dead bitcasts.
1995 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1996 if (!Cast->use_empty())
1997 break;
1998 CastFn = Cast->getOperand(0);
1999 Cast->eraseFromParent();
2000 }
2001}
2002/// Replace a call to llvm.coro.prepare.retcon.
2003static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2004 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2005 auto Fn = CastFn->stripPointerCasts(); // as its original type
2006
2007 // Find call graph nodes for the preparation.
2008 CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2009 if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2010 PrepareUserNode = CG[Prepare->getFunction()];
2011 FnNode = CG[ConcreteFn];
2012 }
2013
2014 // Attempt to peephole this pattern:
2015 // %0 = bitcast [[TYPE]] @some_function to i8*
2016 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2017 // %2 = bitcast %1 to [[TYPE]]
2018 // ==>
2019 // %2 = @some_function
2020 for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
2021 UI != UE; ) {
2022 // Look for bitcasts back to the original function type.
2023 auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
2024 if (!Cast || Cast->getType() != Fn->getType()) continue;
2025
2026 // Check whether the replacement will introduce new direct calls.
2027 // If so, we'll need to update the call graph.
2028 if (PrepareUserNode) {
2029 for (auto &Use : Cast->uses()) {
2030 if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2031 if (!CB->isCallee(&Use))
2032 continue;
2033 PrepareUserNode->removeCallEdgeFor(*CB);
2034 PrepareUserNode->addCalledFunction(CB, FnNode);
2035 }
2036 }
2037 }
2038
2039 // Replace and remove the cast.
2040 Cast->replaceAllUsesWith(Fn);
2041 Cast->eraseFromParent();
2042 }
2043
2044 // Replace any remaining uses with the function as an i8*.
2045 // This can never directly be a callee, so we don't need to update CG.
2046 Prepare->replaceAllUsesWith(CastFn);
2047 Prepare->eraseFromParent();
2048
2049 // Kill dead bitcasts.
2050 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2051 if (!Cast->use_empty()) break;
2052 CastFn = Cast->getOperand(0);
2053 Cast->eraseFromParent();
2054 }
2055}
2056
2057static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2058 LazyCallGraph::SCC &C) {
2059 bool Changed = false;
2060 for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
2061 // Intrinsics can only be used in calls.
2062 auto *Prepare = cast<CallInst>((PI++)->getUser());
2063 replacePrepare(Prepare, CG, C);
2064 Changed = true;
2065 }
2066
2067 return Changed;
2068}
2069
2070/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2071/// IPO from operating on calls to a retcon coroutine before it's been
2072/// split. This is only safe to do after we've split all retcon
2073/// coroutines in the module. We can do that in this pass because
2074/// this pass promises to split all retcon coroutines (as opposed to
2075/// switch coroutines, which are lowered in multiple stages).
2076static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2077 bool Changed = false;
2078 for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
2079 PI != PE; ) {
2080 // Intrinsics can only be used in calls.
2081 auto *Prepare = cast<CallInst>((PI++)->getUser());
2082 replacePrepare(Prepare, CG);
2083 Changed = true;
2084 }
2085
2086 return Changed;
2087}
2088
2089static bool declaresCoroSplitIntrinsics(const Module &M) {
2090 return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2091 "llvm.coro.prepare.retcon",
2092 "llvm.coro.prepare.async"});
2093}
2094
2095static void addPrepareFunction(const Module &M,
2096 SmallVectorImpl<Function *> &Fns,
2097 StringRef Name) {
2098 auto *PrepareFn = M.getFunction(Name);
2099 if (PrepareFn && !PrepareFn->use_empty())
2100 Fns.push_back(PrepareFn);
2101}
2102
2103PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2104 CGSCCAnalysisManager &AM,
2105 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2106 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2107 // non-zero number of nodes, so we assume that here and grab the first
2108 // node's function's module.
2109 Module &M = *C.begin()->getFunction().getParent();
2110 auto &FAM =
2111 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2112
2113 if (!declaresCoroSplitIntrinsics(M))
2114 return PreservedAnalyses::all();
2115
2116 // Check for uses of llvm.coro.prepare.retcon/async.
2117 SmallVector<Function *, 2> PrepareFns;
2118 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2119 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2120
2121 // Find coroutines for processing.
2122 SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2123 for (LazyCallGraph::Node &N : C)
2124 if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2125 Coroutines.push_back(&N);
2126
2127 if (Coroutines.empty() && PrepareFns.empty())
2128 return PreservedAnalyses::all();
2129
2130 if (Coroutines.empty()) {
2131 for (auto *PrepareFn : PrepareFns) {
2132 replaceAllPrepares(PrepareFn, CG, C);
2133 }
2134 }
2135
2136 // Split all the coroutines.
2137 for (LazyCallGraph::Node *N : Coroutines) {
2138 Function &F = N->getFunction();
2139 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2140 << "' state: "
2141 << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2142 << "\n");
2143 F.removeFnAttr(CORO_PRESPLIT_ATTR);
2144
2145 SmallVector<Function *, 4> Clones;
2146 const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2147 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2148
2149 if (!Shape.CoroSuspends.empty()) {
2150 // Run the CGSCC pipeline on the original and newly split functions.
2151 UR.CWorklist.insert(&C);
2152 for (Function *Clone : Clones)
2153 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2154 }
2155 }
2156
2157 if (!PrepareFns.empty()) {
2158 for (auto *PrepareFn : PrepareFns) {
2159 replaceAllPrepares(PrepareFn, CG, C);
2160 }
2161 }
2162
2163 return PreservedAnalyses::none();
2164}
2165
2166namespace {
2167
2168// We present a coroutine to LLVM as an ordinary function with suspension
2169// points marked up with intrinsics. We let the optimizer party on the coroutine
2170// as a single function for as long as possible. Shortly before the coroutine is
2171// eligible to be inlined into its callers, we split up the coroutine into parts
2172// corresponding to initial, resume and destroy invocations of the coroutine,
2173// add them to the current SCC and restart the IPO pipeline to optimize the
2174// coroutine subfunctions we extracted before proceeding to the caller of the
2175// coroutine.
2176struct CoroSplitLegacy : public CallGraphSCCPass {
2177 static char ID; // Pass identification, replacement for typeid
2178
2179 CoroSplitLegacy(bool ReuseFrameSlot = false)
2180 : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
2181 initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2182 }
2183
2184 bool Run = false;
2185 bool ReuseFrameSlot;
2186
2187 // A coroutine is identified by the presence of the coro.begin intrinsic; if
2188 // we don't have any, this pass has nothing to do.
2189 bool doInitialization(CallGraph &CG) override {
2190 Run = declaresCoroSplitIntrinsics(CG.getModule());
2191 return CallGraphSCCPass::doInitialization(CG);
2192 }
2193
2194 bool runOnSCC(CallGraphSCC &SCC) override {
2195 if (!Run)
1. Assuming field 'Run' is true
2. Taking false branch
2196 return false;
2197
2198 // Check for uses of llvm.coro.prepare.retcon.
2199 SmallVector<Function *, 2> PrepareFns;
2200 auto &M = SCC.getCallGraph().getModule();
2201 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2202 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2203
2204 // Find coroutines for processing.
2205 SmallVector<Function *, 4> Coroutines;
2206 for (CallGraphNode *CGN : SCC)
2207 if (auto *F = CGN->getFunction())
2208 if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2209 Coroutines.push_back(F);
2210
2211 if (Coroutines.empty() && PrepareFns.empty())
2212 return false;
2213
2214 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2215
2216 if (Coroutines.empty()) {
3. Taking false branch
2217 bool Changed = false;
2218 for (auto *PrepareFn : PrepareFns)
2219 Changed |= replaceAllPrepares(PrepareFn, CG);
2220 return Changed;
2221 }
2222
2223 createDevirtTriggerFunc(CG, SCC);
2224
2225 // Split all the coroutines.
2226 for (Function *F : Coroutines) {
4. Assuming '__begin2' is not equal to '__end2'
2227 Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2228 StringRef Value = Attr.getValueAsString();
2229 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
5. Assuming 'DebugFlag' is false
6. Loop condition is false. Exiting loop
2230 << "' state: " << Value << "\n");
2231 // Async lowering marks coroutines to trigger a restart of the pipeline
2232 // after it has split them.
2233 if (Value == ASYNC_RESTART_AFTER_SPLIT) {
7. Assuming the condition is false
8. Taking false branch
2234 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2235 continue;
2236 }
2237 if (Value == UNPREPARED_FOR_SPLIT) {
9. Assuming the condition is false
10. Taking false branch
2238 prepareForSplit(*F, CG);
2239 continue;
2240 }
2241 F->removeFnAttr(CORO_PRESPLIT_ATTR);
2242
2243 SmallVector<Function *, 4> Clones;
2244 const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
11. Calling 'splitCoroutine'
2245 updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2246 if (Shape.ABI == coro::ABI::Async) {
2247 // Restart SCC passes.
2248 // Mark the function for the CoroElide pass. It will devirtualize, causing a
2249 // restart of the SCC pipeline.
2250 prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2251 }
2252 }
2253
2254 for (auto *PrepareFn : PrepareFns)
2255 replaceAllPrepares(PrepareFn, CG);
2256
2257 return true;
2258 }
2259
2260 void getAnalysisUsage(AnalysisUsage &AU) const override {
2261 CallGraphSCCPass::getAnalysisUsage(AU);
2262 }
2263
2264 StringRef getPassName() const override { return "Coroutine Splitting"; }
2265};
2266
2267} // end anonymous namespace
2268
2269char CoroSplitLegacy::ID = 0;
2270
2271INITIALIZE_PASS_BEGIN(
2272 CoroSplitLegacy, "coro-split",
2273 "Split coroutine into a set of functions driving its state machine", false,
2274 false)
2275INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2276INITIALIZE_PASS_END(
2277 CoroSplitLegacy, "coro-split",
2278 "Split coroutine into a set of functions driving its state machine", false,
2279 false)
2280
2281Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
2282 return new CoroSplitLegacy(ReuseFrameSlot);
2283}