Bug Summary

File: llvm/lib/Transforms/Coroutines/CoroFrame.cpp
Warning: line 997, column 39
Called C++ object pointer is null
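Distilled, the path below flags the following pattern (a hypothetical reduction with illustrative names, not the actual LLVM code): the analyzer can only reach the dereference by assuming that the first spill entry's def() compares equal to the null-initialized CurrentDef, i.e. that def() itself returns null.

    #include <vector>

    struct Entry { int *def; };

    void demo(const std::vector<Entry> &Spills) {
      int *CurrentDef = nullptr;
      for (const Entry &E : Spills) {
        if (CurrentDef != E.def)  // steps 11-12 below: assumed false on the
          CurrentDef = E.def;     // first iteration, leaving CurrentDef null...
        int V = *CurrentDef;      // step 15: ...so this dereferences null
        (void)V;
      }
    }

Since Spill's constructor always stores a non-null Def, def() cannot return null in practice, which makes this look like a false positive; nothing in rewriteMaterializableInstructions proves that invariant to the analyzer, though.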

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CoroFrame.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Coroutines -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Transforms/Coroutines -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This file contains classes used to discover if for a particular value
9// there is a use separated from its definition by a suspend point.
10//
11// Using the information discovered we form a Coroutine Frame structure to
12// contain those values. All uses of those values are replaced with appropriate
13// GEP + load from the coroutine frame. At the point of the definition we spill
14// the value into the coroutine frame.
15//
16// TODO: pack values tightly using liveness info.
17//===----------------------------------------------------------------------===//
18
19#include "CoroInternal.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/SmallString.h"
22#include "llvm/Analysis/PtrUseVisitor.h"
23#include "llvm/Config/llvm-config.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/DIBuilder.h"
26#include "llvm/IR/Dominators.h"
27#include "llvm/IR/IRBuilder.h"
28#include "llvm/IR/InstIterator.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/MathExtras.h"
31#include "llvm/Support/circular_raw_ostream.h"
32#include "llvm/Transforms/Utils/BasicBlockUtils.h"
33#include "llvm/Transforms/Utils/Local.h"
34#include "llvm/Transforms/Utils/PromoteMemToReg.h"
35#include <algorithm>
36
37using namespace llvm;
38
39// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
40// "coro-frame", which results in leaner debug spew.
41#define DEBUG_TYPE "coro-suspend-crossing"
42
43enum { SmallVectorThreshold = 32 };
44
45// Provides a two-way mapping between blocks and numbers.
46namespace {
47class BlockToIndexMapping {
48 SmallVector<BasicBlock *, SmallVectorThreshold> V;
49
50public:
51 size_t size() const { return V.size(); }
52
53 BlockToIndexMapping(Function &F) {
54 for (BasicBlock &BB : F)
55 V.push_back(&BB);
56 llvm::sort(V);
57 }
58
59 size_t blockToIndex(BasicBlock *BB) const {
60 auto *I = llvm::lower_bound(V, BB);
61 assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
62 return I - V.begin();
63 }
64
65 BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
66};
67} // end anonymous namespace
68
69// The SuspendCrossingInfo maintains data that allows us to answer the
70// question of whether, given two BasicBlocks A and B, there is a path from A
71// to B that passes through a suspend point.
72//
73// For every basic block 'i' it maintains a BlockData that consists of:
74// Consumes: a bit vector which contains a set of indices of blocks that can
75// reach block 'i'
76// Kills: a bit vector which contains a set of indices of blocks that can
77// reach block 'i' along a path that crosses a suspend point
78// Suspend: a boolean indicating whether block 'i' contains a suspend point.
79// End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
80//
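// Illustration (editorial, not in the original source): in a CFG
//   entry -> susp -> use
// where 'susp' contains a suspend point, block 'use' ends up with
// Consumes = {entry, susp, use} and, because 'susp' kills everything it
// consumes, Kills = {entry, susp}; so a value defined in 'entry' and used
// in 'use' crosses a suspend point and must be spilled to the frame.
//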
81namespace {
82struct SuspendCrossingInfo {
83 BlockToIndexMapping Mapping;
84
85 struct BlockData {
86 BitVector Consumes;
87 BitVector Kills;
88 bool Suspend = false;
89 bool End = false;
90 };
91 SmallVector<BlockData, SmallVectorThreshold> Block;
92
93 iterator_range<succ_iterator> successors(BlockData const &BD) const {
94 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
95 return llvm::successors(BB);
96 }
97
98 BlockData &getBlockData(BasicBlock *BB) {
99 return Block[Mapping.blockToIndex(BB)];
100 }
101
102 void dump() const;
103 void dump(StringRef Label, BitVector const &BV) const;
104
105 SuspendCrossingInfo(Function &F, coro::Shape &Shape);
106
107 bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
108 size_t const DefIndex = Mapping.blockToIndex(DefBB);
109 size_t const UseIndex = Mapping.blockToIndex(UseBB);
110
111 assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
112 bool const Result = Block[UseIndex].Kills[DefIndex];
113 LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
114 << " answer is " << Result << "\n");
115 return Result;
116 }
117
118 bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
119 auto *I = cast<Instruction>(U);
120
121 // We rewrote PHINodes, so that only the ones with exactly one incoming
122 // value need to be analyzed.
123 if (auto *PN = dyn_cast<PHINode>(I))
124 if (PN->getNumIncomingValues() > 1)
125 return false;
126
127 BasicBlock *UseBB = I->getParent();
128
129 // As a special case, treat uses by an llvm.coro.suspend.retcon
130 // as if they were uses in the suspend's single predecessor: the
131 // uses conceptually occur before the suspend.
132 if (isa<CoroSuspendRetconInst>(I)) {
133 UseBB = UseBB->getSinglePredecessor();
134 assert(UseBB && "should have split coro.suspend into its own block");
135 }
136
137 return hasPathCrossingSuspendPoint(DefBB, UseBB);
138 }
139
140 bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
141 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
142 }
143
144 bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
145 auto *DefBB = I.getParent();
146
147 // As a special case, treat values produced by an llvm.coro.suspend.*
148 // as if they were defined in the single successor: the uses
149 // conceptually occur after the suspend.
150 if (isa<AnyCoroSuspendInst>(I)) {
151 DefBB = DefBB->getSingleSuccessor();
152 assert(DefBB && "should have split coro.suspend into its own block");
153 }
154
155 return isDefinitionAcrossSuspend(DefBB, U);
156 }
157};
158} // end anonymous namespace
159
160#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
161LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
162 BitVector const &BV) const {
163 dbgs() << Label << ":";
164 for (size_t I = 0, N = BV.size(); I < N; ++I)
165 if (BV[I])
166 dbgs() << " " << Mapping.indexToBlock(I)->getName();
167 dbgs() << "\n";
168}
169
170LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
171 for (size_t I = 0, N = Block.size(); I < N; ++I) {
172 BasicBlock *const B = Mapping.indexToBlock(I);
173 dbgs() << B->getName() << ":\n";
174 dump(" Consumes", Block[I].Consumes);
175 dump(" Kills", Block[I].Kills);
176 }
177 dbgs() << "\n";
178}
179#endif
180
181SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
182 : Mapping(F) {
183 const size_t N = Mapping.size();
184 Block.resize(N);
185
186 // Initialize every block so that it consumes itself
187 for (size_t I = 0; I < N; ++I) {
188 auto &B = Block[I];
189 B.Consumes.resize(N);
190 B.Kills.resize(N);
191 B.Consumes.set(I);
192 }
193
194 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
195 // the code beyond coro.end is reachable during initial invocation of the
196 // coroutine.
197 for (auto *CE : Shape.CoroEnds)
198 getBlockData(CE->getParent()).End = true;
199
200 // Mark all suspend blocks and indicate that they kill everything they
201// consume. Note that crossing coro.save also requires a spill, as any code
202 // between coro.save and coro.suspend may resume the coroutine and all of the
203 // state needs to be saved by that time.
204 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
205 BasicBlock *SuspendBlock = BarrierInst->getParent();
206 auto &B = getBlockData(SuspendBlock);
207 B.Suspend = true;
208 B.Kills |= B.Consumes;
209 };
210 for (auto *CSI : Shape.CoroSuspends) {
211 markSuspendBlock(CSI);
212 if (auto *Save = CSI->getCoroSave())
213 markSuspendBlock(Save);
214 }
215
216 // Iterate propagating consumes and kills until they stop changing.
217 int Iteration = 0;
218 (void)Iteration;
219
220 bool Changed;
221 do {
222 LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
223 LLVM_DEBUG(dbgs() << "==============\n");
224
225 Changed = false;
226 for (size_t I = 0; I < N; ++I) {
227 auto &B = Block[I];
228 for (BasicBlock *SI : successors(B)) {
229
230 auto SuccNo = Mapping.blockToIndex(SI);
231
232 // Save the Consumes and Kills bitsets so that it is easy to see
233 // if anything changed after propagation.
234 auto &S = Block[SuccNo];
235 auto SavedConsumes = S.Consumes;
236 auto SavedKills = S.Kills;
237
238 // Propagate Kills and Consumes from block B into its successor S.
239 S.Consumes |= B.Consumes;
240 S.Kills |= B.Kills;
241
242 // If block B is a suspend block, it should propagate kills into
243 // its successor for every block B consumes.
244 if (B.Suspend) {
245 S.Kills |= B.Consumes;
246 }
247 if (S.Suspend) {
248 // If block S is a suspend block, it should kill all of the blocks it
249 // consumes.
250 S.Kills |= S.Consumes;
251 } else if (S.End) {
252 // If block S is an end block, it should not propagate kills as the
253 // blocks following coro.end() are reached during initial invocation
254 // of the coroutine while all the data are still available on the
255 // stack or in the registers.
256 S.Kills.reset();
257 } else {
258 // This is reached when block S is neither a suspend block nor a
259 // coro.end block; we need to make sure that it is not in the kill set.
260 S.Kills.reset(SuccNo);
261 }
262
263 // See if anything changed.
264 Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
265
266 if (S.Kills != SavedKills) {
267 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
268 << "\n");
269 LLVM_DEBUG(dump("S.Kills", S.Kills));
270 LLVM_DEBUG(dump("SavedKills", SavedKills));
271 }
272 if (S.Consumes != SavedConsumes) {
273 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
274 LLVM_DEBUG(dump("S.Consume", S.Consumes));
275 LLVM_DEBUG(dump("SavedCons", SavedConsumes));
276 }
277 }
278 }
279 } while (Changed);
280 LLVM_DEBUG(dump());
281}
282
283#undef DEBUG_TYPE // "coro-suspend-crossing"
284#define DEBUG_TYPE "coro-frame"
285
286// We build up the list of spills for every case where a use is separated
287// from the definition by a suspend point.
288
289static const unsigned InvalidFieldIndex = ~0U;
290
291namespace {
292class Spill {
293 Value *Def = nullptr;
294 Instruction *User = nullptr;
295 unsigned FieldNo = InvalidFieldIndex;
296
297public:
298 Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}
299
300 Value *def() const { return Def; }
301 Instruction *user() const { return User; }
302 BasicBlock *userBlock() const { return User->getParent(); }
303
304 // Note that field index is stored in the first SpillEntry for a particular
305// definition. Subsequent mentions of a definition do not have fieldNo
306 // assigned. This works out fine as the users of Spills capture the info about
307 // the definition the first time they encounter it. Consider refactoring
308 // SpillInfo into two arrays to normalize the spill representation.
309 unsigned fieldIndex() const {
310 assert(FieldNo != InvalidFieldIndex && "Accessing unassigned field");
311 return FieldNo;
312 }
313 void setFieldIndex(unsigned FieldNumber) {
314 assert(FieldNo == InvalidFieldIndex && "Reassigning field number");
315 FieldNo = FieldNumber;
316 }
317};
318} // namespace
319
320// Note that there may be more than one record with the same value of Def in
321// the SpillInfo vector.
322using SpillInfo = SmallVector<Spill, 8>;
323
324#ifndef NDEBUG
325static void dump(StringRef Title, SpillInfo const &Spills) {
326 dbgs() << "------------- " << Title << "--------------\n";
327 Value *CurrentValue = nullptr;
328 for (auto const &E : Spills) {
329 if (CurrentValue != E.def()) {
330 CurrentValue = E.def();
331 CurrentValue->dump();
332 }
333 dbgs() << " user: ";
334 E.user()->dump();
335 }
336}
337#endif
338
339namespace {
340// We cannot rely solely on the natural alignment of a type when building a
341// coroutine frame; if the alignment specified on the Alloca instruction
342// differs from the natural alignment of the alloca type, we will need to
343// insert padding.
344struct PaddingCalculator {
345 const DataLayout &DL;
346 LLVMContext &Context;
347 unsigned StructSize = 0;
348
349 PaddingCalculator(LLVMContext &Context, DataLayout const &DL)
350 : DL(DL), Context(Context) {}
351
352 // Replicate the logic from IR/DataLayout.cpp to match field offset
353 // computation for LLVM structs.
354 void addType(Type *Ty) {
355 unsigned TyAlign = DL.getABITypeAlignment(Ty);
356 if ((StructSize & (TyAlign - 1)) != 0)
357 StructSize = alignTo(StructSize, TyAlign);
358
359 StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item.
360 }
361
362 void addTypes(SmallVectorImpl<Type *> const &Types) {
363 for (auto *Ty : Types)
364 addType(Ty);
365 }
366
367 unsigned computePadding(Type *Ty, unsigned ForcedAlignment) {
368 unsigned TyAlign = DL.getABITypeAlignment(Ty);
369 auto Natural = alignTo(StructSize, TyAlign);
370 auto Forced = alignTo(StructSize, ForcedAlignment);
371
372 // Return how many bytes of padding we need to insert.
373 if (Natural != Forced)
374 return std::max(Natural, Forced) - StructSize;
375
376 // Rely on natural alignment.
377 return 0;
378 }
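// Worked example (illustrative, not in the original source): with
// StructSize == 20, a field whose ABI type alignment is 8 but whose alloca
// is forced to 16-byte alignment gives Natural == 24 and Forced == 32;
// they differ, so max(24, 32) - 20 == 12 bytes of [12 x i8] padding are
// emitted, after which the field lands 16-aligned at offset 32.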
379
380 // If padding is required, return the padding field type to insert.
381 ArrayType *getPaddingType(Type *Ty, unsigned ForcedAlignment) {
382 if (auto Padding = computePadding(Ty, ForcedAlignment))
383 return ArrayType::get(Type::getInt8Ty(Context), Padding);
384
385 return nullptr;
386 }
387};
388} // namespace
389
390// Build a struct that will keep state for an active coroutine.
391// struct f.frame {
392// ResumeFnTy ResumeFnAddr;
393// ResumeFnTy DestroyFnAddr;
394// ... promise (if present) ...
395// int ResumeIndex;
396// ... spills ...
397// };
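// For instance (illustrative, not in the original source), a switch-ABI
// coroutine with two suspend points, an i32 promise, and a single spilled
// i64 would produce:
//   %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i32, i1, i64 }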
398static StructType *buildFrameType(Function &F, coro::Shape &Shape,
399 SpillInfo &Spills) {
400 LLVMContext &C = F.getContext();
401 const DataLayout &DL = F.getParent()->getDataLayout();
402 PaddingCalculator Padder(C, DL);
403 SmallString<32> Name(F.getName());
404 Name.append(".Frame");
405 StructType *FrameTy = StructType::create(C, Name);
406 SmallVector<Type *, 8> Types;
407
408 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
409
410 if (Shape.ABI == coro::ABI::Switch) {
411 auto *FramePtrTy = FrameTy->getPointerTo();
412 auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
413 /*IsVarArg=*/false);
414 auto *FnPtrTy = FnTy->getPointerTo();
415
416 // Figure out how wide an integer type storing the suspend index should be.
417 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
418 Type *PromiseType = PromiseAlloca
419 ? PromiseAlloca->getType()->getElementType()
420 : Type::getInt1Ty(C);
421 Type *IndexType = Type::getIntNTy(C, IndexBits);
422 Types.push_back(FnPtrTy);
423 Types.push_back(FnPtrTy);
424 Types.push_back(PromiseType);
425 Types.push_back(IndexType);
426 } else {
427 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
428 }
429
430 Value *CurrentDef = nullptr;
431
432 Padder.addTypes(Types);
433
434 // Create an entry for every spilled value.
435 for (auto &S : Spills) {
436 if (CurrentDef == S.def())
437 continue;
438
439 CurrentDef = S.def();
440 // PromiseAlloca was already added to Types array earlier.
441 if (CurrentDef == PromiseAlloca)
442 continue;
443
444 uint64_t Count = 1;
445 Type *Ty = nullptr;
446 if (auto *AI = dyn_cast<AllocaInst>(CurrentDef)) {
447 Ty = AI->getAllocatedType();
448 if (unsigned AllocaAlignment = AI->getAlignment()) {
449 // If alignment is specified in alloca, see if we need to insert extra
450 // padding.
451 if (auto PaddingTy = Padder.getPaddingType(Ty, AllocaAlignment)) {
452 Types.push_back(PaddingTy);
453 Padder.addType(PaddingTy);
454 }
455 }
456 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
457 Count = CI->getValue().getZExtValue();
458 else
459 report_fatal_error("Coroutines cannot handle non static allocas yet");
460 } else {
461 Ty = CurrentDef->getType();
462 }
463 S.setFieldIndex(Types.size());
464 if (Count == 1)
465 Types.push_back(Ty);
466 else
467 Types.push_back(ArrayType::get(Ty, Count));
468 Padder.addType(Ty);
469 }
470 FrameTy->setBody(Types);
471
472 switch (Shape.ABI) {
473 case coro::ABI::Switch:
474 break;
475
476 // Remember whether the frame is inline in the storage.
477 case coro::ABI::Retcon:
478 case coro::ABI::RetconOnce: {
479 auto &Layout = F.getParent()->getDataLayout();
480 auto Id = Shape.getRetconCoroId();
481 Shape.RetconLowering.IsFrameInlineInStorage
482 = (Layout.getTypeAllocSize(FrameTy) <= Id->getStorageSize() &&
483 Layout.getABITypeAlignment(FrameTy) <= Id->getStorageAlignment());
484 break;
485 }
486 }
487
488 return FrameTy;
489}
490
491// We use a pointer use visitor to discover if there are any writes into an
492// alloca that dominate CoroBegin. If that is the case, insertSpills will copy
493// the value from the alloca into the coroutine frame spill slot corresponding
494// to that alloca.
495namespace {
496struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
497 using Base = PtrUseVisitor<AllocaUseVisitor>;
498 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
499 const CoroBeginInst &CB)
500 : PtrUseVisitor(DL), DT(DT), CoroBegin(CB) {}
501
502 // We are only interested in uses that dominate coro.begin.
503 void visit(Instruction &I) {
504 if (DT.dominates(&I, &CoroBegin))
505 Base::visit(I);
506 }
507 // We need to provide this overload as PtrUseVisitor uses a pointer based
508 // visiting function.
509 void visit(Instruction *I) { return visit(*I); }
510
511 void visitLoadInst(LoadInst &) {} // Good. Nothing to do.
512
513 // If the pointer is the store's value operand, it has escaped and anything
514 // can write into that memory. If it is the pointer operand, we are
515 // definitely writing into the alloca and therefore need to copy.
516 void visitStoreInst(StoreInst &SI) { PI.setAborted(&SI); }
517
518 // Any other instruction that is not filtered out by PtrUseVisitor will
519 // result in a copy.
520 void visitInstruction(Instruction &I) { PI.setAborted(&I); }
521
522private:
523 const DominatorTree &DT;
524 const CoroBeginInst &CoroBegin;
525};
526} // namespace
527static bool mightWriteIntoAllocaPtr(AllocaInst &A, const DominatorTree &DT,
528 const CoroBeginInst &CB) {
529 const DataLayout &DL = A.getModule()->getDataLayout();
530 AllocaUseVisitor Visitor(DL, DT, CB);
531 auto PtrI = Visitor.visitPtr(A);
532 if (PtrI.isEscaped() || PtrI.isAborted()) {
533 auto *PointerEscapingInstr = PtrI.getEscapingInst()
534 ? PtrI.getEscapingInst()
535 : PtrI.getAbortingInst();
536 if (PointerEscapingInstr) {
537 LLVM_DEBUG(
538 dbgs() << "AllocaInst copy was triggered by instruction: "
539 << *PointerEscapingInstr << "\n");
540 }
541 return true;
542 }
543 return false;
544}
545
546// We need to make room to insert a spill after the initial PHIs, but before
547// the catchswitch instruction. Placing it before would violate the rule that
548// a catchswitch, like all other EHPads, must be the first non-PHI in a block.
549//
550// Split away catchswitch into a separate block and insert in its place:
551//
552// cleanuppad <InsertPt> cleanupret.
553//
554// The cleanupret instruction will act as an insert point for the spill.
555static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
556 BasicBlock *CurrentBlock = CatchSwitch->getParent();
557 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
558 CurrentBlock->getTerminator()->eraseFromParent();
559
560 auto *CleanupPad =
561 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
562 auto *CleanupRet =
563 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
564 return CleanupRet;
565}
566
567// Replace all alloca and SSA values that are accessed across suspend points
568// with GetElementPointer from coroutine frame + loads and stores. Create an
569// AllocaSpillBB that will become the new entry block for the resume parts of
570// the coroutine:
571//
572// %hdl = coro.begin(...)
573// whatever
574//
575// becomes:
576//
577// %hdl = coro.begin(...)
578// %FramePtr = bitcast i8* hdl to %f.frame*
579// br label %AllocaSpillBB
580//
581// AllocaSpillBB:
582// ; geps corresponding to allocas that were moved to coroutine frame
583// br label PostSpill
584//
585// PostSpill:
586// whatever
587//
588//
589static Instruction *insertSpills(const SpillInfo &Spills, coro::Shape &Shape) {
590 auto *CB = Shape.CoroBegin;
591 LLVMContext &C = CB->getContext();
592 IRBuilder<> Builder(CB->getNextNode());
593 StructType *FrameTy = Shape.FrameTy;
594 PointerType *FramePtrTy = FrameTy->getPointerTo();
595 auto *FramePtr =
596 cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
597 DominatorTree DT(*CB->getFunction());
598
599 Value *CurrentValue = nullptr;
600 BasicBlock *CurrentBlock = nullptr;
601 Value *CurrentReload = nullptr;
602
603 // Proper field number will be read from field definition.
604 unsigned Index = InvalidFieldIndex;
605
606 // We need to keep track of any allocas that need "spilling", since they
607 // will live in the coroutine frame now; all accesses to them need to be
608 // changed, not just the accesses across suspend points. We remember the
609 // allocas and their indices to be handled once we have processed all the
610 // spills.
611 SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
612 // Promise alloca (if present) has a fixed field number.
613 if (auto *PromiseAlloca = Shape.getPromiseAlloca()) {
614 assert(Shape.ABI == coro::ABI::Switch);
615 Allocas.emplace_back(PromiseAlloca, coro::Shape::SwitchFieldIndex::Promise);
616 }
617
618 // Create a GEP with the given index into the coroutine frame for the original
619 // value Orig. Appends an extra 0 index for array-allocas, preserving the
620 // original type.
621 auto GetFramePointer = [&](uint32_t Index, Value *Orig) -> Value * {
622 SmallVector<Value *, 3> Indices = {
623 ConstantInt::get(Type::getInt32Ty(C), 0),
624 ConstantInt::get(Type::getInt32Ty(C), Index),
625 };
626
627 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
628 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
629 auto Count = CI->getValue().getZExtValue();
630 if (Count > 1) {
631 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
632 }
633 } else {
634 report_fatal_error("Coroutines cannot handle non static allocas yet");
635 }
636 }
637
638 return Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices);
639 };
640
641 // Create a load instruction to reload the spilled value from the coroutine
642 // frame. Populates the Value pointer reference provided with the frame GEP.
643 auto CreateReload = [&](Instruction *InsertBefore, Value *&G) {
644 assert(Index != InvalidFieldIndex && "accessing unassigned field number");
645 Builder.SetInsertPoint(InsertBefore);
646
647 G = GetFramePointer(Index, CurrentValue);
648 G->setName(CurrentValue->getName() + Twine(".reload.addr"));
649
650 return isa<AllocaInst>(CurrentValue)
651 ? G
652 : Builder.CreateLoad(FrameTy->getElementType(Index), G,
653 CurrentValue->getName() + Twine(".reload"));
654 };
655
656 Value *GEP = nullptr, *CurrentGEP = nullptr;
657 for (auto const &E : Spills) {
658 // If we have not seen the value, generate a spill.
659 if (CurrentValue != E.def()) {
660 CurrentValue = E.def();
661 CurrentBlock = nullptr;
662 CurrentReload = nullptr;
663
664 Index = E.fieldIndex();
665
666 if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
667 // A spilled AllocaInst will be replaced with a GEP from the coroutine
668 // frame, so no separate spill store is required.
669 Allocas.emplace_back(AI, Index);
670 if (!AI->isStaticAlloca())
671 report_fatal_error("Coroutines cannot handle non static allocas yet");
672 } else {
673 // Otherwise, create a store instruction storing the value into the
674 // coroutine frame.
675
676 Instruction *InsertPt = nullptr;
677 if (auto Arg = dyn_cast<Argument>(CurrentValue)) {
678 // For arguments, we will place the store instruction right after
679 // the coroutine frame pointer instruction, i.e. bitcast of
680 // coro.begin from i8* to %f.frame*.
681 InsertPt = FramePtr->getNextNode();
682
683 // If we're spilling an Argument, make sure we clear 'nocapture'
684 // from the coroutine function.
685 Arg->getParent()->removeParamAttr(Arg->getArgNo(),
686 Attribute::NoCapture);
687
688 } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
689 // If we are spilling the result of the invoke instruction, split the
690 // normal edge and insert the spill in the new block.
691 auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
692 InsertPt = NewBB->getTerminator();
693 } else if (isa<PHINode>(CurrentValue)) {
694 // Skip past PHINodes and EH pad instructions.
695 BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
696 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
697 InsertPt = splitBeforeCatchSwitch(CSI);
698 else
699 InsertPt = &*DefBlock->getFirstInsertionPt();
700 } else if (auto CSI = dyn_cast<AnyCoroSuspendInst>(CurrentValue)) {
701 // Don't spill immediately after a suspend; splitting assumes
702 // that the suspend will be followed by a branch.
703 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
704 } else {
705 auto *I = cast<Instruction>(E.def());
706 assert(!I->isTerminator() && "unexpected terminator");
707 // For all other values, the spill is placed immediately after
708 // the definition.
709 if (DT.dominates(CB, I)) {
710 InsertPt = I->getNextNode();
711 } else {
712 // Otherwise, if it is not dominated by CoroBegin, it will be
713 // inserted immediately after the coroutine frame pointer is computed.
714 InsertPt = FramePtr->getNextNode();
715 }
716 }
717
718 Builder.SetInsertPoint(InsertPt);
719 auto *G = Builder.CreateConstInBoundsGEP2_32(
720 FrameTy, FramePtr, 0, Index,
721 CurrentValue->getName() + Twine(".spill.addr"));
722 Builder.CreateStore(CurrentValue, G);
723 }
724 }
725
726 // If we have not seen the use block, generate a reload in it.
727 if (CurrentBlock != E.userBlock()) {
728 CurrentBlock = E.userBlock();
729 CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt(), GEP);
730 }
731
732 // If we have a single edge PHINode, remove it and replace it with a reload
733 // from the coroutine frame. (We already took care of multi edge PHINodes
734 // by rewriting them in the rewritePHIs function).
735 if (auto *PN = dyn_cast<PHINode>(E.user())) {
736 assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
737 "values in the PHINode");
738 PN->replaceAllUsesWith(CurrentReload);
739 PN->eraseFromParent();
740 continue;
741 }
742
743 // If we have not seen this GEP instruction, migrate any dbg.declare from
744 // the alloca to it.
745 if (CurrentGEP != GEP) {
746 CurrentGEP = GEP;
747 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(CurrentValue);
748 if (!DIs.empty())
749 DIBuilder(*CurrentBlock->getParent()->getParent(),
750 /*AllowUnresolved*/ false)
751 .insertDeclare(CurrentGEP, DIs.front()->getVariable(),
752 DIs.front()->getExpression(),
753 DIs.front()->getDebugLoc(), DIs.front());
754 }
755
756 // Replace all uses of CurrentValue in the current instruction with reload.
757 E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
758 }
759
760 BasicBlock *FramePtrBB = FramePtr->getParent();
761
762 auto SpillBlock =
763 FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
764 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
765 Shape.AllocaSpillBlock = SpillBlock;
766 // If we found any allocas, replace all of their remaining uses with GEP
767 // instructions. Because new dbg.declares have been created for these
768 // allocas, we also delete the original dbg.declare and replace other uses with undef.
769 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
770 // as some of the uses may not be dominated by CoroBegin.
771 bool MightNeedToCopy = false;
772 Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
773 SmallVector<Instruction *, 4> UsersToUpdate;
774 for (auto &P : Allocas) {
775 AllocaInst *const A = P.first;
776
777 for (auto *DI : FindDbgDeclareUses(A))
778 DI->eraseFromParent();
779 replaceDbgUsesWithUndef(A);
780
781 UsersToUpdate.clear();
782 for (User *U : A->users()) {
783 auto *I = cast<Instruction>(U);
784 if (DT.dominates(CB, I))
785 UsersToUpdate.push_back(I);
786 else
787 MightNeedToCopy = true;
788 }
789 if (!UsersToUpdate.empty()) {
790 auto *G = GetFramePointer(P.second, A);
791 G->takeName(A);
792 for (Instruction *I : UsersToUpdate)
793 I->replaceUsesOfWith(A, G);
794 }
795 }
796// If we discovered such uses not dominated by CoroBegin, see if any of them
797// precede coro.begin and have instructions that can modify the
798// value of the alloca; those would require copying the value into
799// the spill slot in the coroutine frame.
800 if (MightNeedToCopy) {
801 Builder.SetInsertPoint(FramePtr->getNextNode());
802
803 for (auto &P : Allocas) {
804 AllocaInst *const A = P.first;
805 if (mightWriteIntoAllocaPtr(*A, DT, *CB)) {
806 if (A->isArrayAllocation())
807 report_fatal_error(
808 "Coroutines cannot handle copying of array allocas yet");
809
810 auto *G = GetFramePointer(P.second, A);
811 auto *Value = Builder.CreateLoad(A->getAllocatedType(), A);
812 Builder.CreateStore(Value, G);
813 }
814 }
815 }
816 return FramePtr;
817}
818
819// Sets the unwind edge of an instruction to a particular successor.
820static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
821 if (auto *II = dyn_cast<InvokeInst>(TI))
822 II->setUnwindDest(Succ);
823 else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
824 CS->setUnwindDest(Succ);
825 else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
826 CR->setUnwindDest(Succ);
827 else
828 llvm_unreachable("unexpected terminator instruction");
829}
830
831// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
832// block.
833static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
834 BasicBlock *NewPred,
835 PHINode *LandingPadReplacement) {
836 unsigned BBIdx = 0;
837 for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
838 PHINode *PN = cast<PHINode>(I);
839
840 // We manually update the LandingPadReplacement PHINode and it is the last
841 // PHI Node. So, if we find it, we are done.
842 if (LandingPadReplacement == PN)
843 break;
844
845 // Reuse the previous value of BBIdx if it lines up. In cases where we
846 // have multiple phi nodes with *lots* of predecessors, this is a speed
847 // win because we don't have to scan the PHI looking for TIBB. This
848 // happens because the BB list of PHI nodes are usually in the same
849 // order.
850 if (PN->getIncomingBlock(BBIdx) != OldPred)
851 BBIdx = PN->getBasicBlockIndex(OldPred);
852
853 assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
854 PN->setIncomingBlock(BBIdx, NewPred);
855 }
856}
857
858// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
859// specific handling.
860static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
861 LandingPadInst *OriginalPad,
862 PHINode *LandingPadReplacement) {
863 auto *PadInst = Succ->getFirstNonPHI();
864 if (!LandingPadReplacement && !PadInst->isEHPad())
865 return SplitEdge(BB, Succ);
866
867 auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
868 setUnwindEdgeTo(BB->getTerminator(), NewBB);
869 updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);
870
871 if (LandingPadReplacement) {
872 auto *NewLP = OriginalPad->clone();
873 auto *Terminator = BranchInst::Create(Succ, NewBB);
874 NewLP->insertBefore(Terminator);
875 LandingPadReplacement->addIncoming(NewLP, NewBB);
876 return NewBB;
877 }
878 Value *ParentPad = nullptr;
879 if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
880 ParentPad = FuncletPad->getParentPad();
881 else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
882 ParentPad = CatchSwitch->getParentPad();
883 else
884 llvm_unreachable("handling for other EHPads not implemented yet");
885
886 auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
887 CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
888 return NewBB;
889}
890
891static void rewritePHIs(BasicBlock &BB) {
892 // For every incoming edge we will create a block holding all
893 // incoming values in a single PHI node.
894 //
895 // loop:
896 // %n.val = phi i32[%n, %entry], [%inc, %loop]
897 //
898 // It will create:
899 //
900 // loop.from.entry:
901 // %n.loop.pre = phi i32 [%n, %entry]
902 // br %label loop
903 // loop.from.loop:
904 // %inc.loop.pre = phi i32 [%inc, %loop]
905 // br %label loop
906 //
907 // After this rewrite, further analysis will ignore any phi nodes with more
908 // than one incoming edge.
909
910 // TODO: Simplify PHINodes in the basic block to remove duplicate
911 // predecessors.
912
913 LandingPadInst *LandingPad = nullptr;
914 PHINode *ReplPHI = nullptr;
915 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
916 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
917 // We replace the original landing pad with a PHINode that will collect the
918 // results from all of them.
919 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
920 ReplPHI->takeName(LandingPad);
921 LandingPad->replaceAllUsesWith(ReplPHI);
922 // We will erase the original landing pad at the end of this function after
923 // ehAwareSplitEdge cloned it in the transition blocks.
924 }
925
926 SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
927 for (BasicBlock *Pred : Preds) {
928 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
929 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
930 auto *PN = cast<PHINode>(&BB.front());
931 do {
932 int Index = PN->getBasicBlockIndex(IncomingBB);
933 Value *V = PN->getIncomingValue(Index);
934 PHINode *InputV = PHINode::Create(
935 V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
936 &IncomingBB->front());
937 InputV->addIncoming(V, Pred);
938 PN->setIncomingValue(Index, InputV);
939 PN = dyn_cast<PHINode>(PN->getNextNode());
940 } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
941 // the landing pad.
942 }
943
944 if (LandingPad) {
945 // Calls to the ehAwareSplitEdge function cloned the original landing
946 // pad; we no longer need it.
947 LandingPad->eraseFromParent();
948 }
949}
950
951static void rewritePHIs(Function &F) {
952 SmallVector<BasicBlock *, 8> WorkList;
953
954 for (BasicBlock &BB : F)
955 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
956 if (PN->getNumIncomingValues() > 1)
957 WorkList.push_back(&BB);
958
959 for (BasicBlock *BB : WorkList)
960 rewritePHIs(*BB);
961}
962
963// Check for instructions that we can recreate on resume, as opposed to
964// spilling the result into the coroutine frame.
965static bool materializable(Instruction &V) {
966 return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
967 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
968}
969
970// Check for structural coroutine intrinsics that should not be spilled into
971// the coroutine frame.
972static bool isCoroutineStructureIntrinsic(Instruction &I) {
973 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
974 isa<CoroSuspendInst>(&I);
975}
976
977// For every use of a value that occurs across a suspend point, recreate that
978// value after the suspend point.
979static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
980 SpillInfo const &Spills) {
981 BasicBlock *CurrentBlock = nullptr;
982 Instruction *CurrentMaterialization = nullptr;
983 Instruction *CurrentDef = nullptr;
9. 'CurrentDef' initialized to a null pointer value
984
985 for (auto const &E : Spills) {
10. Assuming '__begin1' is not equal to '__end1'
986 // If it is a new definition, update CurrentXXX variables.
987 if (CurrentDef != E.def()) {
11. Assuming the condition is false
12. Taking false branch
988 CurrentDef = cast<Instruction>(E.def());
989 CurrentBlock = nullptr;
990 CurrentMaterialization = nullptr;
991 }
992
993 // If we have not seen this block, materialize the value.
994 if (CurrentBlock != E.userBlock()) {
13. Assuming the condition is true
14. Taking true branch
995 CurrentBlock = E.userBlock();
996 CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
997 CurrentMaterialization->setName(CurrentDef->getName());
15. Called C++ object pointer is null
998 CurrentMaterialization->insertBefore(
999 &*CurrentBlock->getFirstInsertionPt());
1000 }
1001
1002 if (auto *PN = dyn_cast<PHINode>(E.user())) {
1003 assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
1004 "values in the PHINode");
1005 PN->replaceAllUsesWith(CurrentMaterialization);
1006 PN->eraseFromParent();
1007 continue;
1008 }
1009
1010 // Replace all uses of CurrentDef in the current instruction with the
1011 // CurrentMaterialization for the block.
1012 E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
1013 }
1014}
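An editorial sketch (not the upstream fix): the warning above appears to hinge on the analyzer assuming E.def() can be null. Resolving the definition unconditionally at the top of the loop would make the non-null invariant visible, since cast<> asserts non-null in builds with assertions.

    // Sketch only: same behavior, but CurrentDef can no longer be null on
    // any path that reaches the dereference.
    for (auto const &E : Spills) {
      auto *Def = cast<Instruction>(E.def());
      if (CurrentDef != Def) {
        CurrentDef = Def;
        CurrentBlock = nullptr;
        CurrentMaterialization = nullptr;
      }
      if (CurrentBlock != E.userBlock()) {
        CurrentBlock = E.userBlock();
        CurrentMaterialization = CurrentDef->clone();
        CurrentMaterialization->setName(CurrentDef->getName());
        CurrentMaterialization->insertBefore(
            &*CurrentBlock->getFirstInsertionPt());
      }
      // ... PHINode handling and replaceUsesOfWith as in the original ...
    }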
1015
1016// Splits the block at a particular instruction unless it is the first
1017// instruction in the block with a single predecessor.
1018static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1019 auto *BB = I->getParent();
1020 if (&BB->front() == I) {
1021 if (BB->getSinglePredecessor()) {
1022 BB->setName(Name);
1023 return BB;
1024 }
1025 }
1026 return BB->splitBasicBlock(I, Name);
1027}
1028
1029// Split above and below a particular instruction so that it
1030// will be all alone by itself in a block.
1031static void splitAround(Instruction *I, const Twine &Name) {
1032 splitBlockIfNotFirst(I, Name);
1033 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1034}
1035
1036static bool isSuspendBlock(BasicBlock *BB) {
1037 return isa<AnyCoroSuspendInst>(BB->front());
1038}
1039
1040typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1041
1042/// Does control flow starting at the given block ever reach a suspend
1043/// instruction before reaching a block in VisitedOrFreeBBs?
1044static bool isSuspendReachableFrom(BasicBlock *From,
1045 VisitedBlocksSet &VisitedOrFreeBBs) {
1046 // Eagerly try to add this block to the visited set. If it's already
1047 // there, stop recursing; this path doesn't reach a suspend before
1048 // either looping or reaching a freeing block.
1049 if (!VisitedOrFreeBBs.insert(From).second)
1050 return false;
1051
1052 // We assume that we'll already have split suspends into their own blocks.
1053 if (isSuspendBlock(From))
1054 return true;
1055
1056 // Recurse on the successors.
1057 for (auto Succ : successors(From)) {
1058 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1059 return true;
1060 }
1061
1062 return false;
1063}
1064
1065/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
1066/// suspend point?
1067static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
1068 // Seed the visited set with all the basic blocks containing a free
1069 // so that we won't pass them up.
1070 VisitedBlocksSet VisitedOrFreeBBs;
1071 for (auto User : AI->users()) {
1072 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
1073 VisitedOrFreeBBs.insert(FI->getParent());
1074 }
1075
1076 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
1077}
1078
1079/// After we split the coroutine, will the given basic block be along
1080/// an obvious exit path for the resumption function?
1081static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1082 unsigned depth = 3) {
1083 // If we've bottomed out our depth count, stop searching and assume
1084 // that the path might loop back.
1085 if (depth == 0) return false;
1086
1087 // If this is a suspend block, we're about to exit the resumption function.
1088 if (isSuspendBlock(BB)) return true;
1089
1090 // Recurse into the successors.
1091 for (auto Succ : successors(BB)) {
1092 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
1093 return false;
1094 }
1095
1096 // If none of the successors leads back in a loop, we're on an exit/abort.
1097 return true;
1098}
1099
1100static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1101 // Look for a free that isn't sufficiently obviously followed by
1102 // either a suspend or a termination, i.e. something that will leave
1103 // the coro resumption frame.
1104 for (auto U : AI->users()) {
1105 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
1106 if (!FI) continue;
1107
1108 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
1109 return true;
1110 }
1111
1112 // If we never found one, we don't need a stack save.
1113 return false;
1114}
1115
1116/// Turn each of the given local allocas into a normal (dynamic) alloca
1117/// instruction.
1118static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1119 SmallVectorImpl<Instruction*> &DeadInsts) {
1120 for (auto AI : LocalAllocas) {
1121 auto M = AI->getModule();
1122 IRBuilder<> Builder(AI);
1123
1124 // Save the stack depth. Try to avoid doing this if the stackrestore
1125 // is going to immediately precede a return or something.
1126 Value *StackSave = nullptr;
1127 if (localAllocaNeedsStackSave(AI))
1128 StackSave = Builder.CreateCall(
1129 Intrinsic::getDeclaration(M, Intrinsic::stacksave));
1130
1131 // Allocate memory.
1132 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
1133 Alloca->setAlignment(MaybeAlign(AI->getAlignment()));
1134
1135 for (auto U : AI->users()) {
1136 // Replace gets with the allocation.
1137 if (isa<CoroAllocaGetInst>(U)) {
1138 U->replaceAllUsesWith(Alloca);
1139
1140 // Replace frees with stackrestores. This is safe because
1141 // alloca.alloc is required to obey a stack discipline, although we
1142 // don't enforce that structurally.
1143 } else {
1144 auto FI = cast<CoroAllocaFreeInst>(U);
1145 if (StackSave) {
1146 Builder.SetInsertPoint(FI);
1147 Builder.CreateCall(
1148 Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
1149 StackSave);
1150 }
1151 }
1152 DeadInsts.push_back(cast<Instruction>(U));
1153 }
1154
1155 DeadInsts.push_back(AI);
1156 }
1157}
1158
1159/// Turn the given coro.alloca.alloc call into a dynamic allocation.
1160/// This happens during the all-instructions iteration, so it must not
1161/// delete the call.
1162static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
1163 coro::Shape &Shape,
1164 SmallVectorImpl<Instruction*> &DeadInsts) {
1165 IRBuilder<> Builder(AI);
1166 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
1167
1168 for (User *U : AI->users()) {
1169 if (isa<CoroAllocaGetInst>(U)) {
1170 U->replaceAllUsesWith(Alloc);
1171 } else {
1172 auto FI = cast<CoroAllocaFreeInst>(U);
1173 Builder.SetInsertPoint(FI);
1174 Shape.emitDealloc(Builder, Alloc, nullptr);
1175 }
1176 DeadInsts.push_back(cast<Instruction>(U));
1177 }
1178
1179 // Push this on last so that it gets deleted after all the others.
1180 DeadInsts.push_back(AI);
1181
1182 // Return the new allocation value so that we can check for needed spills.
1183 return cast<Instruction>(Alloc);
1184}
1185
1186/// Get the current swifterror value.
1187static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1188 coro::Shape &Shape) {
1189 // Make a fake function pointer as a sort of intrinsic.
1190 auto FnTy = FunctionType::get(ValueTy, {}, false);
1191 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1192
1193 auto Call = Builder.CreateCall(FnTy, Fn, {});
1194 Shape.SwiftErrorOps.push_back(Call);
1195
1196 return Call;
1197}
1198
1199/// Set the given value as the current swifterror value.
1200///
1201/// Returns a slot that can be used as a swifterror slot.
1202static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1203 coro::Shape &Shape) {
1204 // Make a fake function pointer as a sort of intrinsic.
1205 auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
1206 {V->getType()}, false);
1207 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1208
1209 auto Call = Builder.CreateCall(FnTy, Fn, { V });
1210 Shape.SwiftErrorOps.push_back(Call);
1211
1212 return Call;
1213}
1214
1215/// Set the swifterror value from the given alloca before a call,
1216/// then put it back in the alloca afterwards.
1217///
1218/// Returns an address that will stand in for the swifterror slot
1219/// until splitting.
1220static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1221 AllocaInst *Alloca,
1222 coro::Shape &Shape) {
1223 auto ValueTy = Alloca->getAllocatedType();
1224 IRBuilder<> Builder(Call);
1225
1226 // Load the current value from the alloca and set it as the
1227 // swifterror value.
1228 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1229 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
1230
1231 // Move to after the call. Since swifterror only has a guaranteed
1232 // value on normal exits, we can ignore implicit and explicit unwind
1233 // edges.
1234 if (isa<CallInst>(Call)) {
1235 Builder.SetInsertPoint(Call->getNextNode());
1236 } else {
1237 auto Invoke = cast<InvokeInst>(Call);
1238 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1239 }
1240
1241 // Get the current swifterror value and store it to the alloca.
1242 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1243 Builder.CreateStore(ValueAfterCall, Alloca);
1244
1245 return Addr;
1246}
1247
1248/// Eliminate a formerly-swifterror alloca by inserting the get/set
1249/// intrinsics and attempting to MemToReg the alloca away.
1250static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1251 coro::Shape &Shape) {
1252 for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
1253 // We're likely changing the use list, so use a mutation-safe
1254 // iteration pattern.
1255 auto &Use = *UI;
1256 ++UI;
1257
1258 // swifterror values can only be used in very specific ways.
1259 // We take advantage of that here.
1260 auto User = Use.getUser();
1261 if (isa<LoadInst>(User) || isa<StoreInst>(User))
1262 continue;
1263
1264 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1265 auto Call = cast<Instruction>(User);
1266
1267 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1268
1269 // Use the returned slot address as the call argument.
1270 Use.set(Addr);
1271 }
1272
1273 // All the uses should be loads and stores now.
1274  assert(isAllocaPromotable(Alloca));
1275}
1276
1277/// "Eliminate" a swifterror argument by reducing it to the alloca case
1278/// and then loading and storing in the prologue and epilogue.
1279///
1280/// The argument keeps the swifterror flag.
1281static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1282 coro::Shape &Shape,
1283 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1284 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1285
1286 auto ArgTy = cast<PointerType>(Arg.getType());
1287 auto ValueTy = ArgTy->getElementType();
1288
1289 // Reduce to the alloca case:
1290
1291 // Create an alloca and replace all uses of the arg with it.
1292 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1293 Arg.replaceAllUsesWith(Alloca);
1294
1295 // Set an initial value in the alloca. swifterror is always null on entry.
1296 auto InitialValue = Constant::getNullValue(ValueTy);
1297 Builder.CreateStore(InitialValue, Alloca);
1298
1299 // Find all the suspends in the function and save and restore around them.
1300 for (auto Suspend : Shape.CoroSuspends) {
1301 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1302 }
1303
1304 // Find all the coro.ends in the function and restore the error value.
1305 for (auto End : Shape.CoroEnds) {
1306 Builder.SetInsertPoint(End);
1307 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1308 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1309 }
1310
1311 // Now we can use the alloca logic.
1312 AllocasToPromote.push_back(Alloca);
1313 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1314}
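// The net effect on the function, sketched (assuming the argument has type
// %swift.error** and names are invented):
//
//   entry:
//     %err.alloca = alloca %swift.error*
//     store %swift.error* null, %swift.error** %err.alloca
//     ; every former use of the argument now goes through %err.alloca
//   ...
//   at each coro.end:
//     %final = load %swift.error*, %swift.error** %err.alloca
//     %slot  = call %swift.error** null(%swift.error* %final)  ; restore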
1315
1316/// Eliminate all problematic uses of swifterror arguments and allocas
1317/// from the function. We'll fix them up later when splitting the function.
1318static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1319 SmallVector<AllocaInst*, 4> AllocasToPromote;
1320
1321 // Look for a swifterror argument.
1322 for (auto &Arg : F.args()) {
1323 if (!Arg.hasSwiftErrorAttr()) continue;
1324
1325 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1326 break;
1327 }
1328
1329 // Look for swifterror allocas.
1330 for (auto &Inst : F.getEntryBlock()) {
1331 auto Alloca = dyn_cast<AllocaInst>(&Inst);
1332 if (!Alloca || !Alloca->isSwiftError()) continue;
1333
1334 // Clear the swifterror flag.
1335 Alloca->setSwiftError(false);
1336
1337 AllocasToPromote.push_back(Alloca);
1338 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1339 }
1340
1341 // If we have any allocas to promote, compute a dominator tree and
1342 // promote them en masse.
1343 if (!AllocasToPromote.empty()) {
1344 DominatorTree DT(F);
1345 PromoteMemToReg(AllocasToPromote, DT);
1346 }
1347}
1348
1349void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
1350 eliminateSwiftError(F, Shape);
1351
1352 if (Shape.ABI == coro::ABI::Switch &&
1
Assuming field 'ABI' is not equal to Switch
1353 Shape.SwitchLowering.PromiseAlloca) {
1354 Shape.getSwitchCoroId()->clearPromise();
1355 }
1356
1357 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
1358 // intrinsics are in their own blocks to simplify the logic of building up
1359 // SuspendCrossing data.
1360 for (auto *CSI : Shape.CoroSuspends) {
2
Assuming '__begin1' is equal to '__end1'
1361 if (auto *Save = CSI->getCoroSave())
1362 splitAround(Save, "CoroSave");
1363 splitAround(CSI, "CoroSuspend");
1364 }
1365
1366 // Put CoroEnds into their own blocks.
1367 for (CoroEndInst *CE : Shape.CoroEnds)
3
Assuming '__begin1' is equal to '__end1'
1368 splitAround(CE, "CoroEnd");
1369
1370  // Transform multi-edge PHI nodes so that no value feeding into a PHI has
1371  // its definition separated from the PHI by a suspend point.
1372 rewritePHIs(F);
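// Sketch of the PHI rewrite (invented labels): each incoming edge gets its
// own block with a single-entry PHI, so every incoming value is defined on
// its own edge and cannot be separated from its PHI by a suspend point:
//
//   before:  merge: %x = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
//
//   after:   bb1.split: %x1 = phi i32 [ %a, %bb1 ]  ; br label %merge
//            bb2.split: %x2 = phi i32 [ %b, %bb2 ]  ; br label %merge
//            merge:     %x  = phi i32 [ %x1, %bb1.split ], [ %x2, %bb2.split ]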
1373
1374 // Build suspend crossing info.
1375 SuspendCrossingInfo Checker(F, Shape);
1376
1377 IRBuilder<> Builder(F.getContext());
1378 SpillInfo Spills;
1379 SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
1380 SmallVector<Instruction*, 4> DeadInstructions;
1381
1382 for (int Repeat = 0; Repeat < 4; ++Repeat) {
4
Loop condition is true. Entering loop body
1383 // See if there are materializable instructions across suspend points.
1384 for (Instruction &I : instructions(F))
1385 if (materializable(I))
1386 for (User *U : I.users())
1387 if (Checker.isDefinitionAcrossSuspend(I, U))
1388 Spills.emplace_back(&I, U);
1389
1390 if (Spills.empty())
5
Taking false branch
1391 break;
1392
1393 // Rewrite materializable instructions to be materialized at the use point.
1394 LLVM_DEBUG(dump("Materializations", Spills))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("coro-frame")) { dump("Materializations", Spills); } } while
(false)
;
6
Assuming 'DebugFlag' is false
7
Loop condition is false. Exiting loop (the do { ... } while (false) in the expanded LLVM_DEBUG macro)
1395 rewriteMaterializableInstructions(Builder, Spills);
8
Calling 'rewriteMaterializableInstructions'
1396 Spills.clear();
1397 }
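// Example of what materialization avoids spilling (a sketch): if
//   %sum = add i32 %n, 1
// is defined before a suspend point and used after it, recomputing the add
// next to the user is cheaper than storing %sum in the coroutine frame, so
// rewriteMaterializableInstructions clones it at the use point instead.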
1398
1399 // Collect the spills for arguments and other not-materializable values.
1400 for (Argument &A : F.args())
1401 for (User *U : A.users())
1402 if (Checker.isDefinitionAcrossSuspend(A, U))
1403 Spills.emplace_back(&A, U);
1404
1405 for (Instruction &I : instructions(F)) {
1406 // Values returned from coroutine structure intrinsics should not be part
1407 // of the Coroutine Frame.
1408 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
1409 continue;
1410
1411    // The coroutine promise is always included in the coroutine frame; there
1412    // is no need to check for suspend crossing.
1413 if (Shape.ABI == coro::ABI::Switch &&
1414 Shape.SwitchLowering.PromiseAlloca == &I)
1415 continue;
1416
1417 // Handle alloca.alloc specially here.
1418 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
1419 // Check whether the alloca's lifetime is bounded by suspend points.
1420 if (isLocalAlloca(AI)) {
1421 LocalAllocas.push_back(AI);
1422 continue;
1423 }
1424
1425 // If not, do a quick rewrite of the alloca and then add spills of
1426 // the rewritten value. The rewrite doesn't invalidate anything in
1427 // Spills because the other alloca intrinsics have no other operands
1428 // besides AI, and it doesn't invalidate the iteration because we delay
1429 // erasing AI.
1430 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
1431
1432 for (User *U : Alloc->users()) {
1433 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
1434 Spills.emplace_back(Alloc, U);
1435 }
1436 continue;
1437 }
1438
1439 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
1440 if (isa<CoroAllocaGetInst>(I)) {
1441 continue;
1442 }
1443
1444 for (User *U : I.users())
1445 if (Checker.isDefinitionAcrossSuspend(I, U)) {
1446 // We cannot spill a token.
1447 if (I.getType()->isTokenTy())
1448 report_fatal_error(
1449 "token definition is separated from the use by a suspend point");
1450 Spills.emplace_back(&I, U);
1451 }
1452 }
1453 LLVM_DEBUG(dump("Spills", Spills))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("coro-frame")) { dump("Spills", Spills); } } while (false)
;
1454 Shape.FrameTy = buildFrameType(F, Shape, Spills);
1455 Shape.FramePtr = insertSpills(Spills, Shape);
1456 lowerLocalAllocas(LocalAllocas, DeadInstructions);
1457
1458 for (auto I : DeadInstructions)
1459 I->eraseFromParent();
1460}