LLVM 22.0.0git
SpillUtils.cpp
Go to the documentation of this file.
1//===- SpillUtils.cpp - Utilities for checking for spills ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "CoroInternal.h"
11#include "llvm/Analysis/CFG.h"
13#include "llvm/IR/CFG.h"
14#include "llvm/IR/DebugInfo.h"
15#include "llvm/IR/Dominators.h"
18
19using namespace llvm;
20using namespace llvm::coro;
21
23
25 // Structural coroutine intrinsics that should not be spilled into the
26 // coroutine frame.
28}
29
30/// Does control flow starting at the given block ever reach a suspend
31/// instruction before reaching a block in VisitedOrFreeBBs?
33 VisitedBlocksSet &VisitedOrFreeBBs) {
34 // Eagerly try to add this block to the visited set. If it's already
35 // there, stop recursing; this path doesn't reach a suspend before
36 // either looping or reaching a freeing block.
37 if (!VisitedOrFreeBBs.insert(From).second)
38 return false;
39
40 // We assume that we'll already have split suspends into their own blocks.
41 if (coro::isSuspendBlock(From))
42 return true;
43
44 // Recurse on the successors.
45 for (auto *Succ : successors(From)) {
46 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
47 return true;
48 }
49
50 return false;
51}
52
53/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
54/// suspend point?
56 // Seed the visited set with all the basic blocks containing a free
57 // so that we won't pass them up.
58 VisitedBlocksSet VisitedOrFreeBBs;
59 for (auto *User : AI->users()) {
61 VisitedOrFreeBBs.insert(FI->getParent());
62 }
63
64 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
65}
66
67/// Turn the given coro.alloca.alloc call into a dynamic allocation.
68/// This happens during the all-instructions iteration, so it must not
69/// delete the call.
70static Instruction *
73 IRBuilder<> Builder(AI);
74 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
75
76 for (User *U : AI->users()) {
78 U->replaceAllUsesWith(Alloc);
79 } else {
80 auto FI = cast<CoroAllocaFreeInst>(U);
81 Builder.SetInsertPoint(FI);
82 Shape.emitDealloc(Builder, Alloc, nullptr);
83 }
84 DeadInsts.push_back(cast<Instruction>(U));
85 }
86
87 // Push this on last so that it gets deleted after all the others.
88 DeadInsts.push_back(AI);
89
90 // Return the new allocation value so that we can check for needed spills.
92}
93
94// We need to make room to insert a spill after initial PHIs, but before
95// catchswitch instruction. Placing it before violates the requirement that
96// catchswitch, like all other EHPads must be the first nonPHI in a block.
97//
98// Split away catchswitch into a separate block and insert in its place:
99//
100// cleanuppad <InsertPt> cleanupret.
101//
102// cleanupret instruction will act as an insert point for the spill.
104 BasicBlock *CurrentBlock = CatchSwitch->getParent();
105 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
106 CurrentBlock->getTerminator()->eraseFromParent();
107
108 auto *CleanupPad =
109 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
110 auto *CleanupRet =
111 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
112 return CleanupRet;
113}
114
115// We use a pointer use visitor to track how an alloca is being used.
116// The goal is to be able to answer the following three questions:
117// 1. Should this alloca be allocated on the frame instead.
118// 2. Could the content of the alloca be modified prior to CoroBegin, which
119// would require copying the data from the alloca to the frame after
120// CoroBegin.
121// 3. Are there any aliases created for this alloca prior to CoroBegin, but
122// used after CoroBegin. In that case, we will need to recreate the alias
123// after CoroBegin based off the frame.
124//
125// To answer question 1, we track two things:
126// A. List of all BasicBlocks that use this alloca or any of the aliases of
127// the alloca. In the end, we check if there exists any two basic blocks that
128// cross suspension points. If so, this alloca must be put on the frame.
129// B. Whether the alloca or any alias of the alloca is escaped at some point,
130// either by storing the address somewhere, or the address is used in a
131// function call that might capture. If it's ever escaped, this alloca must be
132// put on the frame conservatively.
133//
// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
135// Whenever a potential write happens, either through a store instruction, a
136// function call or any of the memory intrinsics, we check whether this
137// instruction is prior to CoroBegin.
138//
139// To answer question 3, we track the offsets of all aliases created for the
140// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
141// be able to represent the case when the offset is unknown (e.g. when you have
142// a PHINode that takes in different offset values). We cannot handle unknown
143// offsets and will assert. This is the potential issue left out. An ideal
144// solution would likely require a significant redesign.
145
146namespace {
147struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
148 using Base = PtrUseVisitor<AllocaUseVisitor>;
149 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
150 const coro::Shape &CoroShape,
151 const SuspendCrossingInfo &Checker,
152 bool ShouldUseLifetimeStartInfo)
153 : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
154 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
155 for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
156 CoroSuspendBBs.insert(SuspendInst->getParent());
157 }
158
159 void visit(Instruction &I) {
160 Users.insert(&I);
161 Base::visit(I);
162 // If the pointer is escaped prior to CoroBegin, we have to assume it would
163 // be written into before CoroBegin as well.
164 if (PI.isEscaped() &&
165 !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
166 MayWriteBeforeCoroBegin = true;
167 }
168 }
169 // We need to provide this overload as PtrUseVisitor uses a pointer based
170 // visiting function.
171 void visit(Instruction *I) { return visit(*I); }
172
173 void visitPHINode(PHINode &I) {
174 enqueueUsers(I);
175 handleAlias(I);
176 }
177
178 void visitSelectInst(SelectInst &I) {
179 enqueueUsers(I);
180 handleAlias(I);
181 }
182
183 void visitInsertElementInst(InsertElementInst &I) {
184 enqueueUsers(I);
185 handleAlias(I);
186 }
187
188 void visitInsertValueInst(InsertValueInst &I) {
189 enqueueUsers(I);
190 handleAlias(I);
191 }
192
193 void visitStoreInst(StoreInst &SI) {
194 // Regardless whether the alias of the alloca is the value operand or the
195 // pointer operand, we need to assume the alloca is been written.
196 handleMayWrite(SI);
197
198 if (SI.getValueOperand() != U->get())
199 return;
200
201 // We are storing the pointer into a memory location, potentially escaping.
202 // As an optimization, we try to detect simple cases where it doesn't
203 // actually escape, for example:
204 // %ptr = alloca ..
205 // %addr = alloca ..
206 // store %ptr, %addr
207 // %x = load %addr
208 // ..
209 // If %addr is only used by loading from it, we could simply treat %x as
210 // another alias of %ptr, and not considering %ptr being escaped.
211 auto IsSimpleStoreThenLoad = [&]() {
212 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
213 // If the memory location we are storing to is not an alloca, it
214 // could be an alias of some other memory locations, which is difficult
215 // to analyze.
216 if (!AI)
217 return false;
218 // StoreAliases contains aliases of the memory location stored into.
219 SmallVector<Instruction *, 4> StoreAliases = {AI};
220 while (!StoreAliases.empty()) {
221 Instruction *I = StoreAliases.pop_back_val();
222 for (User *U : I->users()) {
223 // If we are loading from the memory location, we are creating an
224 // alias of the original pointer.
225 if (auto *LI = dyn_cast<LoadInst>(U)) {
226 enqueueUsers(*LI);
227 handleAlias(*LI);
228 continue;
229 }
230 // If we are overriding the memory location, the pointer certainly
231 // won't escape.
232 if (auto *S = dyn_cast<StoreInst>(U))
233 if (S->getPointerOperand() == I)
234 continue;
236 continue;
237 // BitCastInst creats aliases of the memory location being stored
238 // into.
239 if (auto *BI = dyn_cast<BitCastInst>(U)) {
240 StoreAliases.push_back(BI);
241 continue;
242 }
243 return false;
244 }
245 }
246
247 return true;
248 };
249
250 if (!IsSimpleStoreThenLoad())
251 PI.setEscaped(&SI);
252 }
253
254 // All mem intrinsics modify the data.
255 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
256
257 void visitBitCastInst(BitCastInst &BC) {
259 handleAlias(BC);
260 }
261
262 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
264 handleAlias(ASC);
265 }
266
267 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
268 // The base visitor will adjust Offset accordingly.
270 handleAlias(GEPI);
271 }
272
273 void visitIntrinsicInst(IntrinsicInst &II) {
274 switch (II.getIntrinsicID()) {
275 default:
277 case Intrinsic::lifetime_start:
278 LifetimeStarts.insert(&II);
279 LifetimeStartBBs.push_back(II.getParent());
280 break;
281 case Intrinsic::lifetime_end:
282 LifetimeEndBBs.insert(II.getParent());
283 break;
284 }
285 }
286
287 void visitCallBase(CallBase &CB) {
288 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
289 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
290 PI.setEscaped(&CB);
291 handleMayWrite(CB);
292 }
293
294 bool getShouldLiveOnFrame() const {
295 if (!ShouldLiveOnFrame)
296 ShouldLiveOnFrame = computeShouldLiveOnFrame();
297 return *ShouldLiveOnFrame;
298 }
299
300 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
301
302 DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
303 assert(getShouldLiveOnFrame() && "This method should only be called if the "
304 "alloca needs to live on the frame.");
305 for (const auto &P : AliasOffetMap)
306 if (!P.second)
307 report_fatal_error("Unable to handle an alias with unknown offset "
308 "created before CoroBegin.");
309 return AliasOffetMap;
310 }
311
312private:
313 const DominatorTree &DT;
314 const coro::Shape &CoroShape;
315 const SuspendCrossingInfo &Checker;
316 // All alias to the original AllocaInst, created before CoroBegin and used
317 // after CoroBegin. Each entry contains the instruction and the offset in the
318 // original Alloca. They need to be recreated after CoroBegin off the frame.
319 DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
320 SmallPtrSet<Instruction *, 4> Users{};
321 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
322 SmallVector<BasicBlock *> LifetimeStartBBs{};
323 SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
324 SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
325 bool MayWriteBeforeCoroBegin{false};
326 bool ShouldUseLifetimeStartInfo{true};
327
328 mutable std::optional<bool> ShouldLiveOnFrame{};
329
330 bool computeShouldLiveOnFrame() const {
331 // If lifetime information is available, we check it first since it's
332 // more precise. We look at every pair of lifetime.start intrinsic and
333 // every basic block that uses the pointer to see if they cross suspension
334 // points. The uses cover both direct uses as well as indirect uses.
335 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
336 // If there is no explicit lifetime.end, then assume the address can
337 // cross suspension points.
338 if (LifetimeEndBBs.empty())
339 return true;
340
341 // If there is a path from a lifetime.start to a suspend without a
342 // corresponding lifetime.end, then the alloca's lifetime persists
343 // beyond that suspension point and the alloca must go on the frame.
344 llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
345 if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
346 &LifetimeEndBBs, &DT))
347 return true;
348
349 // Addresses are guaranteed to be identical after every lifetime.start so
350 // we cannot use the local stack if the address escaped and there is a
351 // suspend point between lifetime markers. This should also cover the
352 // case of a single lifetime.start intrinsic in a loop with suspend point.
353 if (PI.isEscaped()) {
354 for (auto *A : LifetimeStarts) {
355 for (auto *B : LifetimeStarts) {
356 if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
357 B->getParent()))
358 return true;
359 }
360 }
361 }
362 return false;
363 }
364 // FIXME: Ideally the isEscaped check should come at the beginning.
365 // However there are a few loose ends that need to be fixed first before
366 // we can do that. We need to make sure we are not over-conservative, so
367 // that the data accessed in-between await_suspend and symmetric transfer
368 // is always put on the stack, and also data accessed after coro.end is
369 // always put on the stack (esp the return object). To fix that, we need
370 // to:
371 // 1) Potentially treat sret as nocapture in calls
372 // 2) Special handle the return object and put it on the stack
373 // 3) Utilize lifetime.end intrinsic
374 if (PI.isEscaped())
375 return true;
376
377 for (auto *U1 : Users)
378 for (auto *U2 : Users)
379 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
380 return true;
381
382 return false;
383 }
384
385 void handleMayWrite(const Instruction &I) {
386 if (!DT.dominates(CoroShape.CoroBegin, &I))
387 MayWriteBeforeCoroBegin = true;
388 }
389
390 bool usedAfterCoroBegin(Instruction &I) {
391 for (auto &U : I.uses())
392 if (DT.dominates(CoroShape.CoroBegin, U))
393 return true;
394 return false;
395 }
396
397 void handleAlias(Instruction &I) {
398 // We track all aliases created prior to CoroBegin but used after.
399 // These aliases may need to be recreated after CoroBegin if the alloca
400 // need to live on the frame.
401 if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
402 return;
403
404 if (!IsOffsetKnown) {
405 AliasOffetMap[&I].reset();
406 } else {
407 auto [Itr, Inserted] = AliasOffetMap.try_emplace(&I, Offset);
408 if (!Inserted && Itr->second && *Itr->second != Offset) {
409 // If we have seen two different possible values for this alias, we set
410 // it to empty.
411 Itr->second.reset();
412 }
413 }
414 }
415};
416} // namespace
417
419 const SuspendCrossingInfo &Checker,
421 const DominatorTree &DT) {
422 if (Shape.CoroSuspends.empty())
423 return;
424
425 // The PromiseAlloca will be specially handled since it needs to be in a
426 // fixed position in the frame.
428 return;
429
430 // The __coro_gro alloca should outlive the promise, make sure we
431 // keep it outside the frame.
432 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
433 return;
434
435 // The code that uses lifetime.start intrinsic does not work for functions
436 // with loops without exit. Disable it on ABIs we know to generate such
437 // code.
438 bool ShouldUseLifetimeStartInfo =
441 AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
442 ShouldUseLifetimeStartInfo};
443 Visitor.visitPtr(*AI);
444 if (!Visitor.getShouldLiveOnFrame())
445 return;
446 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
447 Visitor.getMayWriteBeforeCoroBegin());
448}
449
451 const SuspendCrossingInfo &Checker) {
452 // Collect the spills for arguments and other not-materializable values.
453 for (Argument &A : F.args())
454 for (User *U : A.users())
455 if (Checker.isDefinitionAcrossSuspend(A, U))
456 Spills[&A].push_back(cast<Instruction>(U));
457}
458
460 SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas,
461 SmallVector<Instruction *, 4> &DeadInstructions,
463 const SuspendCrossingInfo &Checker, const DominatorTree &DT,
464 const coro::Shape &Shape) {
465
466 for (Instruction &I : instructions(F)) {
467 // Values returned from coroutine structure intrinsics should not be part
468 // of the Coroutine Frame.
470 continue;
471
472 // Handle alloca.alloc specially here.
473 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
474 // Check whether the alloca's lifetime is bounded by suspend points.
475 if (isLocalAlloca(AI)) {
476 LocalAllocas.push_back(AI);
477 continue;
478 }
479
480 // If not, do a quick rewrite of the alloca and then add spills of
481 // the rewritten value. The rewrite doesn't invalidate anything in
482 // Spills because the other alloca intrinsics have no other operands
483 // besides AI, and it doesn't invalidate the iteration because we delay
484 // erasing AI.
485 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
486
487 for (User *U : Alloc->users()) {
488 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
489 Spills[Alloc].push_back(cast<Instruction>(U));
490 }
491 continue;
492 }
493
494 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
496 continue;
497
498 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
499 collectFrameAlloca(AI, Shape, Checker, Allocas, DT);
500 continue;
501 }
502
503 for (User *U : I.users())
504 if (Checker.isDefinitionAcrossSuspend(I, U)) {
505 // We cannot spill a token.
506 if (I.getType()->isTokenTy())
508 "token definition is separated from the use by a suspend point");
509 Spills[&I].push_back(cast<Instruction>(U));
510 }
511 }
512}
513
515 const SuspendCrossingInfo &Checker) {
516 // We don't want the layout of coroutine frame to be affected
517 // by debug information. So we only choose to salvage dbg.values for
518 // whose value is already in the frame.
519 // We would handle the dbg.values for allocas specially
520 for (auto &Iter : Spills) {
521 auto *V = Iter.first;
523 findDbgValues(V, DVRs);
524 // Add the instructions which carry debug info that is in the frame.
525 for (DbgVariableRecord *DVR : DVRs)
526 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
527 Spills[V].push_back(DVR->Marker->MarkedInstr);
528 }
529}
530
531/// Async and Retcon{Once} conventions assume that all spill uses can be sunk
532/// after the coro.begin intrinsic.
534 const DominatorTree &Dom, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills,
538
539 // Collect all users that precede coro.begin.
540 auto collectUsers = [&](Value *Def) {
541 for (User *U : Def->users()) {
542 auto Inst = cast<Instruction>(U);
543 if (Inst->getParent() != CoroBegin->getParent() ||
544 Dom.dominates(CoroBegin, Inst))
545 continue;
546 if (ToMove.insert(Inst))
547 Worklist.push_back(Inst);
548 }
549 };
550 for (auto &I : Spills)
551 collectUsers(I.first);
552 for (auto &I : Allocas)
553 collectUsers(I.Alloca);
554
555 // Recursively collect users before coro.begin.
556 while (!Worklist.empty()) {
557 auto *Def = Worklist.pop_back_val();
558 for (User *U : Def->users()) {
559 auto Inst = cast<Instruction>(U);
560 if (Dom.dominates(CoroBegin, Inst))
561 continue;
562 if (ToMove.insert(Inst))
563 Worklist.push_back(Inst);
564 }
565 }
566
567 // Sort by dominance.
568 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
569 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
570 // If a dominates b it should precede (<) b.
571 return Dom.dominates(A, B);
572 });
573
574 Instruction *InsertPt = CoroBegin->getNextNode();
575 for (Instruction *Inst : InsertionList)
576 Inst->moveBefore(InsertPt->getIterator());
577}
578
580 Value *Def,
581 const DominatorTree &DT) {
582 BasicBlock::iterator InsertPt;
583 if (auto *Arg = dyn_cast<Argument>(Def)) {
584 // For arguments, we will place the store instruction right after
585 // the coroutine frame pointer instruction, i.e. coro.begin.
586 InsertPt = Shape.getInsertPtAfterFramePtr();
587
588 // If we're spilling an Argument, make sure we clear 'captures'
589 // from the coroutine function.
590 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
591 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
592 // Don't spill immediately after a suspend; splitting assumes
593 // that the suspend will be followed by a branch.
594 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
595 } else {
596 auto *I = cast<Instruction>(Def);
597 if (!DT.dominates(Shape.CoroBegin, I)) {
598 // If it is not dominated by CoroBegin, then spill should be
599 // inserted immediately after CoroFrame is computed.
600 InsertPt = Shape.getInsertPtAfterFramePtr();
601 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
602 // If we are spilling the result of the invoke instruction, split
603 // the normal edge and insert the spill in the new block.
604 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
605 InsertPt = NewBB->getTerminator()->getIterator();
606 } else if (isa<PHINode>(I)) {
607 // Skip the PHINodes and EH pads instructions.
608 BasicBlock *DefBlock = I->getParent();
609 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
610 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
611 else
612 InsertPt = DefBlock->getFirstInsertionPt();
613 } else {
614 assert(!I->isTerminator() && "unexpected terminator");
615 // For all other values, the spill is placed immediately after
616 // the definition.
617 InsertPt = I->getNextNode()->getIterator();
618 }
619 }
620
621 return InsertPt;
622}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Prepare AGPR Alloc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
uint64_t IntrinsicInst * II
#define P(N)
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
static bool isNonSpilledIntrinsic(Instruction &I)
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This represents the llvm.coro.alloca.alloc instruction.
Definition CoroInstr.h:758
Value * getSize() const
Definition CoroInstr.h:762
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
Definition CoroInstr.h:461
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A base class for visitors over the uses of a pointer value.
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitIntrinsicInst(IntrinsicInst &II)
iterator end()
Get an iterator to the end of the SetVector.
Definition SetVector.h:111
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:105
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > users()
Definition Value.h:426
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
SmallMapVector< Value *, SmallVector< Instruction *, 2 >, 8 > SpillInfo
Definition SpillUtils.h:18
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
Definition CoroShape.h:48
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
Definition CoroShape.h:43
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
Definition CoroShape.h:36
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
bool isSuspendBlock(BasicBlock *BB)
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
auto successors(const MachineBasicBlock *BB)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
LLVM_ABI bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
Definition CFG.cpp:249
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition CoroShape.h:59
LLVM_ABI Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SwitchLoweringStorage SwitchLowering
Definition CoroShape.h:155
CoroBeginInst * CoroBegin
Definition CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition CoroShape.h:252
LLVM_ABI void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.