//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// its definition precedes and its uses follow a suspend block. This is
// referred to as a suspend crossing value.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//
17
#include "CoroInternal.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include <algorithm>
#include <optional>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "coro-frame"
46
47namespace {
48class FrameTypeBuilder;
49// Mapping from the to-be-spilled value to all the users that need reload.
50struct FrameDataInfo {
51 // All the values (that are not allocas) that needs to be spilled to the
52 // frame.
53 coro::SpillInfo &Spills;
54 // Allocas contains all values defined as allocas that need to live in the
55 // frame.
57
58 FrameDataInfo(coro::SpillInfo &Spills,
60 : Spills(Spills), Allocas(Allocas) {}
61
62 SmallVector<Value *, 8> getAllDefs() const {
64 for (const auto &P : Spills)
65 Defs.push_back(P.first);
66 for (const auto &A : Allocas)
67 Defs.push_back(A.Alloca);
68 return Defs;
69 }
70
71 uint32_t getFieldIndex(Value *V) const {
72 auto Itr = FieldIndexMap.find(V);
73 assert(Itr != FieldIndexMap.end() &&
74 "Value does not have a frame field index");
75 return Itr->second;
76 }
77
78 void setFieldIndex(Value *V, uint32_t Index) {
79 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
80 "Cannot set the index for the same field twice.");
81 FieldIndexMap[V] = Index;
82 }
83
84 Align getAlign(Value *V) const {
85 auto Iter = FieldAlignMap.find(V);
86 assert(Iter != FieldAlignMap.end());
87 return Iter->second;
88 }
89
90 void setAlign(Value *V, Align AL) {
91 assert(FieldAlignMap.count(V) == 0);
92 FieldAlignMap.insert({V, AL});
93 }
94
95 uint64_t getDynamicAlign(Value *V) const {
96 auto Iter = FieldDynamicAlignMap.find(V);
97 assert(Iter != FieldDynamicAlignMap.end());
98 return Iter->second;
99 }
100
101 void setDynamicAlign(Value *V, uint64_t Align) {
102 assert(FieldDynamicAlignMap.count(V) == 0);
103 FieldDynamicAlignMap.insert({V, Align});
104 }
105
106 uint64_t getOffset(Value *V) const {
107 auto Iter = FieldOffsetMap.find(V);
108 assert(Iter != FieldOffsetMap.end());
109 return Iter->second;
110 }
111
112 void setOffset(Value *V, uint64_t Offset) {
113 assert(FieldOffsetMap.count(V) == 0);
114 FieldOffsetMap.insert({V, Offset});
115 }
116
117 // Remap the index of every field in the frame, using the final layout index.
118 void updateLayoutIndex(FrameTypeBuilder &B);
119
120private:
121 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
122 // twice by mistake.
123 bool LayoutIndexUpdateStarted = false;
124 // Map from values to their slot indexes on the frame. They will be first set
125 // with their original insertion field index. After the frame is built, their
126 // indexes will be updated into the final layout index.
127 DenseMap<Value *, uint32_t> FieldIndexMap;
128 // Map from values to their alignment on the frame. They would be set after
129 // the frame is built.
130 DenseMap<Value *, Align> FieldAlignMap;
131 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
132 // Map from values to their offset on the frame. They would be set after
133 // the frame is built.
134 DenseMap<Value *, uint64_t> FieldOffsetMap;
135};
136} // namespace
137
138#ifndef NDEBUG
139static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
140 dbgs() << "------------- " << Title << " --------------\n";
141 for (const auto &E : Spills) {
142 E.first->dump();
143 dbgs() << " user: ";
144 for (auto *I : E.second)
145 I->dump();
146 }
147}
148
150 dbgs() << "------------- Allocas --------------\n";
151 for (const auto &A : Allocas) {
152 A.Alloca->dump();
153 }
154}
155#endif
156
157namespace {
158using FieldIDType = size_t;
159// We cannot rely solely on natural alignment of a type when building a
160// coroutine frame and if the alignment specified on the Alloca instruction
161// differs from the natural alignment of the alloca type we will need to insert
162// padding.
163class FrameTypeBuilder {
164private:
165 struct Field {
166 uint64_t Size;
167 uint64_t Offset;
168 Type *Ty;
169 FieldIDType LayoutFieldIndex;
171 Align TyAlignment;
172 uint64_t DynamicAlignBuffer;
173 };
174
175 const DataLayout &DL;
176 LLVMContext &Context;
177 uint64_t StructSize = 0;
178 Align StructAlign;
179 bool IsFinished = false;
180
181 std::optional<Align> MaxFrameAlignment;
182
184 DenseMap<Value*, unsigned> FieldIndexByKey;
185
186public:
187 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
188 std::optional<Align> MaxFrameAlignment)
189 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
190
191 /// Add a field to this structure for the storage of an `alloca`
192 /// instruction.
193 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
194 bool IsHeader = false) {
195 Type *Ty = AI->getAllocatedType();
196
197 // Make an array type if this is a static array allocation.
198 if (AI->isArrayAllocation()) {
199 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
200 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
201 else
202 report_fatal_error("Coroutines cannot handle non static allocas yet");
203 }
204
205 return addField(Ty, AI->getAlign(), IsHeader);
206 }
207
208 /// We want to put the allocas whose lifetime-ranges are not overlapped
209 /// into one slot of coroutine frame.
210 /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
211 ///
212 /// cppcoro::task<void> alternative_paths(bool cond) {
213 /// if (cond) {
214 /// big_structure a;
215 /// process(a);
216 /// co_await something();
217 /// } else {
218 /// big_structure b;
219 /// process2(b);
220 /// co_await something();
221 /// }
222 /// }
223 ///
224 /// We want to put variable a and variable b in the same slot to
225 /// reduce the size of coroutine frame.
226 ///
227 /// This function use StackLifetime algorithm to partition the AllocaInsts in
228 /// Spills to non-overlapped sets in order to put Alloca in the same
229 /// non-overlapped set into the same slot in the Coroutine Frame. Then add
230 /// field for the allocas in the same non-overlapped set by using the largest
231 /// type as the field type.
232 ///
233 /// Side Effects: Because We sort the allocas, the order of allocas in the
234 /// frame may be different with the order in the source code.
235 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
236 coro::Shape &Shape, bool OptimizeFrame);
237
238 /// Add a field to this structure.
239 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
240 bool IsHeader = false,
241 bool IsSpillOfValue = false) {
242 assert(!IsFinished && "adding fields to a finished builder");
243 assert(Ty && "must provide a type for a field");
244
245 // The field size is always the alloc size of the type.
246 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
247
248 // For an alloca with size=0, we don't need to add a field and they
249 // can just point to any index in the frame. Use index 0.
250 if (FieldSize == 0) {
251 return 0;
252 }
253
254 // The field alignment might not be the type alignment, but we need
255 // to remember the type alignment anyway to build the type.
256 // If we are spilling values we don't need to worry about ABI alignment
257 // concerns.
258 Align ABIAlign = DL.getABITypeAlign(Ty);
259 Align TyAlignment = ABIAlign;
260 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
261 TyAlignment = *MaxFrameAlignment;
262 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
263
264 // The field alignment could be bigger than the max frame case, in that case
265 // we request additional storage to be able to dynamically align the
266 // pointer.
267 uint64_t DynamicAlignBuffer = 0;
268 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
269 DynamicAlignBuffer =
270 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
271 FieldAlignment = *MaxFrameAlignment;
272 FieldSize = FieldSize + DynamicAlignBuffer;
273 }
274
275 // Lay out header fields immediately.
276 uint64_t Offset;
277 if (IsHeader) {
278 Offset = alignTo(StructSize, FieldAlignment);
279 StructSize = Offset + FieldSize;
280
281 // Everything else has a flexible offset.
282 } else {
284 }
285
286 Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
287 DynamicAlignBuffer});
288 return Fields.size() - 1;
289 }
290
291 /// Finish the layout and create the struct type with the given name.
292 StructType *finish(StringRef Name);
293
294 uint64_t getStructSize() const {
295 assert(IsFinished && "not yet finished!");
296 return StructSize;
297 }
298
299 Align getStructAlign() const {
300 assert(IsFinished && "not yet finished!");
301 return StructAlign;
302 }
303
304 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
305 assert(IsFinished && "not yet finished!");
306 return Fields[Id].LayoutFieldIndex;
307 }
308
309 Field getLayoutField(FieldIDType Id) const {
310 assert(IsFinished && "not yet finished!");
311 return Fields[Id];
312 }
313};
314} // namespace
315
316void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
317 auto Updater = [&](Value *I) {
318 auto Field = B.getLayoutField(getFieldIndex(I));
319 setFieldIndex(I, Field.LayoutFieldIndex);
320 setAlign(I, Field.Alignment);
321 uint64_t dynamicAlign =
322 Field.DynamicAlignBuffer
323 ? Field.DynamicAlignBuffer + Field.Alignment.value()
324 : 0;
325 setDynamicAlign(I, dynamicAlign);
326 setOffset(I, Field.Offset);
327 };
328 LayoutIndexUpdateStarted = true;
329 for (auto &S : Spills)
330 Updater(S.first);
331 for (const auto &A : Allocas)
332 Updater(A.Alloca);
333 LayoutIndexUpdateStarted = false;
334}
335
336void FrameTypeBuilder::addFieldForAllocas(const Function &F,
337 FrameDataInfo &FrameData,
338 coro::Shape &Shape,
339 bool OptimizeFrame) {
340 using AllocaSetType = SmallVector<AllocaInst *, 4>;
341 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
342
343 // We need to add field for allocas at the end of this function.
344 auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
345 for (auto AllocaList : NonOverlapedAllocas) {
346 auto *LargestAI = *AllocaList.begin();
347 FieldIDType Id = addFieldForAlloca(LargestAI);
348 for (auto *Alloca : AllocaList)
349 FrameData.setFieldIndex(Alloca, Id);
350 }
351 });
352
353 if (!OptimizeFrame) {
354 for (const auto &A : FrameData.Allocas) {
355 AllocaInst *Alloca = A.Alloca;
356 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
357 }
358 return;
359 }
360
361 // Because there are paths from the lifetime.start to coro.end
362 // for each alloca, the liferanges for every alloca is overlaped
363 // in the blocks who contain coro.end and the successor blocks.
364 // So we choose to skip there blocks when we calculate the liferange
365 // for each alloca. It should be reasonable since there shouldn't be uses
366 // in these blocks and the coroutine frame shouldn't be used outside the
367 // coroutine body.
368 //
369 // Note that the user of coro.suspend may not be SwitchInst. However, this
370 // case seems too complex to handle. And it is harmless to skip these
371 // patterns since it just prevend putting the allocas to live in the same
372 // slot.
373 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
374 for (auto *CoroSuspendInst : Shape.CoroSuspends) {
375 for (auto *U : CoroSuspendInst->users()) {
376 if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
377 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
378 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
379 SWI->setDefaultDest(SWI->getSuccessor(1));
380 }
381 }
382 }
383
384 auto ExtractAllocas = [&]() {
385 AllocaSetType Allocas;
386 Allocas.reserve(FrameData.Allocas.size());
387 for (const auto &A : FrameData.Allocas)
388 Allocas.push_back(A.Alloca);
389 return Allocas;
390 };
391 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
392 StackLifetime::LivenessType::May);
393 StackLifetimeAnalyzer.run();
394 auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
395 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
396 StackLifetimeAnalyzer.getLiveRange(AI2));
397 };
398 auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
399 std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
400 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
401 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
402 return RetSize->getFixedValue();
403 };
404 // Put larger allocas in the front. So the larger allocas have higher
405 // priority to merge, which can save more space potentially. Also each
406 // AllocaSet would be ordered. So we can get the largest Alloca in one
407 // AllocaSet easily.
408 sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
409 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
410 });
411 for (const auto &A : FrameData.Allocas) {
412 AllocaInst *Alloca = A.Alloca;
413 bool Merged = false;
414 // Try to find if the Alloca does not interfere with any existing
415 // NonOverlappedAllocaSet. If it is true, insert the alloca to that
416 // NonOverlappedAllocaSet.
417 for (auto &AllocaSet : NonOverlapedAllocas) {
418 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
419 bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
420 return DoAllocasInterfere(Alloca, Iter);
421 });
422 // If the alignment of A is multiple of the alignment of B, the address
423 // of A should satisfy the requirement for aligning for B.
424 //
425 // There may be other more fine-grained strategies to handle the alignment
426 // infomation during the merging process. But it seems hard to handle
427 // these strategies and benefit little.
428 bool Alignable = [&]() -> bool {
429 auto *LargestAlloca = *AllocaSet.begin();
430 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
431 0;
432 }();
433 bool CouldMerge = NoInterference && Alignable;
434 if (!CouldMerge)
435 continue;
436 AllocaSet.push_back(Alloca);
437 Merged = true;
438 break;
439 }
440 if (!Merged) {
441 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
442 }
443 }
444 // Recover the default target destination for each Switch statement
445 // reserved.
446 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
447 SwitchInst *SWI = SwitchAndDefaultDest.first;
448 BasicBlock *DestBB = SwitchAndDefaultDest.second;
449 SWI->setDefaultDest(DestBB);
450 }
451 // This Debug Info could tell us which allocas are merged into one slot.
452 LLVM_DEBUG(for (auto &AllocaSet
453 : NonOverlapedAllocas) {
454 if (AllocaSet.size() > 1) {
455 dbgs() << "In Function:" << F.getName() << "\n";
456 dbgs() << "Find Union Set "
457 << "\n";
458 dbgs() << "\tAllocas are \n";
459 for (auto Alloca : AllocaSet)
460 dbgs() << "\t\t" << *Alloca << "\n";
461 }
462 });
463}
464
465StructType *FrameTypeBuilder::finish(StringRef Name) {
466 assert(!IsFinished && "already finished!");
467
468 // Prepare the optimal-layout field array.
469 // The Id in the layout field is a pointer to our Field for it.
471 LayoutFields.reserve(Fields.size());
472 for (auto &Field : Fields) {
473 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
474 Field.Offset);
475 }
476
477 // Perform layout.
478 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
479 StructSize = SizeAndAlign.first;
480 StructAlign = SizeAndAlign.second;
481
482 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
483 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
484 };
485
486 // We need to produce a packed struct type if there's a field whose
487 // assigned offset isn't a multiple of its natural type alignment.
488 bool Packed = [&] {
489 for (auto &LayoutField : LayoutFields) {
490 auto &F = getField(LayoutField);
491 if (!isAligned(F.TyAlignment, LayoutField.Offset))
492 return true;
493 }
494 return false;
495 }();
496
497 // Build the struct body.
498 SmallVector<Type*, 16> FieldTypes;
499 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
500 uint64_t LastOffset = 0;
501 for (auto &LayoutField : LayoutFields) {
502 auto &F = getField(LayoutField);
503
504 auto Offset = LayoutField.Offset;
505
506 // Add a padding field if there's a padding gap and we're either
507 // building a packed struct or the padding gap is more than we'd
508 // get from aligning to the field type's natural alignment.
509 assert(Offset >= LastOffset);
510 if (Offset != LastOffset) {
511 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
512 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
513 Offset - LastOffset));
514 }
515
516 F.Offset = Offset;
517 F.LayoutFieldIndex = FieldTypes.size();
518
519 FieldTypes.push_back(F.Ty);
520 if (F.DynamicAlignBuffer) {
521 FieldTypes.push_back(
522 ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
523 }
524 LastOffset = Offset + F.Size;
525 }
526
527 StructType *Ty = StructType::create(Context, FieldTypes, Name, Packed);
528
529#ifndef NDEBUG
530 // Check that the IR layout matches the offsets we expect.
531 auto Layout = DL.getStructLayout(Ty);
532 for (auto &F : Fields) {
533 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
534 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
535 }
536#endif
537
538 IsFinished = true;
539
540 return Ty;
541}
542
543static void cacheDIVar(FrameDataInfo &FrameData,
545 for (auto *V : FrameData.getAllDefs()) {
546 if (DIVarCache.contains(V))
547 continue;
548
549 auto CacheIt = [&DIVarCache, V](const auto &Container) {
550 auto *I = llvm::find_if(Container, [](auto *DDI) {
551 return DDI->getExpression()->getNumElements() == 0;
552 });
553 if (I != Container.end())
554 DIVarCache.insert({V, (*I)->getVariable()});
555 };
556 CacheIt(findDVRDeclares(V));
557 CacheIt(findDVRDeclareValues(V));
558 }
559}
560
561/// Create name for Type. It uses MDString to store new created string to
562/// avoid memory leak.
564 if (Ty->isIntegerTy()) {
565 // The longest name in common may be '__int_128', which has 9 bits.
566 SmallString<16> Buffer;
567 raw_svector_ostream OS(Buffer);
568 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
569 auto *MDName = MDString::get(Ty->getContext(), OS.str());
570 return MDName->getString();
571 }
572
573 if (Ty->isFloatingPointTy()) {
574 if (Ty->isFloatTy())
575 return "__float_";
576 if (Ty->isDoubleTy())
577 return "__double_";
578 return "__floating_type_";
579 }
580
581 if (Ty->isPointerTy())
582 return "PointerType";
583
584 if (Ty->isStructTy()) {
585 if (!cast<StructType>(Ty)->hasName())
586 return "__LiteralStructType_";
587
588 auto Name = Ty->getStructName();
589
590 SmallString<16> Buffer(Name);
591 for (auto &Iter : Buffer)
592 if (Iter == '.' || Iter == ':')
593 Iter = '_';
594 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
595 return MDName->getString();
596 }
597
598 return "UnknownType";
599}
600
601static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
602 const DataLayout &Layout, DIScope *Scope,
603 unsigned LineNum,
604 DenseMap<Type *, DIType *> &DITypeCache) {
605 if (DIType *DT = DITypeCache.lookup(Ty))
606 return DT;
607
608 StringRef Name = solveTypeName(Ty);
609
610 DIType *RetType = nullptr;
611
612 if (Ty->isIntegerTy()) {
613 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
614 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
615 llvm::DINode::FlagArtificial);
616 } else if (Ty->isFloatingPointTy()) {
617 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
618 dwarf::DW_ATE_float,
619 llvm::DINode::FlagArtificial);
620 } else if (Ty->isPointerTy()) {
621 // Construct PointerType points to null (aka void *) instead of exploring
622 // pointee type to avoid infinite search problem. For example, we would be
623 // in trouble if we traverse recursively:
624 //
625 // struct Node {
626 // Node* ptr;
627 // };
628 RetType =
629 Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
630 Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
631 /*DWARFAddressSpace=*/std::nullopt, Name);
632 } else if (Ty->isStructTy()) {
633 auto *DIStruct = Builder.createStructType(
634 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
635 Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
636 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
637
638 auto *StructTy = cast<StructType>(Ty);
640 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
641 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
642 DIStruct, LineNum, DITypeCache);
643 assert(DITy);
644 Elements.push_back(Builder.createMemberType(
645 DIStruct, DITy->getName(), DIStruct->getFile(), LineNum,
646 DITy->getSizeInBits(), DITy->getAlignInBits(),
647 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
648 llvm::DINode::FlagArtificial, DITy));
649 }
650
651 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
652
653 RetType = DIStruct;
654 } else {
655 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
656 TypeSize Size = Layout.getTypeSizeInBits(Ty);
657 auto *CharSizeType = Builder.createBasicType(
658 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
659
660 if (Size <= 8)
661 RetType = CharSizeType;
662 else {
663 if (Size % 8 != 0)
664 Size = TypeSize::getFixed(Size + 8 - (Size % 8));
665
666 RetType = Builder.createArrayType(
667 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
668 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
669 }
670 }
671
672 DITypeCache.insert({Ty, RetType});
673 return RetType;
674}
675
676/// Build artificial debug info for C++ coroutine frames to allow users to
677/// inspect the contents of the frame directly
678///
679/// Create Debug information for coroutine frame with debug name "__coro_frame".
680/// The debug information for the fields of coroutine frame is constructed from
681/// the following way:
682/// 1. For all the value in the Frame, we search the use of dbg.declare to find
683/// the corresponding debug variables for the value. If we can find the
684/// debug variable, we can get full and accurate debug information.
685/// 2. If we can't get debug information in step 1 and 2, we could only try to
686/// build the DIType by Type. We did this in solveDIType. We only handle
687/// integer, float, double, integer type and struct type for now.
689 FrameDataInfo &FrameData) {
690 DISubprogram *DIS = F.getSubprogram();
691 // If there is no DISubprogram for F, it implies the function is compiled
692 // without debug info. So we also don't generate debug info for the frame.
693
694 if (!DIS || !DIS->getUnit())
695 return;
696
698 DIS->getUnit()->getSourceLanguage().getUnversionedName())) ||
699 DIS->getUnit()->getEmissionKind() !=
701 return;
702
703 assert(Shape.ABI == coro::ABI::Switch &&
704 "We could only build debug infomation for C++ coroutine now.\n");
705
706 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
707
708 DIFile *DFile = DIS->getFile();
709 unsigned LineNum = DIS->getLine();
710
711 DICompositeType *FrameDITy = DBuilder.createStructType(
712 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
713 DFile, LineNum, Shape.FrameSize * 8,
714 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
715 llvm::DINodeArray());
716 StructType *FrameTy = Shape.FrameTy;
718 DataLayout Layout = F.getDataLayout();
719
721 cacheDIVar(FrameData, DIVarCache);
722
723 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
724 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
725 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
726
728 NameCache.insert({ResumeIndex, "__resume_fn"});
729 NameCache.insert({DestroyIndex, "__destroy_fn"});
730 NameCache.insert({IndexIndex, "__coro_index"});
731
732 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
733 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
734 *IndexTy = FrameTy->getElementType(IndexIndex);
735
737 TyCache.insert(
738 {ResumeIndex, DBuilder.createPointerType(
739 nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
740 TyCache.insert(
741 {DestroyIndex, DBuilder.createPointerType(
742 nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});
743
744 /// FIXME: If we fill the field `SizeInBits` with the actual size of
745 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
746 TyCache.insert({IndexIndex, DBuilder.createBasicType(
747 "__coro_index",
748 (Layout.getTypeSizeInBits(IndexTy) < 8)
749 ? 8
750 : Layout.getTypeSizeInBits(IndexTy),
751 dwarf::DW_ATE_unsigned_char)});
752
753 for (auto *V : FrameData.getAllDefs()) {
754 auto It = DIVarCache.find(V);
755 if (It == DIVarCache.end())
756 continue;
757
758 auto Index = FrameData.getFieldIndex(V);
759
760 NameCache.insert({Index, It->second->getName()});
761 TyCache.insert({Index, It->second->getType()});
762 }
763
764 // Cache from index to (Align, Offset Pair)
766 // The Align and Offset of Resume function and Destroy function are fixed.
767 OffsetCache.insert({ResumeIndex, {8, 0}});
768 OffsetCache.insert({DestroyIndex, {8, 8}});
769 OffsetCache.insert(
770 {IndexIndex,
772
773 for (auto *V : FrameData.getAllDefs()) {
774 auto Index = FrameData.getFieldIndex(V);
775
776 OffsetCache.insert(
777 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
778 }
779
780 DenseMap<Type *, DIType *> DITypeCache;
781 // This counter is used to avoid same type names. e.g., there would be
782 // many i32 and i64 types in one coroutine. And we would use i32_0 and
783 // i32_1 to avoid the same type. Since it makes no sense the name of the
784 // fields confilicts with each other.
785 unsigned UnknownTypeNum = 0;
786 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
787 auto OCIt = OffsetCache.find(Index);
788 if (OCIt == OffsetCache.end())
789 continue;
790
791 std::string Name;
792 uint64_t SizeInBits;
793 uint32_t AlignInBits;
794 uint64_t OffsetInBits;
795 DIType *DITy = nullptr;
796
797 Type *Ty = FrameTy->getElementType(Index);
798 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
799 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
800 AlignInBits = OCIt->second.first * 8;
801 OffsetInBits = OCIt->second.second * 8;
802
803 if (auto It = NameCache.find(Index); It != NameCache.end()) {
804 Name = It->second.str();
805 DITy = TyCache[Index];
806 } else {
807 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
808 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
809 Name = DITy->getName().str();
810 Name += "_" + std::to_string(UnknownTypeNum);
811 UnknownTypeNum++;
812 }
813
814 Elements.push_back(DBuilder.createMemberType(
815 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
816 llvm::DINode::FlagArtificial, DITy));
817 }
818
819 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
820
821 auto *FrameDIVar =
822 DBuilder.createAutoVariable(DIS, "__coro_frame", DFile, LineNum,
823 FrameDITy, true, DINode::FlagArtificial);
824
825 // Subprogram would have ContainedNodes field which records the debug
826 // variables it contained. So we need to add __coro_frame to the
827 // ContainedNodes of it.
828 //
829 // If we don't add __coro_frame to the RetainedNodes, user may get
830 // `no symbol __coro_frame in context` rather than `__coro_frame`
831 // is optimized out, which is more precise.
832 auto RetainedNodes = DIS->getRetainedNodes();
833 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
834 RetainedNodes.end());
835 RetainedNodesVec.push_back(FrameDIVar);
836 DIS->replaceOperandWith(7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
837
838 // Construct the location for the frame debug variable. The column number
839 // is fake but it should be fine.
840 DILocation *DILoc =
841 DILocation::get(DIS->getContext(), LineNum, /*Column=*/1, DIS);
842 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
843
844 DbgVariableRecord *NewDVR =
845 new DbgVariableRecord(ValueAsMetadata::get(Shape.FramePtr), FrameDIVar,
846 DBuilder.createExpression(), DILoc,
849 It->getParent()->insertDbgRecordBefore(NewDVR, It);
850}
851
852// Build a struct that will keep state for an active coroutine.
853// struct f.frame {
854// ResumeFnTy ResumeFnAddr;
855// ResumeFnTy DestroyFnAddr;
856// ... promise (if present) ...
857// int ResumeIndex;
858// ... spills ...
859// };
861 FrameDataInfo &FrameData,
862 bool OptimizeFrame) {
863 LLVMContext &C = F.getContext();
864 const DataLayout &DL = F.getDataLayout();
865
866 // We will use this value to cap the alignment of spilled values.
867 std::optional<Align> MaxFrameAlignment;
868 if (Shape.ABI == coro::ABI::Async)
869 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
870 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
871
872 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
873 std::optional<FieldIDType> SwitchIndexFieldId;
874
875 if (Shape.ABI == coro::ABI::Switch) {
876 auto *FnPtrTy = PointerType::getUnqual(C);
877
878 // Add header fields for the resume and destroy functions.
879 // We can rely on these being perfectly packed.
880 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
881 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
882
883 // PromiseAlloca field needs to be explicitly added here because it's
884 // a header field with a fixed offset based on its alignment. Hence it
885 // needs special handling and cannot be added to FrameData.Allocas.
886 if (PromiseAlloca)
887 FrameData.setFieldIndex(
888 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
889
890 // Add a field to store the suspend index. This doesn't need to
891 // be in the header.
892 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
893 Type *IndexType = Type::getIntNTy(C, IndexBits);
894
895 SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
896 } else {
897 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
898 }
899
900 // Because multiple allocas may own the same field slot,
901 // we add allocas to field here.
902 B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
903 // Add PromiseAlloca to Allocas list so that
904 // 1. updateLayoutIndex could update its index after
905 // `performOptimizedStructLayout`
906 // 2. it is processed in insertSpills.
907 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
908 // We assume that the promise alloca won't be modified before
909 // CoroBegin and no alias will be create before CoroBegin.
910 FrameData.Allocas.emplace_back(
911 PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
912 // Create an entry for every spilled value.
913 for (auto &S : FrameData.Spills) {
914 Type *FieldType = S.first->getType();
915 MaybeAlign MA;
916 // For byval arguments, we need to store the pointed value in the frame,
917 // instead of the pointer itself.
918 if (const Argument *A = dyn_cast<Argument>(S.first)) {
919 if (A->hasByValAttr()) {
920 FieldType = A->getParamByValType();
921 MA = A->getParamAlign();
922 }
923 }
924 FieldIDType Id =
925 B.addField(FieldType, MA, false /*header*/, true /*IsSpillOfValue*/);
926 FrameData.setFieldIndex(S.first, Id);
927 }
928
929 StructType *FrameTy = [&] {
930 SmallString<32> Name(F.getName());
931 Name.append(".Frame");
932 return B.finish(Name);
933 }();
934
935 FrameData.updateLayoutIndex(B);
936 Shape.FrameAlign = B.getStructAlign();
937 Shape.FrameSize = B.getStructSize();
938
939 switch (Shape.ABI) {
940 case coro::ABI::Switch: {
941 // In the switch ABI, remember the switch-index field.
942 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
943 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
944 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
945 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
946
947 // Also round the frame size up to a multiple of its alignment, as is
948 // generally expected in C/C++.
949 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
950 break;
951 }
952
953 // In the retcon ABI, remember whether the frame is inline in the storage.
956 auto Id = Shape.getRetconCoroId();
958 = (B.getStructSize() <= Id->getStorageSize() &&
959 B.getStructAlign() <= Id->getStorageAlignment());
960 break;
961 }
962 case coro::ABI::Async: {
965 // Also make the final context size a multiple of the context alignment to
966 // make allocation easier for allocators.
970 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
972 "The alignment requirment of frame variables cannot be higher than "
973 "the alignment of the async function context");
974 }
975 break;
976 }
977 }
978
979 return FrameTy;
980}
981
982// Replace all alloca and SSA values that are accessed across suspend points
983// with GetElementPointer from coroutine frame + loads and stores. Create an
984// AllocaSpillBB that will become the new entry block for the resume parts of
985// the coroutine:
986//
987// %hdl = coro.begin(...)
988// whatever
989//
990// becomes:
991//
992// %hdl = coro.begin(...)
993// br label %AllocaSpillBB
994//
995// AllocaSpillBB:
996// ; geps corresponding to allocas that were moved to coroutine frame
997// br label PostSpill
998//
999// PostSpill:
1000// whatever
1001//
1002//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  LLVMContext &C = Shape.CoroBegin->getContext();
  Function *F = Shape.CoroBegin->getFunction();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Value *FramePtr = Shape.FramePtr;
  DominatorTree DT(*F);
  // NOTE(review): the rendered listing dropped original line 1010 here;
  // upstream it declares the map passed to coro::salvageDebugInfo below,
  // presumably:
  //   SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;

  // Create a GEP with the given index into the coroutine frame for the original
  // value Orig. Appends an extra 0 index for array-allocas, preserving the
  // original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          // Array alloca: append a trailing 0 so the GEP yields a pointer to
          // the first element rather than to the whole array field.
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    // NOTE(review): rendering dropped original line 1033 (the start of this
    // statement); upstream it is presumably:
    //   auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (FrameData.getDynamicAlign(Orig) != 0) {
        // Over-aligned field: round the raw frame address up to the alloca's
        // alignment with integer add/mask arithmetic.
        assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
        auto *M = AI->getModule();
        auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
        auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
        auto *AlignMask =
            ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
        PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
        PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
        return Builder.CreateIntToPtr(PtrValue, AI->getType());
      }
      // If the type of GEP is not equal to the type of AllocaInst, it implies
      // that the AllocaInst may be reused in the Frame slot of other
      // AllocaInst. So We cast GEP to the AllocaInst here to re-use
      // the Frame storage.
      //
      // Note: If we change the strategy dealing with alignment, we need to refine
      // this casting.
      if (GEP->getType() != Orig->getType())
        return Builder.CreateAddrSpaceCast(GEP, Orig->getType(),
                                           Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    BasicBlock::iterator InsertPt = coro::getSpillInsertionPt(Shape, Def, DT);

    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // If we're spilling an Argument, make sure we clear 'captures'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (ByValTy) {
      // For byval arguments, we need to store the pointed value in the frame,
      // instead of the pointer itself.
      auto *Value = Builder.CreateLoad(ByValTy, Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to reload
      // the spilled value from the coroutine frame. Populates the Value pointer
      // reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(CurrentBlock,
                               CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        // NOTE(review): rendering dropped original line 1111 here; upstream
        // it initializes DVRs, presumably:
        //   TinyPtrVector<DbgVariableRecord *> DVRs = findDVRDeclares(Def);
        // Try best to find dbg.declare. If the spill is a temp, there may not
        // be a direct dbg.declare. Walk up the load chain to find one from an
        // alias.
        if (F->getSubprogram()) {
          auto *CurDef = Def;
          while (DVRs.empty() && isa<LoadInst>(CurDef)) {
            auto *LdInst = cast<LoadInst>(CurDef);
            // Only consider ptr to ptr same type load.
            if (LdInst->getPointerOperandType() != LdInst->getType())
              break;
            CurDef = LdInst->getPointerOperand();
            if (!isa<AllocaInst, LoadInst>(CurDef))
              break;
            DVRs = findDVRDeclares(CurDef);
          }
        }

        auto SalvageOne = [&](DbgVariableRecord *DDI) {
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by the Cloner.
          // NOTE(review): rendering dropped original lines 1133 and 1136 —
          // the construction of NewDVR wrapping the argument lines kept
          // below; restore from upstream before building.
              ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
              DDI->getExpression(), DDI->getDebugLoc(),
          Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
              NewDVR, Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
        };
        for_each(DVRs, SalvageOne);
      }

      // NOTE(review): rendering dropped original line 1147 (the initializer's
      // continuation, presumably `findDVRDeclareValues(Def);`).
      TinyPtrVector<DbgVariableRecord *> DVRDeclareValues =
      // Try best to find dbg.declare_value. If the spill is a temp, there may
      // not be a direct dbg.declare_value. Walk up the load chain to find one
      // from an alias.
      if (F->getSubprogram()) {
        auto *CurDef = Def;
        while (DVRDeclareValues.empty() && isa<LoadInst>(CurDef)) {
          auto *LdInst = cast<LoadInst>(CurDef);
          // Only consider ptr to ptr same type load.
          if (LdInst->getPointerOperandType() != LdInst->getType())
            break;
          CurDef = LdInst->getPointerOperand();
          if (!isa<AllocaInst, LoadInst>(CurDef))
            break;
          DVRDeclareValues = findDVRDeclareValues(CurDef);
        }
      }

      auto SalvageOneCoro = [&](auto *DDI) {
        // This dbg.declare_value is preserved for all coro-split function
        // fragments. It will be unreachable in the main function, and
        // processed by coro::salvageDebugInfo() by the Cloner. However, convert
        // it to a dbg.declare to make sure future passes don't have to deal
        // with a dbg.declare_value.
        auto *VAM = ValueAsMetadata::get(CurrentReload);
        Type *Ty = VAM->getValue()->getType();
        // If the metadata type is not a pointer, emit a dbg.value instead.
        // NOTE(review): rendering dropped original lines 1174 and 1177-1178
        // here — the record-kind selection based on Ty and the construction
        // of NewDVR; restore from upstream before building.
            ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
            DDI->getExpression(), DDI->getDebugLoc(),
        Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
            NewDVR, Builder.GetInsertPoint());
        // This dbg.declare_value is for the main function entry point. It
        // will be deleted in all coro-split functions.
        coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
      };
      for_each(DVRDeclareValues, SalvageOneCoro);

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by normalizing them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
      // Instructions are added to Def's user list if the attached
      // debug records use Def. Update those now.
      for (DbgVariableRecord &DVR : filterDbgVars(U->getDbgRecordRange()))
        DVR.replaceVariableLocationOp(Def, CurrentReload, true);
    }
  }

  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  // Split twice so that the geps for spilled allocas get a dedicated
  // AllocaSpillBB between coro.begin and the rest of the code (see the
  // function-level comment above).
  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with Geps.
    Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // Remove any lifetime intrinsics, now that these are no longer allocas.
      for (User *U : make_early_inc_range(Alloca->users())) {
        auto *I = cast<Instruction>(U);
        if (I->isLifetimeStartOrEnd())
          I->eraseFromParent();
      }

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any alloca, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas for
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(Shape.AllocaSpillBlock,
                         Shape.AllocaSpillBlock->begin());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : make_early_inc_range(Alloca->users())) {
      auto *I = cast<Instruction>(U);
      // It is meaningless to retain the lifetime intrinsics refer for the
      // member of coroutine frames and the meaningless lifetime intrinsics
      // are possible to block further optimizations.
      if (I->isLifetimeStartOrEnd())
        I->eraseFromParent();
      else if (DT.dominates(Shape.CoroBegin, I))
        UsersToUpdate.push_back(I);
    }

    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableRecord *> DbgVariableRecords;
    findDbgUsers(Alloca, DbgVariableRecords);
    for (auto *DVR : DbgVariableRecords)
      DVR->replaceVariableLocationOp(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        // NOTE(review): rendering dropped original line 1282 (presumably
        // `report_fatal_error(`) before this message string.
            "Coroutines cannot handle copying of array allocas yet");

      // Copy the pre-CoroBegin contents of the alloca into its frame slot.
      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto &Value = *Alias.second;
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr =
          Builder.CreatePtrAdd(FramePtr, ConstantInt::get(ITy, Value));
      Alias.first->replaceUsesWithIf(
          AliasPtr, [&](Use &U) { return DT.dominates(Shape.CoroBegin, U); });
    }
  }

  // PromiseAlloca is not collected in FrameData.Allocas. So we don't handle
  // the case that the PromiseAlloca may have writes before CoroBegin in the
  // above codes. And it may be problematic in edge cases. See
  // https://github.com/llvm/llvm-project/issues/57861 for an example.
  if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
    // NOTE(review): rendering dropped original line 1308 here; upstream it
    // binds PA, presumably:
    //   AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
    // If there is memory accessing to promise alloca before CoroBegin;
    bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
      auto *Inst = dyn_cast<Instruction>(U.getUser());
      if (!Inst || DT.dominates(Shape.CoroBegin, Inst))
        return false;

      if (auto *CI = dyn_cast<CallInst>(Inst)) {
        // It is fine if the call wouldn't write to the Promise.
        // This is possible for @llvm.coro.id intrinsics, which
        // would take the promise as the second argument as a
        // marker.
        if (CI->onlyReadsMemory() ||
            CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
          return false;
        return true;
      }

      return isa<StoreInst>(Inst) ||
             // It may take too much time to track the uses.
             // Be conservative about the case the use may escape.
             isa<GetElementPtrInst>(Inst) ||
             // There would always be a bitcast for the promise alloca
             // before we enabled Opaque pointers. And now given
             // opaque pointers are enabled by default. This should be
             // fine.
             isa<BitCastInst>(Inst);
    });
    if (HasAccessingPromiseBeforeCB) {
      // Copy the promise's pre-CoroBegin contents into its frame slot.
      Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
      auto *G = GetFramePointer(PA);
      auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
      Builder.CreateStore(Value, G);
    }
  }
}
1344
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// single-entry PHI in InsertedBB.  Iteration walks SuccBB's leading PHIs and
// stops when UntilPHI is reached (or, when UntilPHI is null, after the last
// PHI, since getNextNode() then fails the dyn_cast).
// NOTE(review): rendering dropped the first line of this definition
// (original line 1347); upstream it is presumably:
//   static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    // Build a one-operand PHI in InsertedBB that carries V from PredBB, and
    // make SuccBB's PHI take that instead.
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
    InputV->insertBefore(InsertedBB->begin());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}
1363
1364// Rewrites the PHI Nodes in a cleanuppad.
1365static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1366 CleanupPadInst *CleanupPad) {
1367 // For every incoming edge to a CleanupPad we will create a new block holding
1368 // all incoming values in single-value PHI nodes. We will then create another
1369 // block to act as a dispather (as all unwind edges for related EH blocks
1370 // must be the same).
1371 //
1372 // cleanuppad:
1373 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1374 // %3 = cleanuppad within none []
1375 //
1376 // It will create:
1377 //
1378 // cleanuppad.corodispatch
1379 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1380 // %3 = cleanuppad within none []
1381 // switch i8 % 2, label %unreachable
1382 // [i8 0, label %cleanuppad.from.catchswitch
1383 // i8 1, label %cleanuppad.from.catch.1]
1384 // cleanuppad.from.catchswitch:
1385 // %4 = phi i32 [%0, %catchswitch]
1386 // br %label cleanuppad
1387 // cleanuppad.from.catch.1:
1388 // %6 = phi i32 [%1, %catch.1]
1389 // br %label cleanuppad
1390 // cleanuppad:
1391 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1392 // [%6, %cleanuppad.from.catch.1]
1393
1394 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1395 auto *UnreachBB = BasicBlock::Create(
1396 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1397 IRBuilder<> Builder(UnreachBB);
1398 Builder.CreateUnreachable();
1399
1400 // Create a new cleanuppad which will be the dispatcher.
1401 auto *NewCleanupPadBB =
1402 BasicBlock::Create(CleanupPadBB->getContext(),
1403 CleanupPadBB->getName() + Twine(".corodispatch"),
1404 CleanupPadBB->getParent(), CleanupPadBB);
1405 Builder.SetInsertPoint(NewCleanupPadBB);
1406 auto *SwitchType = Builder.getInt8Ty();
1407 auto *SetDispatchValuePN =
1408 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1409 CleanupPad->removeFromParent();
1410 CleanupPad->insertAfter(SetDispatchValuePN->getIterator());
1411 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1412 pred_size(CleanupPadBB));
1413
1414 int SwitchIndex = 0;
1415 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1416 for (BasicBlock *Pred : Preds) {
1417 // Create a new cleanuppad and move the PHI values to there.
1418 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1419 CleanupPadBB->getName() +
1420 Twine(".from.") + Pred->getName(),
1421 CleanupPadBB->getParent(), CleanupPadBB);
1422 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1423 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1424 Pred->getName());
1425 Builder.SetInsertPoint(CaseBB);
1426 Builder.CreateBr(CleanupPadBB);
1427 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1428
1429 // Update this Pred to the new unwind point.
1430 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1431
1432 // Setup the switch in the dispatcher.
1433 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1434 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1435 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1436 SwitchIndex++;
1437 }
1438}
1439
// NOTE(review): rendering dropped the head of this definition (original
// lines 1440-1441); upstream it is presumably:
//   static void cleanupSinglePredPHIs(Function &F) {
//     SmallVector<PHINode *, 32> Worklist;
  // Collect the leading run of single-incoming-value PHIs in every block;
  // scanning a block stops at the first PHI with more than one incoming
  // value.
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  // A single-incoming PHI is just a copy: forward its uses to the incoming
  // value.  The now-dead PHI itself is not erased here.
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}
1456
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br %label loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br %label loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (!BB.empty()) {
    if (auto *CleanupPad =
            // NOTE(review): rendering dropped original lines 1483-1484 here —
            // the dyn_cast of BB's first non-PHI instruction to
            // CleanupPadInst, and the declaration of Preds (presumably
            // `SmallVector<BasicBlock *, 4> Preds(predecessors(&BB));`);
            // restore from upstream before building.
      for (BasicBlock *Pred : Preds) {
        if (CatchSwitchInst *CS =
                dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
          // CleanupPad with a CatchSwitch predecessor: therefore this is an
          // unwind destination that needs to be handled specially.
          assert(CS->getUnwindDest() == &BB);
          (void)CS;
          rewritePHIsForCleanupPad(&BB, CleanupPad);
          return;
        }
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if (!BB.empty()) {
    if ((LandingPad =
             // NOTE(review): rendering dropped original line 1503 — the
             // dyn_cast of BB's first non-PHI instruction to LandingPadInst.
      // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
      // We replace the original landing pad with a PHINode that will collect the
      // results from all of them.
      ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
      ReplPHI->insertBefore(LandingPad->getIterator());
      ReplPHI->takeName(LandingPad);
      LandingPad->replaceAllUsesWith(ReplPHI);
      // We will erase the original landing pad at the end of this function after
      // ehAwareSplitEdge cloned it in the transition blocks.
    }
  }

  // NOTE(review): rendering dropped original line 1516, the declaration of
  // Preds (presumably `SmallVector<BasicBlock *, 4> Preds(predecessors(&BB));`).
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge function cloned the original landing pad.
    // No longer need it.
    LandingPad->eraseFromParent();
  }
}
1532
static void rewritePHIs(Function &F) {
  // NOTE(review): rendering dropped original line 1534, the worklist
  // declaration; upstream it is presumably:
  //   SmallVector<BasicBlock *, 8> WorkList;

  // Collect first, rewrite second: rewritePHIs(BB) splits edges and creates
  // blocks, so the function cannot be mutated while iterating over it.
  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}
1544
// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
// Returns the block that starts (or already started) at I.
// NOTE(review): rendering dropped the signature line (original 1547);
// upstream it is presumably:
//   static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      // I already heads a single-predecessor block: no split needed, just
      // give the block the requested name.
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}
1557
1558// Split above and below a particular instruction so that it
1559// will be all alone by itself in a block.
1560static void splitAround(Instruction *I, const Twine &Name) {
1561 splitBlockIfNotFirst(I, Name);
1562 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1563}
1564
/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
/// Conservative: answers false once the bounded lookahead is exhausted.
/// NOTE(review): rendering dropped the first signature line (original 1567);
/// upstream it is presumably:
///   static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (coro::isSuspendBlock(BB))
    return true;

  // Recurse into the successors.
  for (auto *Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}
1586
// NOTE(review): rendering dropped the signature of this definition
// (original line 1587); upstream it is presumably:
//   static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto *U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}
1602
/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.  Replaced uses are queued on DeadInsts for the caller to
/// erase; nothing is deleted here.
/// NOTE(review): rendering dropped the first signature line (original 1605);
/// upstream it is presumably:
///   static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto *AI : LocalAllocas) {
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    // NOTE(review): rendering dropped the guard on the next statement
    // (original line 1613); upstream it is presumably:
    //   if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateStackSave();

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(AI->getAlignment());

    for (auto *U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateStackRestore(StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
1641
/// Get the current swifterror value.  Emitted as a call through a null
/// function pointer acting as a placeholder intrinsic; the call is recorded
/// in Shape.SwiftErrorOps so it can be rewritten later.
/// NOTE(review): rendering dropped the first signature line (original 1643);
/// upstream it is presumably:
///   static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1654
/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
/// Like emitGetSwiftErrorValue, this emits a placeholder call through a null
/// function pointer and records it in Shape.SwiftErrorOps.
/// NOTE(review): rendering dropped the first signature line (original 1658);
/// upstream it is presumably:
///   static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(Builder.getPtrTy(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1670
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
/// NOTE(review): rendering dropped the first signature line (original 1676);
/// upstream it is presumably:
///   static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}
1703
/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
/// NOTE(review): rendering dropped the first signature line (original 1706);
/// upstream it is presumably:
///   static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    // NOTE(review): rendering dropped original line 1712 — the load/store
    // guard on this continue, presumably:
    //   if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    // NOTE(review): original line 1715 (presumably an assert that User is a
    // call or invoke) was also dropped here.
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
1727
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilog.
///
/// The argument keeps the swifterror flag.
/// NOTE(review): rendering dropped the first signature line (original 1732);
/// upstream it is presumably:
///   static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(&F.getEntryBlock(),
                      F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = PointerType::getUnqual(F.getContext());

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto *Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto *End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}
1767
/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
/// NOTE(review): rendering dropped the signature of this definition
/// (original line 1770); upstream it is presumably:
///   static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument; only the first match is handled (the
  // loop breaks after it).
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.  Only the entry block is scanned.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}
1800
1801/// For each local variable that all of its user are only used inside one of
1802/// suspended region, we sink their lifetime.start markers to the place where
1803/// after the suspend block. Doing so minimizes the lifetime of each variable,
1804/// hence minimizing the amount of data we end up putting on the frame.
1806 SuspendCrossingInfo &Checker,
1807 const DominatorTree &DT) {
1808 if (F.hasOptNone())
1809 return;
1810
1811 // Collect all possible basic blocks which may dominate all uses of allocas.
1813 DomSet.insert(&F.getEntryBlock());
1814 for (auto *CSI : Shape.CoroSuspends) {
1815 BasicBlock *SuspendBlock = CSI->getParent();
1816 assert(coro::isSuspendBlock(SuspendBlock) &&
1817 SuspendBlock->getSingleSuccessor() &&
1818 "should have split coro.suspend into its own block");
1819 DomSet.insert(SuspendBlock->getSingleSuccessor());
1820 }
1821
1822 for (Instruction &I : instructions(F)) {
1824 if (!AI)
1825 continue;
1826
1827 for (BasicBlock *DomBB : DomSet) {
1828 bool Valid = true;
1830
1831 auto isLifetimeStart = [](Instruction* I) {
1832 if (auto* II = dyn_cast<IntrinsicInst>(I))
1833 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1834 return false;
1835 };
1836
1837 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
1838 if (isLifetimeStart(U)) {
1839 Lifetimes.push_back(U);
1840 return true;
1841 }
1842 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
1843 return false;
1844 if (isLifetimeStart(U->user_back())) {
1845 Lifetimes.push_back(U->user_back());
1846 return true;
1847 }
1848 return false;
1849 };
1850
1851 for (User *U : AI->users()) {
1853 // For all users except lifetime.start markers, if they are all
1854 // dominated by one of the basic blocks and do not cross
1855 // suspend points as well, then there is no need to spill the
1856 // instruction.
1857 if (!DT.dominates(DomBB, UI->getParent()) ||
1858 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
1859 // Skip lifetime.start, GEP and bitcast used by lifetime.start
1860 // markers.
1861 if (collectLifetimeStart(UI, AI))
1862 continue;
1863 Valid = false;
1864 break;
1865 }
1866 }
1867 // Sink lifetime.start markers to dominate block when they are
1868 // only used outside the region.
1869 if (Valid && Lifetimes.size() != 0) {
1870 auto *NewLifetime = Lifetimes[0]->clone();
1871 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(0), AI);
1872 NewLifetime->insertBefore(DomBB->getTerminator()->getIterator());
1873
1874 // All the outsided lifetime.start markers are no longer necessary.
1875 for (Instruction *S : Lifetimes)
1876 S->eraseFromParent();
1877
1878 break;
1879 }
1880 }
1881 }
1882}
1883
1884static std::optional<std::pair<Value &, DIExpression &>>
1886 bool UseEntryValue, Function *F, Value *Storage,
1887 DIExpression *Expr, bool SkipOutermostLoad) {
1888 IRBuilder<> Builder(F->getContext());
1889 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1890 while (isa<IntrinsicInst>(InsertPt))
1891 ++InsertPt;
1892 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
1893
1894 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
1895 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
1896 Storage = LdInst->getPointerOperand();
1897 // FIXME: This is a heuristic that works around the fact that
1898 // LLVM IR debug intrinsics cannot yet distinguish between
1899 // memory and value locations: Because a dbg.declare(alloca) is
1900 // implicitly a memory location no DW_OP_deref operation for the
1901 // last direct load from an alloca is necessary. This condition
1902 // effectively drops the *last* DW_OP_deref in the expression.
1903 if (!SkipOutermostLoad)
1905 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
1906 Storage = StInst->getValueOperand();
1907 } else {
1909 SmallVector<Value *, 0> AdditionalValues;
1911 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
1912 AdditionalValues);
1913 if (!Op || !AdditionalValues.empty()) {
1914 // If salvaging failed or salvaging produced more than one location
1915 // operand, give up.
1916 break;
1917 }
1918 Storage = Op;
1919 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
1920 }
1921 SkipOutermostLoad = false;
1922 }
1923 if (!Storage)
1924 return std::nullopt;
1925
1926 auto *StorageAsArg = dyn_cast<Argument>(Storage);
1927 const bool IsSwiftAsyncArg =
1928 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
1929
1930 // Swift async arguments are described by an entry value of the ABI-defined
1931 // register containing the coroutine context.
1932 // Entry values in variadic expressions are not supported.
1933 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
1936
1937 // If the coroutine frame is an Argument, store it in an alloca to improve
1938 // its availability (e.g. registers may be clobbered).
1939 // Avoid this if the value is guaranteed to be available through other means
1940 // (e.g. swift ABI guarantees).
1941 if (StorageAsArg && !IsSwiftAsyncArg) {
1942 auto &Cached = ArgToAllocaMap[StorageAsArg];
1943 if (!Cached) {
1944 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
1945 Storage->getName() + ".debug");
1946 Builder.CreateStore(Storage, Cached);
1947 }
1948 Storage = Cached;
1949 // FIXME: LLVM lacks nuanced semantics to differentiate between
1950 // memory and direct locations at the IR level. The backend will
1951 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
1952 // location. Thus, if there are deref and offset operations in the
1953 // expression, we need to add a DW_OP_deref at the *start* of the
1954 // expression to first load the contents of the alloca before
1955 // adjusting it with the expression.
1957 }
1958
1959 Expr = Expr->foldConstantMath();
1960 return {{*Storage, *Expr}};
1961}
1962
1965 DbgVariableRecord &DVR, bool UseEntryValue) {
1966
1967 Function *F = DVR.getFunction();
1968 // Follow the pointer arithmetic all the way to the incoming
1969 // function argument and convert into a DIExpression.
1970 bool SkipOutermostLoad = DVR.isDbgDeclare() || DVR.isDbgDeclareValue();
1971 Value *OriginalStorage = DVR.getVariableLocationOp(0);
1972
1973 auto SalvagedInfo =
1974 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
1975 DVR.getExpression(), SkipOutermostLoad);
1976 if (!SalvagedInfo)
1977 return;
1978
1979 Value *Storage = &SalvagedInfo->first;
1980 DIExpression *Expr = &SalvagedInfo->second;
1981
1982 DVR.replaceVariableLocationOp(OriginalStorage, Storage);
1983 DVR.setExpression(Expr);
1984 // We only hoist dbg.declare and dbg.declare_value today since it doesn't make
1985 // sense to hoist dbg.value since it does not have the same function wide
1986 // guarantees that dbg.declare does.
1989 std::optional<BasicBlock::iterator> InsertPt;
1990 if (auto *I = dyn_cast<Instruction>(Storage)) {
1991 InsertPt = I->getInsertionPointAfterDef();
1992 // Update DILocation only if variable was not inlined.
1993 DebugLoc ILoc = I->getDebugLoc();
1994 DebugLoc DVRLoc = DVR.getDebugLoc();
1995 if (ILoc && DVRLoc &&
1996 DVRLoc->getScope()->getSubprogram() ==
1997 ILoc->getScope()->getSubprogram())
1998 DVR.setDebugLoc(ILoc);
1999 } else if (isa<Argument>(Storage))
2000 InsertPt = F->getEntryBlock().begin();
2001 if (InsertPt) {
2002 DVR.removeFromParent();
2003 // If there is a dbg.declare_value being reinserted, insert it as a
2004 // dbg.declare instead, so that subsequent passes don't have to deal with
2005 // a dbg.declare_value.
2007 auto *MD = DVR.getRawLocation();
2008 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
2009 Type *Ty = VAM->getValue()->getType();
2010 if (Ty->isPointerTy())
2012 else
2014 }
2015 }
2016 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
2017 }
2018 }
2019}
2020
2023 // Don't eliminate swifterror in async functions that won't be split.
2024 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2026
2027 if (Shape.ABI == coro::ABI::Switch &&
2030 }
2031
2032 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2033 // intrinsics are in their own blocks to simplify the logic of building up
2034 // SuspendCrossing data.
2035 for (auto *CSI : Shape.CoroSuspends) {
2036 if (auto *Save = CSI->getCoroSave())
2037 splitAround(Save, "CoroSave");
2038 splitAround(CSI, "CoroSuspend");
2039 }
2040
2041 // Put CoroEnds into their own blocks.
2042 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2043 splitAround(CE, "CoroEnd");
2044
2045 // Emit the musttail call function in a new block before the CoroEnd.
2046 // We do this here so that the right suspend crossing info is computed for
2047 // the uses of the musttail call function call. (Arguments to the coro.end
2048 // instructions would be ignored)
2049 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2050 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2051 if (!MustTailCallFn)
2052 continue;
2053 IRBuilder<> Builder(AsyncEnd);
2054 SmallVector<Value *, 8> Args(AsyncEnd->args());
2055 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2057 AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
2058 splitAround(Call, "MustTailCall.Before.CoroEnd");
2059 }
2060 }
2061
2062 // Later code makes structural assumptions about single predecessors phis e.g
2063 // that they are not live across a suspend point.
2065
2066 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2067 // never have its definition separated from the PHI by the suspend point.
2068 rewritePHIs(F);
2069}
2070
2071void coro::BaseABI::buildCoroutineFrame(bool OptimizeFrame) {
2072 SuspendCrossingInfo Checker(F, Shape.CoroSuspends, Shape.CoroEnds);
2074
2075 const DominatorTree DT(F);
2076 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2078 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
2079
2080 // All values (that are not allocas) that needs to be spilled to the frame.
2081 coro::SpillInfo Spills;
2082 // All values defined as allocas that need to live in the frame.
2084
2085 // Collect the spills for arguments and other not-materializable values.
2086 coro::collectSpillsFromArgs(Spills, F, Checker);
2087 SmallVector<Instruction *, 4> DeadInstructions;
2089 coro::collectSpillsAndAllocasFromInsts(Spills, Allocas, DeadInstructions,
2090 LocalAllocas, F, Checker, DT, Shape);
2091 coro::collectSpillsFromDbgInfo(Spills, F, Checker);
2092
2093 LLVM_DEBUG(dumpAllocas(Allocas));
2094 LLVM_DEBUG(dumpSpills("Spills", Spills));
2095
2096 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2097 Shape.ABI == coro::ABI::Async)
2098 sinkSpillUsesAfterCoroBegin(DT, Shape.CoroBegin, Spills, Allocas);
2099
2100 // Build frame
2101 FrameDataInfo FrameData(Spills, Allocas);
2102 Shape.FrameTy = buildFrameType(F, Shape, FrameData, OptimizeFrame);
2103 Shape.FramePtr = Shape.CoroBegin;
2104 // For now, this works for C++ programs only.
2105 buildFrameDebugInfo(F, Shape, FrameData);
2106 // Insert spills and reloads
2107 insertSpills(FrameData, Shape);
2108 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2109
2110 for (auto *I : DeadInstructions)
2111 I->eraseFromParent();
2112}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
Rewrite undef for false bool rewritePHIs(Function &F, UniformityInfo &UA, DominatorTree *DT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void cleanupSinglePredPHIs(Function &F)
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
static void dumpAllocas(const SmallVectorImpl< coro::AllocaInfo > &Allocas)
static void splitAround(Instruction *I, const Twine &Name)
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills)
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData, bool OptimizeFrame)
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker, const DominatorTree &DT)
For each local variable that all of its user are only used inside one of suspended region,...
static StringRef solveTypeName(Type *Ty)
Create name for Type.
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
static bool isLifetimeStart(const Instruction *Inst)
Definition GVN.cpp:1210
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const unsigned FramePtr
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
bool empty() const
Definition BasicBlock.h:481
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
Definition CoroInstr.h:758
void clearPromise()
Definition CoroInstr.h:159
LLVM_ABI DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, Metadata *SizeInBits, uint32_t AlignInBits, Metadata *OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
LLVM_ABI DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
LLVM_ABI DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero, uint32_t NumExtraInhabitants=0, uint32_t DataSizeInBits=0)
Create debugging information entry for a basic type.
LLVM_ABI DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, Metadata *SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="", DIType *Specification=nullptr, uint32_t NumExtraInhabitants=0)
Create debugging information entry for a struct.
LLVM_ABI DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
LLVM_ABI DIExpression * createExpression(ArrayRef< uint64_t > Addr={})
Create a new descriptor for the specified variable which has a complex address expression for its add...
LLVM_ABI DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
LLVM_ABI void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
LLVM_ABI bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description. Uses SubclassData1.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
LLVM_ABI uint32_t getAlignInBits() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:760
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
LLVM_ABI void removeFromParent()
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType Type
Classification of the debug-info record that this DbgVariableRecord represents.
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
A debug info location.
Definition DebugLoc.h:124
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
LLVM_ABI void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
LLVM_ABI void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
LLVMContext & getContext() const
Definition Metadata.h:1242
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1526
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:225
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition DataLayout.h:748
Class to represent struct types.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
void setDefaultDest(BasicBlock *DefaultCase)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
User * getUser() const
Returns the User that contains this Use.
Definition Use.h:61
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:503
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
std::function< bool(Instruction &I)> IsMaterializable
Definition ABI.h:64
Function & F
Definition ABI.h:59
virtual void buildCoroutineFrame(bool OptimizeFrame)
coro::Shape & Shape
Definition ABI.h:60
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an SmallVector or SmallString.
StringRef str() const
Return a StringRef for the vector contents.
CallInst * Call
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
SmallMapVector< Value *, SmallVector< Instruction *, 2 >, 8 > SpillInfo
Definition SpillUtils.h:18
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
Definition CoroShape.h:48
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
Definition CoroShape.h:43
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
Definition CoroShape.h:36
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
bool isSuspendBlock(BasicBlock *BB)
void normalizeCoroutine(Function &F, coro::Shape &Shape, TargetTransformInfo &TTI)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
LLVM_ABI void doRematerializations(Function &F, SuspendCrossingInfo &Checker, std::function< bool(Instruction &)> IsMaterializable)
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableRecord &DVR, bool UseEntryValue)
Attempts to rewrite the location operand of debug records in terms of the coroutine frame pointer,...
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
bool isCPlusPlus(SourceLanguage S)
Definition Dwarf.h:512
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1718
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition ScopeExit.h:59
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition MathExtras.h:350
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition Alignment.h:134
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
LLVM_ABI BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2274
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition Alignment.h:186
TargetTransformInfo TTI
LLVM_ABI std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclareValues(Value *V)
As above, for DVRDeclareValues.
Definition DebugInfo.cpp:65
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
LLVM_ABI void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
LLVM_ABI void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
AsyncLoweringStorage AsyncLowering
Definition CoroShape.h:157
StructType * FrameTy
Definition CoroShape.h:116
AnyCoroIdRetconInst * getRetconCoroId() const
Definition CoroShape.h:165
CoroIdInst * getSwitchCoroId() const
Definition CoroShape.h:160
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition CoroShape.h:59
uint64_t FrameSize
Definition CoroShape.h:118
AllocaInst * getPromiseAlloca() const
Definition CoroShape.h:246
SwitchLoweringStorage SwitchLowering
Definition CoroShape.h:155
CoroBeginInst * CoroBegin
Definition CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition CoroShape.h:252
RetconLoweringStorage RetconLowering
Definition CoroShape.h:156
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition CoroShape.h:55
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition CoroShape.h:64
BasicBlock * AllocaSpillBlock
Definition CoroShape.h:120