//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize a value
// instead of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

static cl::opt<bool> RematDerivedAtUses("rs4gc-remat-derived-at-uses",
                                        cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores correctness by erasing
/// all attributes in the module that externally imply dereferenceability.
/// Similar reasoning also applies to the noalias attributes and metadata.
/// gc.statepoint can touch the entire heap including noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
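///
/// For example (illustrative IR, hypothetical function): a parameter
/// attribute such as in
///   define void @f(i8 addrspace(1)* dereferenceable(16) %p)
/// may no longer hold once %p's object can be relocated at a safepoint, so
/// it has to be erased.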
static void stripNonValidData(Module &M);

// Find the GC strategy for a function, or null if it doesn't have one.
static std::unique_ptr<GCStrategy> findGCStrategy(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module. Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}
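
// Typical standalone invocation (illustrative): this pass can be run as
//   opt -passes=rewrite-statepoints-for-gc input.ll -S
// and only rewrites functions whose GC strategy uses statepoints, e.g.
// functions marked with gc "statepoint-example".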

namespace {

class RewriteStatepointsForGCLegacyPass : public ModulePass {
  RewriteStatepointsForGC Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
    initializeRewriteStatepointsForGCLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    bool Changed = false;
    for (Function &F : M) {
      // Nothing to do for declarations.
      if (F.isDeclaration() || F.empty())
        continue;

      // Policy choice says not to rewrite - the most common reason is that
      // we're compiling code without a GCStrategy.
      if (!shouldRewriteStatepointsIn(F))
        continue;

      const TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
      const TargetLibraryInfo &TLI =
          getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();

      Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
    }

    if (!Changed)
      return false;

    // stripNonValidData asserts that shouldRewriteStatepointsIn
    // returns true for at least one function in the module. Since at least
    // one function changed, we know that the precondition is satisfied.
    stripNonValidData(M);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else. We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

} // end anonymous namespace

char RewriteStatepointsForGCLegacyPass::ID = 0;

ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
  return new RewriteStatepointsForGCLegacyPass();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
                      "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
                    "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};
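
// These sets drive the standard backward liveness dataflow implemented by
// computeLiveInValues below; informally, for each basic block BB:
//   LiveIn(BB)  = LiveSet(BB) U (LiveOut(BB) - KillSet(BB))
//   LiveOut(BB) = union of LiveIn(Succ) over all successors Succ of BB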

// The type of the internal cache used inside the findBasePointers family
// of functions. From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain. Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type
using DefiningValueMapTy = MapVector<Value *, Value *>;
using IsKnownBaseMapTy = MapVector<Value *, bool>;
using PointerToBaseTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// The *new* gc.statepoint instruction itself. This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

struct RematerializationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
using RematCandTy = MapVector<Value *, RematerializationCandidateRecord>;

} // end anonymous namespace

static std::optional<ArrayRef<Use>>
GetDeoptBundleOperands(const CallBase *Call) {
  std::optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return std::nullopt;
  }

  return DeoptBundle->Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data, GCStrategy *GC);

/// Given results from the dataflow liveness computation, find the set of live
/// values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out, GCStrategy *GC);

static bool isGCPointerType(Type *T, GCStrategy *GC) {
  assert(GC && "GC Strategy for isGCPointerType cannot be null");

  if (!isa<PointerType>(T))
    return false;

  // conservative - same as StatepointLowering
  return GC->isGCManagedPointer(T).value_or(true);
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T, GCStrategy *GC) {
  // We fully support gc pointers
  if (isGCPointerType(T, GC))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType(), GC))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty, GCStrategy *GC) {
  if (isGCPointerType(Ty, GC))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType(), GC);
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType(), GC);
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(),
                        [GC](Type *Ty) { return containsGCPtrType(Ty, GC); });
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates). Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty, GCStrategy *GC) {
  return containsGCPtrType(Ty, GC) && !isHandledGCPointerType(Ty, GC);
}
#endif

// Return the name of the value suffixed with the provided suffix, or, if the
// value is unnamed, the specified default name.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction. The analysis is performed immediately before the
// given instruction. Values defined by that instruction are not considered
// live. Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result, GCStrategy *GC) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet, GC);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

/// Returns true if V is a known base.
static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases);

/// Caches the IsKnownBase flag for a value and asserts that it wasn't present
/// in the cache before.
static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases);

static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases);

/// Return a base defining value for the given vector instruction 'I'. The
/// returned value is itself vector typed and is a BDV (and possibly a base)
/// of the entire vector 'I'. Whether the result is already known to be a base
/// pointer is recorded in 'KnownBases'.
static Value *findBaseDefiningValueOfVector(Value *I, DefiningValueMapTy &Cache,
                                            IsKnownBaseMapTy &KnownBases) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    auto *CAZ = ConstantAggregateZero::get(I->getType());
    Cache[I] = CAZ;
    setKnownBase(CAZ, /* IsKnownBase */true, KnownBases);
    return CAZ;
  }

  if (isa<LoadInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<InsertElementInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  if (isa<ShuffleVectorInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  // The behavior of freeze instructions is the same for vector and
  // non-vector data types.
  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *BDV = findBaseDefiningValue(BC->getOperand(0), Cache, KnownBases);
    Cache[BC] = BDV;
    return BDV;
  }

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  Cache[I] = I;
  setKnownBase(I, /* IsKnownBase */false, KnownBases);
  return I;
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");
  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I, Cache, KnownBases);

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live. Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base. By doing this we
    // try to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.
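    //
    // For example (illustrative IR): in
    //   %sel = select i1 %c, i8 addrspace(1)* %gcptr, i8 addrspace(1)* null
    // the null arm is given a null base rather than being reported as an
    // unrelated base, keeping the base phi/select construction simple.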

    auto *CPN = ConstantPointerNull::get(cast<PointerType>(I->getType()));
    Cache[I] = CPN;
    setKnownBase(CPN, /* IsKnownBase */true, KnownBases);
    return CPN;
  }

  // inttoptrs in an integral address space are currently ill-defined. We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them. Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr). We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    auto *BDV = findBaseDefiningValue(Def, Cache, KnownBases);
    Cache[CI] = BDV;
    return BDV;
  }

  if (isa<LoadInst>(I)) {
    // The value loaded is a gc base itself
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // The base of this GEP is the base
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported. It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      auto *BDV = findBaseDefiningValue(II->getOperand(0), Cache, KnownBases);
      Cache[II] = BDV;
      return BDV;
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // TODO: I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I)) {
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");
  setKnownBase(I, /* IsKnownBase */IsKnownBase, KnownBases);
  Cache[I] = I;

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here. These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return I;

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base). It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return I;
}
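
// For example (illustrative IR): for
//   %d = getelementptr i8, i8 addrspace(1)* %q, i64 16
// the BDV of %d is whatever %q's BDV is, while for
//   %p = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
// the phi %p itself is returned as a (possibly non-base) BDV which the
// caller, findBasePointer, must resolve.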

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache,
                                          IsKnownBaseMapTy &KnownBases) {
  if (!Cache.contains(I)) {
    auto *BDV = findBaseDefiningValue(I, Cache, KnownBases);
    Cache[I] = BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cache[I]->getName() << ", is known base = "
                      << KnownBases[I] << "\n");
  }
  assert(Cache[I] != nullptr);
  assert(KnownBases.contains(Cache[I]) &&
         "Cached value must be present in known bases map");
  return Cache[I];
}

/// Return a base pointer for this value if known. Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache,
                            IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseDefiningValueCached(I, Cache, KnownBases);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference. Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

#ifndef NDEBUG
/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}
#endif

static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases) {
  auto It = KnownBases.find(V);
  assert(It != KnownBases.end() && "Value not present in the map");
  return It->second;
}

static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases) {
#ifndef NDEBUG
  auto It = KnownBases.find(V);
  if (It != KnownBases.end())
    assert(It->second == IsKnownBase && "Changing already present value");
#endif
  KnownBases[V] = IsKnownBase;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // If we are not known - just take the other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }
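
  // Informal meet table implied by the code above:
  //   Unknown meet X = X;  Conflict meet X = Conflict;
  //   Base(b) meet Base(b) = Base(b);  Base(b1) meet Base(b2) = Conflict.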

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base pointer it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
/// relocation.) On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache,
                              IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseOrBDV(I, Cache, KnownBases);

  if (isKnownBase(Def, KnownBases) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state. Use an
  //   optimistic algorithm to propagate base pointer information. Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When algorithm terminates, all PHIs will either have a single concrete
  //   base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments. Add
  //   these to the base[Instruction] = BasePtr mapping. For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   arguments.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm. This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
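  //
  // Small example (hypothetical names): for
  //   %d = phi i8 addrspace(1)* [ %gep1, %bb1 ], [ %gep2, %bb2 ]
  // where %gep1 and %gep2 derive from distinct bases %b1 and %b2, the phi's
  // state meets Base(%b1) with Base(%b2) and lands in CONFLICT, so a parallel
  //   %d.base = phi i8 addrspace(1)* [ %b1, %bb1 ], [ %b2, %bb2 ]
  // is inserted to make a base available wherever %d is live.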

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below. This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };


  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value *, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache, KnownBases);
        if (isKnownBase(Base, KnownBases) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely. However, this can only be done when InVal and Base
          // are both scalar or both vector. Otherwise, we need to find a
          // correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers. The purpose of this is to
  // reuse existing values when the derived pointer we were asked to materialize
  // a base pointer for happens to be a base pointer itself. (Or a sub-graph
  // feeding it does.)
  SmallVector<Value *, 16> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        // If the input of the BDV is the BDV itself we can prune it. This is
        // only possible if the BDV is a PHI node.
        if (V->stripPointerCasts() == BDV)
          return true;
        Value *VBDV = findBaseOrBDV(V, Cache, KnownBases);
        if (V->stripPointerCasts() != VBDV)
          return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
        return States.count(VBDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value. We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result. TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in the
      // lattice.
      assert((!isKnownBase(BDV, KnownBases) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache, KnownBases);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction itself
  // is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(I, KnownBases) || !areBothVectorOrScalar(I, BaseValue)) &&
        "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself. We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(I, BDVState::Base, BaseInst);
      setKnownBase(BaseInst, /* IsKnownBase */true, KnownBases);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // a scalar type (these could be phis or selects or any instruction that
      // are of scalar type, but the base can be a vector type). We
      // conservatively set this as conflict. Setting the base value for these
      // conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(I, BDVState::Conflict);
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(I, KnownBases) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
    setKnownBase(BaseInst, /* IsKnownBase */true, KnownBases);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction. The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above. As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table. In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache, KnownBases);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fixup all the inputs of the new PHIs. Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(BDV, KnownBases) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries. Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
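      // (That is, two entries coming from the same predecessor block must
      // reference a single bitcast instruction, not two distinct casts of
      // the same base value.)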
      DenseMap<BasicBlock *, Value *> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);

          // We can't use `stripPointerCasts` instead of this function because
          // `stripPointerCasts` doesn't handle vectors of pointers.
          auto StripBitCasts = [](Value *V) -> Value * {
            while (auto *BC = dyn_cast<BitCastInst>(V))
              V = BC->getOperand(0);
            return V;
          };
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value. A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(StripBitCasts(Base) == StripBitCasts(OldBase) &&
                 "findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input. We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use undef
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation! FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(BDV, KnownBases) || !areBothVectorOrScalar(BDV, Base)) &&
        "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from. This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'. This ensures that any use of 'derived' can
// also use 'base'. This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live. Note that derived can be equal to base if the original
// pointer was a base pointer.
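//
// Illustrative post condition (hypothetical names): for live = { %obj, %fld }
// where
//   %fld = getelementptr i8, i8 addrspace(1)* %obj, i64 8
// this fills PointerToBase with { %obj -> %obj, %fld -> %obj }.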
static void findBasePointers(const StatepointLiveSetTy &live,
                             PointerToBaseTy &PointerToBase, DominatorTree *DT,
                             DefiningValueMapTy &DVCache,
                             IsKnownBaseMapTy &KnownBases) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache, KnownBases);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result,
                             PointerToBaseTy &PointerToBase,
                             IsKnownBaseMapTy &KnownBases) {
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph. This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion. If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache,
                   KnownBases);
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result,
                                  PointerToBaseTy &PointerToBase,
                                  GCStrategy *GC);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<PartiallyConstructedSafepointRecord> records,
    PointerToBaseTy &PointerToBase, GCStrategy *GC) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again. The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData, GC);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info, PointerToBase,
                          GC);
  }
}

// Utility function which clones all instructions from "ChainToBase"
// and inserts them before "InsertBefore". Returns rematerialized value
// which should be used after statepoint.
static Instruction *rematerializeChain(ArrayRef<Instruction *> ChainToBase,
                                       Instruction *InsertBefore,
                                       Value *RootOfChain,
                                       Value *AlternateLiveBase) {
  Instruction *LastClonedValue = nullptr;
  Instruction *LastValue = nullptr;
  // Walk backwards to visit top-most instructions first.
  for (Instruction *Instr :
       make_range(ChainToBase.rbegin(), ChainToBase.rend())) {
    // Only GEPs and casts are supported as we need to be careful to not
    // introduce any new uses of pointers not in the liveset.
    // Note that it's fine to introduce new uses of pointers which were
    // otherwise not used after this statepoint.
    assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));

    Instruction *ClonedValue = Instr->clone();
    ClonedValue->insertBefore(InsertBefore);
    ClonedValue->setName(Instr->getName() + ".remat");

    // If it is not the first instruction in the chain then it uses the
    // previously cloned value. We should update it to use the cloned value.
    if (LastClonedValue) {
      assert(LastValue);
      ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
#ifndef NDEBUG
      for (auto *OpValue : ClonedValue->operand_values()) {
        // Assert that cloned instruction does not use any instructions from
        // this chain other than LastClonedValue
        assert(!is_contained(ChainToBase, OpValue) &&
               "incorrect use in rematerialization chain");
        // Assert that the cloned instruction does not use the RootOfChain
        // or the AlternateLiveBase.
        assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
      }
#endif
    } else {
      // For the first instruction, replace the use of unrelocated base i.e.
      // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
      // live set. They have been proved to be the same PHI nodes. Note
      // that the *only* use of the RootOfChain in the ChainToBase list is
      // the first Value in the list.
      if (RootOfChain != AlternateLiveBase)
        ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
    }

    LastClonedValue = ClonedValue;
    LastValue = Instr;
  }
  assert(LastClonedValue);
  return LastClonedValue;
}
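
// For example (hypothetical chain): if %derived = gep(bitcast %base) is cheap
// to recompute, the bitcast and gep are cloned after the statepoint and
// re-rooted at the relocated base, so only %base has to be relocated instead
// of both pointers.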

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call. One case which can arise is a phi node
// at the start of one of the successor blocks. We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the first
  // instruction in Ret if needed.
  return Ret;
}

// List of all function attributes which must be stripped when lowering from
// abstract machine model to physical machine model. Essentially, these are
// all the effects a safepoint might have which we ignored in the abstract
// machine model for purposes of optimization. We have to strip these on
// both function declarations and call sites.
static constexpr Attribute::AttrKind FnAttrsToStrip[] =
    {Attribute::Memory, Attribute::NoSync, Attribute::NoFree};

// Create new attribute set containing only attributes which can be transferred
// from original call to the safepoint.
static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
                                            AttributeList OrigAL,
                                            AttributeList StatepointAL) {
  if (OrigAL.isEmpty())
    return StatepointAL;

  // Remove the readonly, readnone, and statepoint function attributes.
  AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs());
  for (auto Attr : FnAttrsToStrip)
    FnAttrs.removeAttribute(Attr);

  for (Attribute A : OrigAL.getFnAttrs()) {
    if (isStatepointDirectiveAttr(A))
      FnAttrs.removeAttribute(A);
  }

  // Just skip parameter and return attributes for now
  return StatepointAL.addFnAttributes(Ctx, FnAttrs);
}
1517
1518/// Helper function to place all gc relocates necessary for the given
1519/// statepoint.
1520/// Inputs:
1521/// liveVariables - list of variables to be relocated.
1522/// basePtrs - base pointers.
1523/// statepointToken - statepoint instruction to which relocates should be
1524/// bound.
1525/// Builder - Llvm IR builder to be used to construct new calls.
1527 ArrayRef<Value *> BasePtrs,
1528 Instruction *StatepointToken,
1529 IRBuilder<> &Builder, GCStrategy *GC) {
1530 if (LiveVariables.empty())
1531 return;
1532
1533 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1534 auto ValIt = llvm::find(LiveVec, Val);
1535 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1536 size_t Index = std::distance(LiveVec.begin(), ValIt);
1537 assert(Index < LiveVec.size() && "Bug in std::find?");
1538 return Index;
1539 };
1540 Module *M = StatepointToken->getModule();
1541
1542 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1543 // element type is i8 addrspace(1)*). We originally generated unique
1544 // declarations for each pointer type, but this proved problematic because
1545 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1546 // towards a single unified pointer type anyway, we can just cast everything
1547 // to an i8* of the right address space. A bitcast is added later to convert
1548 // gc_relocate to the actual value's type.
1549 auto getGCRelocateDecl = [&](Type *Ty) {
1550 assert(isHandledGCPointerType(Ty, GC));
1551 auto AS = Ty->getScalarType()->getPointerAddressSpace();
1552 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
1553 if (auto *VT = dyn_cast<VectorType>(Ty))
1554 NewTy = FixedVectorType::get(NewTy,
1555 cast<FixedVectorType>(VT)->getNumElements());
1556 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
1557 {NewTy});
1558 };
1559
1560 // Lazily populated map from input types to the canonicalized form mentioned
1561 // in the comment above. This should probably be cached somewhere more
1562 // broadly.
1563 DenseMap<Type *, Function *> TypeToDeclMap;
1564
1565 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1566 // Generate the gc.relocate call and save the result
1567 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
1568 Value *LiveIdx = Builder.getInt32(i);
1569
1570 Type *Ty = LiveVariables[i]->getType();
1571 if (!TypeToDeclMap.count(Ty))
1572 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1573 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1574
1575 // only specify a debug name if we can give a useful one
1576 CallInst *Reloc = Builder.CreateCall(
1577 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
1578 suffixed_name_or(LiveVariables[i], ".relocated", ""));
1579 // Trick CodeGen into thinking there are lots of free registers at this
1580 // fake call.
1581 Reloc->setCallingConv(CallingConv::Cold);
1582 }
1583}
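// For illustration only (schematic IR; the intrinsic suffix and indices are
// hypothetical): for a live value %obj whose base is also %obj, the loop
// above emits something along the lines of
//
//   %obj.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %statepoint_token,
//                                           i32 7, i32 7)
//
// where both i32 operands index into the statepoint's gc-live operand list.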
1584
1585namespace {
1586
1587/// This struct is used to defer RAUWs and `eraseFromParent`s. Using this
1588/// avoids having to worry about keeping around dangling pointers to Values.
1589class DeferredReplacement {
1590 AssertingVH<Instruction> Old;
1591 AssertingVH<Instruction> New;
1592 bool IsDeoptimize = false;
1593
1594 DeferredReplacement() = default;
1595
1596public:
1597 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1598 assert(Old != New && Old && New &&
1599 "Cannot RAUW equal values or to / from null!");
1600
1601 DeferredReplacement D;
1602 D.Old = Old;
1603 D.New = New;
1604 return D;
1605 }
1606
1607 static DeferredReplacement createDelete(Instruction *ToErase) {
1608 DeferredReplacement D;
1609 D.Old = ToErase;
1610 return D;
1611 }
1612
1613 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1614#ifndef NDEBUG
1615 auto *F = cast<CallInst>(Old)->getCalledFunction();
1616 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1617 "Only way to construct a deoptimize deferred replacement");
1618#endif
1619 DeferredReplacement D;
1620 D.Old = Old;
1621 D.IsDeoptimize = true;
1622 return D;
1623 }
1624
1625 /// Does the task represented by this instance.
1626 void doReplacement() {
1627 Instruction *OldI = Old;
1628 Instruction *NewI = New;
1629
1630 assert(OldI != NewI && "Disallowed at construction?!");
1631 assert((!IsDeoptimize || !New) &&
1632 "Deoptimize intrinsics are not replaced!");
1633
1634 Old = nullptr;
1635 New = nullptr;
1636
1637 if (NewI)
1638 OldI->replaceAllUsesWith(NewI);
1639
1640 if (IsDeoptimize) {
1641 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1642 // not necessarily be followed by the matching return.
1643 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
1644 new UnreachableInst(RI->getContext(), RI);
1645 RI->eraseFromParent();
1646 }
1647
1648 OldI->eraseFromParent();
1649 }
1650};
1651
1652} // end anonymous namespace
1653
1654static StringRef getDeoptLowering(CallBase *Call) {
1655 const char *DeoptLowering = "deopt-lowering";
1656 if (Call->hasFnAttr(DeoptLowering)) {
1657 // FIXME: Calls have a *really* confusing interface around attributes
1658 // with values.
1659 const AttributeList &CSAS = Call->getAttributes();
1660 if (CSAS.hasFnAttr(DeoptLowering))
1661 return CSAS.getFnAttr(DeoptLowering).getValueAsString();
1662 Function *F = Call->getCalledFunction();
1663 assert(F && F->hasFnAttribute(DeoptLowering));
1664 return F->getFnAttribute(DeoptLowering).getValueAsString();
1665 }
1666 return "live-through";
1667}
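// For illustration only (hypothetical IR): the lowering request can sit on
// either the call site or the callee declaration, e.g.
//
//   call void @foo() "deopt-lowering"="live-in" [ "deopt"(i32 0) ]
//
// Absent the attribute, "live-through" is assumed.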
1668
1669static void
1670makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1671 const SmallVectorImpl<Value *> &BasePtrs,
1672 const SmallVectorImpl<Value *> &LiveVariables,
1673 PartiallyConstructedSafepointRecord &Result,
1674 std::vector<DeferredReplacement> &Replacements,
1675 const PointerToBaseTy &PointerToBase,
1676 GCStrategy *GC) {
1677 assert(BasePtrs.size() == LiveVariables.size());
1678
1679 // Then go ahead and use the builder to actually do the inserts. We insert
1680 // immediately before the previous instruction under the assumption that all
1681 // arguments will be available here. We can't insert afterwards since we may
1682 // be replacing a terminator.
1683 IRBuilder<> Builder(Call);
1684
1685 ArrayRef<Value *> GCArgs(LiveVariables);
1686 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1687 uint32_t NumPatchBytes = 0;
1688 uint32_t Flags = uint32_t(StatepointFlags::None);
1689
1690 SmallVector<Value *, 8> CallArgs(Call->args());
1691 std::optional<ArrayRef<Use>> DeoptArgs;
1692 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1693 DeoptArgs = Bundle->Inputs;
1694 std::optional<ArrayRef<Use>> TransitionArgs;
1695 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1696 TransitionArgs = Bundle->Inputs;
1697 // TODO: This flag no longer serves a purpose and can be removed later
1698 Flags |= uint32_t(StatepointFlags::GCTransition);
1699 }
1700
1701 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1702 // with a return value, we lower them as never-returning calls to
1703 // __llvm_deoptimize that are followed by unreachable to get better codegen.
1704 bool IsDeoptimize = false;
1705
1706 StatepointDirectives SD =
1707 parseStatepointDirectivesFromAttrs(Call->getAttributes());
1708 if (SD.NumPatchBytes)
1709 NumPatchBytes = *SD.NumPatchBytes;
1710 if (SD.StatepointID)
1711 StatepointID = *SD.StatepointID;
1712
1713 // Pass through the requested lowering if any. The default is live-through.
1714 StringRef DeoptLowering = getDeoptLowering(Call);
1715 if (DeoptLowering.equals("live-in"))
1716 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1717 else {
1718 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1719 }
1720
1721 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand());
1722 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) {
1723 auto IID = F->getIntrinsicID();
1724 if (IID == Intrinsic::experimental_deoptimize) {
1725 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1726 // __llvm_deoptimize symbol. We want to resolve this now, since the
1727 // verifier does not allow taking the address of an intrinsic function.
1728
1729 SmallVector<Type *, 8> DomainTy;
1730 for (Value *Arg : CallArgs)
1731 DomainTy.push_back(Arg->getType());
1732 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1733 /* isVarArg = */ false);
1734
1735 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1736 // calls to @llvm.experimental.deoptimize with different argument types in
1737 // the same module. This is fine -- we assume the frontend knew what it
1738 // was doing when generating this kind of IR.
1739 CallTarget = F->getParent()
1740 ->getOrInsertFunction("__llvm_deoptimize", FTy);
1741
1742 IsDeoptimize = true;
1743 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1744 IID == Intrinsic::memmove_element_unordered_atomic) {
1745 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1746 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1747 // Specifically, these calls should be lowered to the
1748 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1749 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1750 // verifier does not allow taking the address of an intrinsic function.
1751 //
1752 // Moreover we need to shuffle the arguments for the call in order to
1753 // accommodate GC. The underlying source and destination objects might be
1754 // relocated during the copy operation should a GC occur. To relocate the
1755 // derived source and destination pointers the implementation of the
1756 // intrinsic should know the corresponding base pointers.
1757 //
1758 // To make the base pointers available pass them explicitly as arguments:
1759 // memcpy(dest_derived, source_derived, ...) =>
1760 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
1761 auto &Context = Call->getContext();
1762 auto &DL = Call->getModule()->getDataLayout();
1763 auto GetBaseAndOffset = [&](Value *Derived) {
1764 Value *Base = nullptr;
1765 // Optimizations in unreachable code might substitute the real pointer
1766 // with undef, poison or a null-derived constant. Return a null base for
1767 // them to be consistent with the handling in the main algorithm in
1768 // findBaseDefiningValue.
1769 if (isa<Constant>(Derived))
1770 Base =
1771 ConstantPointerNull::get(cast<PointerType>(Derived->getType()));
1772 else {
1773 assert(PointerToBase.count(Derived));
1774 Base = PointerToBase.find(Derived)->second;
1775 }
1776 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1777 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
1778 Value *Base_int = Builder.CreatePtrToInt(
1779 Base, Type::getIntNTy(Context, IntPtrSize));
1780 Value *Derived_int = Builder.CreatePtrToInt(
1781 Derived, Type::getIntNTy(Context, IntPtrSize));
1782 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int));
1783 };
1784
1785 auto *Dest = CallArgs[0];
1786 Value *DestBase, *DestOffset;
1787 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest);
1788
1789 auto *Source = CallArgs[1];
1790 Value *SourceBase, *SourceOffset;
1791 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source);
1792
1793 auto *LengthInBytes = CallArgs[2];
1794 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]);
1795
1796 CallArgs.clear();
1797 CallArgs.push_back(DestBase);
1798 CallArgs.push_back(DestOffset);
1799 CallArgs.push_back(SourceBase);
1800 CallArgs.push_back(SourceOffset);
1801 CallArgs.push_back(LengthInBytes);
1802
1803 SmallVector<Type *, 8> DomainTy;
1804 for (Value *Arg : CallArgs)
1805 DomainTy.push_back(Arg->getType());
1806 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1807 /* isVarArg = */ false);
1808
1809 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1810 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1811 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1812 switch (ElementSize) {
1813 case 1:
1814 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1815 case 2:
1816 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1817 case 4:
1818 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1819 case 8:
1820 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1821 case 16:
1822 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1823 default:
1824 llvm_unreachable("unexpected element size!");
1825 }
1826 }
1827 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1828 switch (ElementSize) {
1829 case 1:
1830 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1831 case 2:
1832 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1833 case 4:
1834 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1835 case 8:
1836 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1837 case 16:
1838 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1839 default:
1840 llvm_unreachable("unexpected element size!");
1841 }
1842 };
1843
1844 CallTarget =
1845 F->getParent()
1846 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy);
1847 }
1848 }
1849
1850 // Create the statepoint given all the arguments
1851 GCStatepointInst *Token = nullptr;
1852 if (auto *CI = dyn_cast<CallInst>(Call)) {
1853 CallInst *SPCall = Builder.CreateGCStatepointCall(
1854 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
1855 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1856
1857 SPCall->setTailCallKind(CI->getTailCallKind());
1858 SPCall->setCallingConv(CI->getCallingConv());
1859
1860 // Currently we will fail on parameter attributes and on certain
1861 // function attributes. If we can handle this set of attributes,
1862 // set up function attrs directly on the statepoint and return attrs
1863 // later for the gc_result intrinsic.
1864 SPCall->setAttributes(legalizeCallAttributes(
1865 CI->getContext(), CI->getAttributes(), SPCall->getAttributes()));
1866
1867 Token = cast<GCStatepointInst>(SPCall);
1868
1869 // Put the following gc_result and gc_relocate calls immediately after
1870 // the old call (which we're about to delete)
1871 assert(CI->getNextNode() && "Not a terminator, must have next!");
1872 Builder.SetInsertPoint(CI->getNextNode());
1873 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1874 } else {
1875 auto *II = cast<InvokeInst>(Call);
1876
1877 // Insert the new invoke into the old block. We'll remove the old one in a
1878 // moment at which point this will become the new terminator for the
1879 // original block.
1880 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1881 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1882 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1883 "statepoint_token");
1884
1885 SPInvoke->setCallingConv(II->getCallingConv());
1886
1887 // Currently we will fail on parameter attributes and on certain
1888 // function attributes. If we can handle this set of attributes,
1889 // set up function attrs directly on the statepoint and return attrs
1890 // later for the gc_result intrinsic.
1891 SPInvoke->setAttributes(legalizeCallAttributes(
1892 II->getContext(), II->getAttributes(), SPInvoke->getAttributes()));
1893
1894 Token = cast<GCStatepointInst>(SPInvoke);
1895
1896 // Generate gc relocates in exceptional path
1897 BasicBlock *UnwindBlock = II->getUnwindDest();
1898 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1899 UnwindBlock->getUniquePredecessor() &&
1900 "can't safely insert in this block!");
1901
1902 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
1903 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1904
1905 // Attach exceptional gc relocates to the landingpad.
1906 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1907 Result.UnwindToken = ExceptionalToken;
1908
1909 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder, GC);
1910
1911 // Generate gc relocates and returns for normal block
1912 BasicBlock *NormalDest = II->getNormalDest();
1913 assert(!isa<PHINode>(NormalDest->begin()) &&
1914 NormalDest->getUniquePredecessor() &&
1915 "can't safely insert in this block!");
1916
1917 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());
1918
1919 // gc relocates will be generated later as if it were a regular call
1920 // statepoint
1921 }
1922 assert(Token && "Should be set in one of the above branches!");
1923
1924 if (IsDeoptimize) {
1925 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1926 // transform the tail-call like structure to a call to a void function
1927 // followed by unreachable to get better codegen.
1928 Replacements.push_back(
1929 DeferredReplacement::createDeoptimizeReplacement(Call));
1930 } else {
1931 Token->setName("statepoint_token");
1932 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1933 StringRef Name = Call->hasName() ? Call->getName() : "";
1934 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1935 GCResult->setAttributes(
1936 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1937 Call->getAttributes().getRetAttrs()));
1938
1939 // We cannot RAUW or delete the original call because it could be in the
1940 // live set of some other safepoint, in which case that safepoint's
1941 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1942 // llvm::Instruction. Instead, we defer the replacement and deletion to
1943 // after the live sets have been made explicit in the IR, and we no longer
1944 // have raw pointers to worry about.
1945 Replacements.emplace_back(
1946 DeferredReplacement::createRAUW(Call, GCResult));
1947 } else {
1948 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1949 }
1950 }
1951
1952 Result.StatepointToken = Token;
1953
1954 // Second, create a gc.relocate for every live variable
1955 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder, GC);
1956}
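// For illustration only (schematic IR; the statepoint ID below is the default
// 0xABCDEF00 and all value names are hypothetical): a call such as
//
//   %ret = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj) [ "deopt"(i32 0) ]
//
// is rewritten along the lines of
//
//   %statepoint_token = call token (i64, i32, ptr, i32, i32, ...)
//       @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr @foo,
//       i32 1, i32 0, i8 addrspace(1)* %obj, i32 0, i32 0)
//       [ "deopt"(i32 0), "gc-live"(i8 addrspace(1)* %obj) ]
//   %obj.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %statepoint_token, i32 0, i32 0)
//   %ret = call i8 addrspace(1)*
//       @llvm.experimental.gc.result.p1i8(token %statepoint_token)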
1957
1958// Replace an existing gc.statepoint with a new one and a set of gc.relocates
1959// which make the relocations happening at this safepoint explicit.
1960//
1961// WARNING: Does not do any fixup to adjust users of the original live
1962// values. That's the caller's responsibility.
1963static void
1964makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1965 PartiallyConstructedSafepointRecord &Result,
1966 std::vector<DeferredReplacement> &Replacements,
1967 const PointerToBaseTy &PointerToBase, GCStrategy *GC) {
1968 const auto &LiveSet = Result.LiveSet;
1969
1970 // Convert to vector for efficient cross referencing.
1971 SmallVector<Value *, 64> BaseVec, LiveVec;
1972 LiveVec.reserve(LiveSet.size());
1973 BaseVec.reserve(LiveSet.size());
1974 for (Value *L : LiveSet) {
1975 LiveVec.push_back(L);
1976 assert(PointerToBase.count(L));
1977 Value *Base = PointerToBase.find(L)->second;
1978 BaseVec.push_back(Base);
1979 }
1980 assert(LiveVec.size() == BaseVec.size());
1981
1982 // Do the actual rewriting and delete the old statepoint
1983 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements,
1984 PointerToBase, GC);
1985}
1986
1987// Helper function for the relocationViaAlloca.
1988//
1989// It receives an iterator over the statepoint gc relocates and emits a store
1990// to the assigned location (via AllocaMap) for each one of them. It adds the
1991// visited values into the VisitedLiveValues set, which we will later use
1992// for validation checking.
1993static void
1994insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1995 DenseMap<Value *, AllocaInst *> &AllocaMap,
1996 DenseSet<Value *> &VisitedLiveValues) {
1997 for (User *U : GCRelocs) {
1998 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1999 if (!Relocate)
2000 continue;
2001
2002 Value *OriginalValue = Relocate->getDerivedPtr();
2003 assert(AllocaMap.count(OriginalValue));
2004 Value *Alloca = AllocaMap[OriginalValue];
2005
2006 // Emit store into the related alloca
2007 // All gc_relocates are i8 addrspace(1)* typed; each must be bitcast to
2008 // the allocated type of the corresponding alloca.
2009 assert(Relocate->getNextNode() &&
2010 "Should always have one since it's not a terminator");
2011 IRBuilder<> Builder(Relocate->getNextNode());
2012 Value *CastedRelocatedValue =
2013 Builder.CreateBitCast(Relocate,
2014 cast<AllocaInst>(Alloca)->getAllocatedType(),
2015 suffixed_name_or(Relocate, ".casted", ""));
2016
2017 new StoreInst(CastedRelocatedValue, Alloca,
2018 cast<Instruction>(CastedRelocatedValue)->getNextNode());
2019
2020#ifndef NDEBUG
2021 VisitedLiveValues.insert(OriginalValue);
2022#endif
2023 }
2024}
2025
2026// Helper function for the "relocationViaAlloca". Similar to the
2027// "insertRelocationStores" but works for rematerialized values.
2028static void insertRematerializationStores(
2029 const RematerializedValueMapTy &RematerializedValues,
2030 DenseMap<Value *, AllocaInst *> &AllocaMap,
2031 DenseSet<Value *> &VisitedLiveValues) {
2032 for (auto RematerializedValuePair: RematerializedValues) {
2033 Instruction *RematerializedValue = RematerializedValuePair.first;
2034 Value *OriginalValue = RematerializedValuePair.second;
2035
2036 assert(AllocaMap.count(OriginalValue) &&
2037 "Can not find alloca for rematerialized value");
2038 Value *Alloca = AllocaMap[OriginalValue];
2039
2040 new StoreInst(RematerializedValue, Alloca,
2041 RematerializedValue->getNextNode());
2042
2043#ifndef NDEBUG
2044 VisitedLiveValues.insert(OriginalValue);
2045#endif
2046 }
2047}
2048
2049/// Do all the relocation update via allocas and mem2reg
2050static void relocationViaAlloca(
2051 Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
2052 ArrayRef<PartiallyConstructedSafepointRecord> Records) {
2053#ifndef NDEBUG
2054 // record initial number of (static) allocas; we'll check we have the same
2055 // number when we get done.
2056 int InitialAllocaNum = 0;
2057 for (Instruction &I : F.getEntryBlock())
2058 if (isa<AllocaInst>(I))
2059 InitialAllocaNum++;
2060#endif
2061
2062 // TODO-PERF: change data structures, reserve
2063 DenseMap<Value *, AllocaInst *> AllocaMap;
2064 SmallVector<AllocaInst *, 200> PromotableAllocas;
2065 // Used later to check that we have enough allocas to store all values
2066 std::size_t NumRematerializedValues = 0;
2067 PromotableAllocas.reserve(Live.size());
2068
2069 // Emit alloca for "LiveValue" and record it in "allocaMap" and
2070 // "PromotableAllocas"
2071 const DataLayout &DL = F.getParent()->getDataLayout();
2072 auto emitAllocaFor = [&](Value *LiveValue) {
2073 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
2074 DL.getAllocaAddrSpace(), "",
2075 F.getEntryBlock().getFirstNonPHI());
2076 AllocaMap[LiveValue] = Alloca;
2077 PromotableAllocas.push_back(Alloca);
2078 };
2079
2080 // Emit alloca for each live gc pointer
2081 for (Value *V : Live)
2082 emitAllocaFor(V);
2083
2084 // Emit allocas for rematerialized values
2085 for (const auto &Info : Records)
2086 for (auto RematerializedValuePair : Info.RematerializedValues) {
2087 Value *OriginalValue = RematerializedValuePair.second;
2088 if (AllocaMap.count(OriginalValue) != 0)
2089 continue;
2090
2091 emitAllocaFor(OriginalValue);
2092 ++NumRematerializedValues;
2093 }
2094
2095 // The next two loops are part of the same conceptual operation. We need to
2096 // insert a store to the alloca after the original def and at each
2097 // redefinition. We need to insert a load before each use. These are split
2098 // into distinct loops for performance reasons.
2099
2100 // Update gc pointer after each statepoint: either store a relocated value or
2101 // null (if no relocated value was found for this gc pointer and it is not a
2102 // gc_result). This must happen before we update the statepoint with a load
2103 // of the alloca, otherwise we lose the link between statepoint and old def.
2104 for (const auto &Info : Records) {
2105 Value *Statepoint = Info.StatepointToken;
2106
2107 // This will be used for consistency check
2108 DenseSet<Value *> VisitedLiveValues;
2109
2110 // Insert stores for normal statepoint gc relocates
2111 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
2112
2113 // If it was an invoke statepoint,
2114 // we will insert stores for the exceptional path gc relocates.
2115 if (isa<InvokeInst>(Statepoint)) {
2116 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
2117 VisitedLiveValues);
2118 }
2119
2120 // Do a similar thing with the rematerialized values
2121 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
2122 VisitedLiveValues);
2123
2124 if (ClobberNonLive) {
2125 // As a debugging aid, pretend that an unrelocated pointer becomes null at
2126 // the gc.statepoint. This will turn some subtle GC problems into
2127 // slightly easier to debug SEGVs. Note that on large IR files with
2128 // lots of gc.statepoints this is extremely costly in both memory and
2129 // time.
2130 SmallVector<AllocaInst *, 64> ToClobber;
2131 for (auto Pair : AllocaMap) {
2132 Value *Def = Pair.first;
2133 AllocaInst *Alloca = Pair.second;
2134
2135 // This value was relocated
2136 if (VisitedLiveValues.count(Def)) {
2137 continue;
2138 }
2139 ToClobber.push_back(Alloca);
2140 }
2141
2142 auto InsertClobbersAt = [&](Instruction *IP) {
2143 for (auto *AI : ToClobber) {
2144 auto AT = AI->getAllocatedType();
2145 Constant *CPN;
2146 if (AT->isVectorTy())
2147 CPN = ConstantAggregateZero::get(AT);
2148 else
2149 CPN = ConstantPointerNull::get(cast<PointerType>(AT));
2150 new StoreInst(CPN, AI, IP);
2151 }
2152 };
2153
2154 // Insert the clobbering stores. These may get intermixed with the
2155 // gc.results and gc.relocates, but that's fine.
2156 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2157 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
2158 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
2159 } else {
2160 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
2161 }
2162 }
2163 }
2164
2165 // Update the uses with loads from the alloca and add the store for the original def.
2166 for (auto Pair : AllocaMap) {
2167 Value *Def = Pair.first;
2168 AllocaInst *Alloca = Pair.second;
2169
2170 // We pre-record the uses of allocas so that we don't have to worry about
2171 // later updates that change the user information.
2172
2173 SmallVector<Instruction *, 20> Uses;
2174 // PERF: trade a linear scan for repeated reallocation
2175 Uses.reserve(Def->getNumUses());
2176 for (User *U : Def->users()) {
2177 if (!isa<ConstantExpr>(U)) {
2178 // If the def has a ConstantExpr use, then the def is either a
2179 // ConstantExpr use itself or null. In either case
2180 // (recursively in the first, directly in the second), the oop
2181 // it is ultimately dependent on is null and this particular
2182 // use does not need to be fixed up.
2183 Uses.push_back(cast<Instruction>(U));
2184 }
2185 }
2186
2187 llvm::sort(Uses);
2188 auto Last = std::unique(Uses.begin(), Uses.end());
2189 Uses.erase(Last, Uses.end());
2190
2191 for (Instruction *Use : Uses) {
2192 if (isa<PHINode>(Use)) {
2193 PHINode *Phi = cast<PHINode>(Use);
2194 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2195 if (Def == Phi->getIncomingValue(i)) {
2196 LoadInst *Load =
2197 new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2198 Phi->getIncomingBlock(i)->getTerminator());
2199 Phi->setIncomingValue(i, Load);
2200 }
2201 }
2202 } else {
2203 LoadInst *Load =
2204 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
2205 Use->replaceUsesOfWith(Def, Load);
2206 }
2207 }
2208
2209 // Emit store for the initial gc value. The store must be inserted after
2210 // the loads, otherwise the store will be in the alloca's use list and an
2211 // extra load will be inserted before it.
2212 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2213 DL.getABITypeAlign(Def->getType()));
2214 if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
2215 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
2216 // InvokeInst is a terminator, so the store needs to be inserted into its
2217 // normal destination block.
2218 BasicBlock *NormalDest = Invoke->getNormalDest();
2219 Store->insertBefore(NormalDest->getFirstNonPHI());
2220 } else {
2221 assert(!Inst->isTerminator() &&
2222 "The only terminator that can produce a value is "
2223 "InvokeInst which is handled above.");
2224 Store->insertAfter(Inst);
2225 }
2226 } else {
2227 assert(isa<Argument>(Def));
2228 Store->insertAfter(cast<Instruction>(Alloca));
2229 }
2230 }
2231
2232 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2233 "we must have the same allocas with lives");
2234 (void) NumRematerializedValues;
2235 if (!PromotableAllocas.empty()) {
2236 // Apply mem2reg to promote alloca to SSA
2237 PromoteMemToReg(PromotableAllocas, DT);
2238 }
2239
2240#ifndef NDEBUG
2241 for (auto &I : F.getEntryBlock())
2242 if (isa<AllocaInst>(I))
2243 InitialAllocaNum--;
2244 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2245#endif
2246}
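// A sketch of the overall demotion, for illustration only (hypothetical IR):
// each live pointer %obj is funneled through an alloca,
//
//   %obj.alloca = alloca i8 addrspace(1)*
//   store i8 addrspace(1)* %obj, i8 addrspace(1)** %obj.alloca   ; initial def
//   ; ... statepoint, gc.relocate ...
//   store i8 addrspace(1)* %obj.relocated, i8 addrspace(1)** %obj.alloca
//   %obj.reload = load i8 addrspace(1)*, i8 addrspace(1)** %obj.alloca
//
// and PromoteMemToReg then rewrites the allocas back into SSA form, inserting
// whatever phi nodes the relocations require.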
2247
2248/// Implement a unique function which doesn't require us to sort the input
2249/// vector. Doing so has the effect of changing the output of a couple of
2250/// tests in ways which make them less useful in testing fused safepoints.
2251template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2252 SmallSet<T, 8> Seen;
2253 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2254}
2255
2256/// Insert holders so that each Value is obviously live through the entire
2257/// lifetime of the call.
2258static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2259 SmallVectorImpl<CallInst *> &Holders) {
2260 if (Values.empty())
2261 // No values to hold live, might as well not insert the empty holder
2262 return;
2263
2264 Module *M = Call->getModule();
2265 // Use a dummy vararg function to actually hold the values live
2266 FunctionCallee Func = M->getOrInsertFunction(
2267 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2268 if (isa<CallInst>(Call)) {
2269 // For call safepoints insert dummy calls right after safepoint
2270 Holders.push_back(
2271 CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2272 return;
2273 }
2274 // For invoke safepoints insert dummy calls in both the normal and
2275 // exceptional destination blocks
2276 auto *II = cast<InvokeInst>(Call);
2277 Holders.push_back(CallInst::Create(
2278 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2279 Holders.push_back(CallInst::Create(
2280 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2281}
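// For illustration only (hypothetical IR): a holder is simply
//
//   call void (...) @__tmp_use(i8 addrspace(1)* %obj, i8 addrspace(1)* %base)
//
// placed right after the safepoint; it keeps its operands trivially live for
// the liveness queries and is erased once it is no longer needed.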
2282
2283static void findLiveReferences(
2284 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2285 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
2286 GCStrategy *GC) {
2287 GCPtrLivenessData OriginalLivenessData;
2288 computeLiveInValues(DT, F, OriginalLivenessData, GC);
2289 for (size_t i = 0; i < records.size(); i++) {
2290 struct PartiallyConstructedSafepointRecord &info = records[i];
2291 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info, GC);
2292 }
2293}
2294
2295// Helper function for the "rematerializeLiveValues". It walks the use chain
2296// starting from the "CurrentValue" until it reaches the root of the chain,
2297// i.e. the base or a value it cannot process. Only "simple" values are
2298// processed (currently GEPs and noop casts). The returned root is examined by
2299// the callers of findRematerializableChainToBasePointer. Fills the
2300// "ChainToBase" array with all visited values.
2301static Value* findRematerializableChainToBasePointer(
2302 SmallVectorImpl<Instruction*> &ChainToBase,
2303 Value *CurrentValue) {
2304 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2305 ChainToBase.push_back(GEP);
2306 return findRematerializableChainToBasePointer(ChainToBase,
2307 GEP->getPointerOperand());
2308 }
2309
2310 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2311 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2312 return CI;
2313
2314 ChainToBase.push_back(CI);
2315 return findRematerializableChainToBasePointer(ChainToBase,
2316 CI->getOperand(0));
2317 }
2318
2319 // We have reached the root of the chain, which is either equal to the base or
2320 // is the first unsupported value along the use chain.
2321 return CurrentValue;
2322}
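// For illustration only (hypothetical IR): for a derived pointer built as
//
//   %a = getelementptr i32, i32 addrspace(1)* %base, i64 4
//   %b = getelementptr i32, i32 addrspace(1)* %a, i64 1
//
// the walk starting at %b fills ChainToBase with [%b, %a] and returns %base
// as the root.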
2323
2324// Helper function for the "rematerializeLiveValues". Compute the cost of
2325// the use chain we are going to rematerialize.
2326static InstructionCost
2327chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2328 TargetTransformInfo &TTI) {
2329 InstructionCost Cost = 0;
2330
2331 for (Instruction *Instr : Chain) {
2332 if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
2333 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
2334 "non noop cast is found during rematerialization");
2335
2336 Type *SrcTy = CI->getOperand(0)->getType();
2337 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy,
2338 TTI::getCastContextHint(CI),
2339 TargetTransformInfo::TCK_SizeAndLatency, CI);
2340
2341 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
2342 // Cost of the address calculation
2343 Type *ValTy = GEP->getSourceElementType();
2344 Cost += TTI.getAddressComputationCost(ValTy);
2345
2346 // And cost of the GEP itself
2347 // TODO: Use TTI->getGEPCost here (it exists, but appears not to be
2348 // allowed for external usage)
2349 if (!GEP->hasAllConstantIndices())
2350 Cost += 2;
2351
2352 } else {
2353 llvm_unreachable("unsupported instruction type during rematerialization");
2354 }
2355 }
2356
2357 return Cost;
2358}
2359
2360static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2361 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2362 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2363 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2364 return false;
2365 // Map of incoming values and their corresponding basic blocks of
2366 // OrigRootPhi.
2367 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2368 for (unsigned i = 0; i < PhiNum; i++)
2369 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2370 OrigRootPhi.getIncomingBlock(i);
2371
2372 // Both current and base PHIs should have the same incoming values and
2373 // the same basic blocks corresponding to the incoming values.
2374 for (unsigned i = 0; i < PhiNum; i++) {
2375 auto CIVI =
2376 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
2377 if (CIVI == CurrentIncomingValues.end())
2378 return false;
2379 BasicBlock *CurrentIncomingBB = CIVI->second;
2380 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2381 return false;
2382 }
2383 return true;
2384}
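// For illustration only (hypothetical IR): the check above treats
//
//   %phi      = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
//   %phi.base = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
//
// in the same block as equivalent, since their incoming value/block pairs
// match exactly.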
2385
2386// Find derived pointers that can be recomputed cheaply enough and fill
2387// RematerizationCandidates with such candidates.
2388static void
2389findRematerializationCandidates(PointerToBaseTy PointerToBase,
2390 RematCandTy &RematerizationCandidates,
2391 TargetTransformInfo &TTI) {
2392 const unsigned int ChainLengthThreshold = 10;
2393
2394 for (auto P2B : PointerToBase) {
2395 auto *Derived = P2B.first;
2396 auto *Base = P2B.second;
2397 // Consider only derived pointers.
2398 if (Derived == Base)
2399 continue;
2400
2401 // For each live pointer find its defining chain.
2402 SmallVector<Instruction *, 3> ChainToBase;
2403 Value *RootOfChain =
2404 findRematerializableChainToBasePointer(ChainToBase, Derived);
2405
2406 // Nothing to do, or chain is too long
2407 if (ChainToBase.size() == 0 ||
2408 ChainToBase.size() > ChainLengthThreshold)
2409 continue;
2410
2411 // Handle the scenario where the RootOfChain is not equal to the
2412 // Base Value, but they are essentially the same phi values.
2413 if (RootOfChain != PointerToBase[Derived]) {
2414 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2415 PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]);
2416 if (!OrigRootPhi || !AlternateRootPhi)
2417 continue;
2418 // PHI nodes that have the same incoming values and belong to the same
2419 // basic block are essentially the same SSA value. When the original phi
2420 // has incoming values with different base pointers, the original phi is
2421 // marked as a conflict, and an additional `AlternateRootPhi` with the same
2422 // incoming values gets generated by the findBasePointer function. We need
2423 // to identify that the newly generated AlternateRootPhi (the .base version
2424 // of the phi) and RootOfChain (the original phi node itself) are the same,
2425 // so that we can rematerialize the gep and casts. This is a workaround for
2426 // the deficiency in the findBasePointer algorithm.
2427 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2428 continue;
2429 }
2430 // Compute the cost of this chain.
2431 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2432 // TODO: We can also account for cases when we will be able to remove some
2433 // of the rematerialized values by later optimization passes. E.g. if
2434 // we rematerialized several intersecting chains, or if the original
2435 // values don't have any uses besides this statepoint.
2436
2437 // Ok, there is a candidate.
2438 RematerizlizationCandidateRecord Record;
2439 Record.ChainToBase = ChainToBase;
2440 Record.RootOfChain = RootOfChain;
2441 Record.Cost = Cost;
2442 RematerizationCandidates.insert({ Derived, Record });
2443 }
2444}
2445
2446// Try to rematerialize derived pointers immediately before their uses
2447// (instead of rematerializing after every statepoint it is live through).
2448// This can be beneficial when a derived pointer is live across many
2449// statepoints but its uses are rare.
2450static void rematerializeLiveValuesAtUses(
2451 RematCandTy &RematerizationCandidates,
2452 SmallVectorImpl<PartiallyConstructedSafepointRecord> &Records,
2453 PointerToBaseTy &PointerToBase) {
2454 if (!RematDerivedAtUses)
2455 return;
2456
2457 SmallVector<Instruction *, 32> LiveValuesToBeDeleted;
2458
2459 LLVM_DEBUG(dbgs() << "Rematerialize derived pointers at uses, "
2460 << "Num statepoints: " << Records.size() << '\n');
2461
2462 for (auto &It : RematerizationCandidates) {
2463 Instruction *Cand = cast<Instruction>(It.first);
2464 auto &Record = It.second;
2465
2466 if (Record.Cost >= RematerializationThreshold)
2467 continue;
2468
2469 if (Cand->user_empty())
2470 continue;
2471
2472 if (Cand->hasOneUse())
2473 if (auto *U = dyn_cast<Instruction>(Cand->getUniqueUndroppableUser()))
2474 if (U->getParent() == Cand->getParent())
2475 continue;
2476
2477 // Rematerialization before PHI nodes is not implemented.
2478 if (llvm::any_of(Cand->users(),
2479 [](const auto *U) { return isa<PHINode>(U); }))
2480 continue;
2481
2482 LLVM_DEBUG(dbgs() << "Trying cand " << *Cand << " ... ");
2483
2484 // The count of rematerialization instructions we introduce is equal to the
2485 // number of candidate uses.
2486 // The count of rematerialization instructions we eliminate is equal to the
2487 // number of statepoints it is live through.
2488 // Consider the transformation profitable if the latter is greater than the
2489 // former (in other words, we create fewer instructions than we eliminate).
2490 unsigned NumLiveStatepoints = llvm::count_if(
2491 Records, [Cand](const auto &R) { return R.LiveSet.contains(Cand); });
2492 unsigned NumUses = Cand->getNumUses();
2493
2494 LLVM_DEBUG(dbgs() << "Num uses: " << NumUses << " Num live statepoints: "
2495 << NumLiveStatepoints << " ");
2496
2497 if (NumLiveStatepoints < NumUses) {
2498 LLVM_DEBUG(dbgs() << "not profitable\n");
2499 continue;
2500 }
2501
2502 // If rematerialization is 'free', then favor rematerialization at
2503 // uses as it generally shortens live ranges.
2504 // TODO: Short (size ==1) chains only?
2505 if (NumLiveStatepoints == NumUses && Record.Cost > 0) {
2506 LLVM_DEBUG(dbgs() << "not profitable\n");
2507 continue;
2508 }
2509
2510 LLVM_DEBUG(dbgs() << "looks profitable\n");
2511
2512 // ChainToBase may contain another remat candidate (as a sub-chain) which
2513 // has been rewritten by now. Need to recollect the chain to have an
2514 // up-to-date value.
2515 // TODO: sort records in findRematerializationCandidates() in
2516 // decreasing chain size order?
2517 if (Record.ChainToBase.size() > 1) {
2518 Record.ChainToBase.clear();
2519 findRematerializableChainToBasePointer(Record.ChainToBase, Cand);
2520 }
2521
2522 // The current rematerialization algorithm is very simple: we rematerialize
2523 // immediately before EVERY use, even if there are several uses in the same
2524 // block or the use is local to the Cand def. The reason is that this allows
2525 // us to avoid recomputing liveness without complicated analysis:
2526 // - If we did not eliminate all uses of the original Candidate, we do not
2527 // know exactly in which BBs it is still live.
2528 // - If we rematerialize once per BB, we need to find a proper insertion
2529 // place (first use in block, but after Def) and analyze whether there is
2530 // a statepoint between uses in the block.
2531 while (!Cand->user_empty()) {
2532 Instruction *UserI = cast<Instruction>(*Cand->user_begin());
2533 Instruction *RematChain = rematerializeChain(
2534 Record.ChainToBase, UserI, Record.RootOfChain, PointerToBase[Cand]);
2535 UserI->replaceUsesOfWith(Cand, RematChain);
2536 PointerToBase[RematChain] = PointerToBase[Cand];
2537 }
2538 LiveValuesToBeDeleted.push_back(Cand);
2539 }
2540
2541 LLVM_DEBUG(dbgs() << "Rematerialized " << LiveValuesToBeDeleted.size()
2542 << " derived pointers\n");
2543 for (auto *Cand : LiveValuesToBeDeleted) {
2544 assert(Cand->use_empty() && "Unexpected user remain");
2545 RematerizationCandidates.erase(Cand);
2546 for (auto &R : Records) {
2547 assert(!R.LiveSet.contains(Cand) ||
2548 R.LiveSet.contains(PointerToBase[Cand]));
2549 R.LiveSet.remove(Cand);
2550 }
2551 }
2552
2553 // Recollect the chains that were not rematerialized - we might have
2554 // rewritten their sub-chains.
2555 if (!LiveValuesToBeDeleted.empty()) {
2556 for (auto &P : RematerizationCandidates) {
2557 auto &R = P.second;
2558 if (R.ChainToBase.size() > 1) {
2559 R.ChainToBase.clear();
2560 findRematerializableChainToBasePointer(R.ChainToBase, P.first);
2561 }
2562 }
2563 }
2564}
2565
2566// From the statepoint live set pick values that are cheaper to recompute
2567// than to relocate. Remove these values from the live set, rematerialize
2568// them after the statepoint and record them in the "Info" structure. Note
2569// that, as with relocated values, we don't do any user adjustments here.
2570static void rematerializeLiveValues(CallBase *Call,
2571 PartiallyConstructedSafepointRecord &Info,
2572 PointerToBaseTy &PointerToBase,
2573 RematCandTy &RematerizationCandidates,
2574 TargetTransformInfo &TTI) {
2575 // Record values we are going to delete from this statepoint live set.
2576 // We cannot do this in the following loop due to iterator invalidation.
2577 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2578
2579 for (Value *LiveValue : Info.LiveSet) {
2580 auto It = RematerizationCandidates.find(LiveValue);
2581 if (It == RematerizationCandidates.end())
2582 continue;
2583
2584 RematerizlizationCandidateRecord &Record = It->second;
2585
2586 InstructionCost Cost = Record.Cost;
2587 // For invokes we need to rematerialize each chain twice - for normal and
2588 // for unwind basic blocks. Model this by multiplying cost by two.
2589 if (isa<InvokeInst>(Call))
2590 Cost *= 2;
2591
2592 // If it's too expensive - skip it.
2593 if (Cost >= RematerializationThreshold)
2594 continue;
2595
2596 // Remove value from the live set
2597 LiveValuesToBeDeleted.push_back(LiveValue);
2598
2599 // Clone instructions and record them inside "Info" structure.
2600
2601 // Different cases for calls and invokes. For invokes we need to clone
2602 // instructions both on normal and unwind path.
2603 if (isa<CallInst>(Call)) {
2604 Instruction *InsertBefore = Call->getNextNode();
2605 assert(InsertBefore);
2606 Instruction *RematerializedValue =
2607 rematerializeChain(Record.ChainToBase, InsertBefore,
2608 Record.RootOfChain, PointerToBase[LiveValue]);
2609 Info.RematerializedValues[RematerializedValue] = LiveValue;
2610 } else {
2611 auto *Invoke = cast<InvokeInst>(Call);
2612
2613 Instruction *NormalInsertBefore =
2614 &*Invoke->getNormalDest()->getFirstInsertionPt();
2615 Instruction *UnwindInsertBefore =
2616 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2617
2618 Instruction *NormalRematerializedValue =
2619 rematerializeChain(Record.ChainToBase, NormalInsertBefore,
2620 Record.RootOfChain, PointerToBase[LiveValue]);
2621 Instruction *UnwindRematerializedValue =
2622 rematerializeChain(Record.ChainToBase, UnwindInsertBefore,
2623 Record.RootOfChain, PointerToBase[LiveValue]);
2624
2625 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2626 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2627 }
2628 }
2629
2630 // Remove rematerialized values from the live set.
2631 for (auto *LiveValue: LiveValuesToBeDeleted) {
2632 Info.LiveSet.remove(LiveValue);
2633 }
2634}
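// For illustration only (schematic IR; names are hypothetical): for a
// candidate %d = getelementptr i8, i8 addrspace(1)* %base, i64 16 that is
// live across a call statepoint, the chain is cloned right after the
// statepoint against the relocated base,
//
//   %base.relocated = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate...
//   %d.remat = getelementptr i8, i8 addrspace(1)* %base.relocated, i64 16
//
// so %d itself no longer needs a gc.relocate of its own.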
2635
2636static bool inlineGetBaseAndOffset(Function &F,
2637 SmallVectorImpl<CallInst *> &Intrinsics,
2638 DefiningValueMapTy &DVCache,
2639 IsKnownBaseMapTy &KnownBases) {
2640 auto &Context = F.getContext();
2641 auto &DL = F.getParent()->getDataLayout();
2642 bool Changed = false;
2643
2644 for (auto *Callsite : Intrinsics)
2645 switch (Callsite->getIntrinsicID()) {
2646 case Intrinsic::experimental_gc_get_pointer_base: {
2647 Changed = true;
2648 Value *Base =
2649 findBasePointer(Callsite->getOperand(0), DVCache, KnownBases);
2650 assert(!DVCache.count(Callsite));
2651 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast(
2652 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", ""));
2653 if (BaseBC != Base)
2654 DVCache[BaseBC] = Base;
2655 Callsite->replaceAllUsesWith(BaseBC);
2656 if (!BaseBC->hasName())
2657 BaseBC->takeName(Callsite);
2658 Callsite->eraseFromParent();
2659 break;
2660 }
2661 case Intrinsic::experimental_gc_get_pointer_offset: {
2662 Changed = true;
2663 Value *Derived = Callsite->getOperand(0);
2664 Value *Base = findBasePointer(Derived, DVCache, KnownBases);
2665 assert(!DVCache.count(Callsite));
2666 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
2667 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
2668 IRBuilder<> Builder(Callsite);
2669 Value *BaseInt =
2670 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize),
2671 suffixed_name_or(Base, ".int", ""));
2672 Value *DerivedInt =
2673 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize),
2674 suffixed_name_or(Derived, ".int", ""));
2675 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt);
2676 Callsite->replaceAllUsesWith(Offset);
2677 Offset->takeName(Callsite);
2678 Callsite->eraseFromParent();
2679 break;
2680 }
2681 default:
2682 llvm_unreachable("Unknown intrinsic");
2683 }
2684
2685 return Changed;
2686}
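// For illustration only (hypothetical IR): a base query such as
//
//   %base = call i8 addrspace(1)*
//       @llvm.experimental.gc.get.pointer.base.p1i8(i8 addrspace(1)* %derived)
//
// is folded away here: the base found by findBasePointer (bitcast to the
// query's type if necessary) replaces all uses of the call, and the offset
// variant is lowered to an explicit ptrtoint/sub sequence.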
2687
2688static bool insertParsePoints(Function &F, DominatorTree &DT,
2689 TargetTransformInfo &TTI,
2690 SmallVectorImpl<CallBase *> &ToUpdate,
2691 DefiningValueMapTy &DVCache,
2692 IsKnownBaseMapTy &KnownBases) {
2693 std::unique_ptr<GCStrategy> GC = findGCStrategy(F);
2694
2695#ifndef NDEBUG
2696 // Validate the input
2697 std::set<CallBase *> Uniqued;
2698 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2699 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2700
2701 for (CallBase *Call : ToUpdate)
2702 assert(Call->getFunction() == &F);
2703#endif
2704
2705 // When inserting gc.relocates for invokes, we need to be able to insert at
2706 // the top of the successor blocks. See the comment on
2707 // normalizeForInvokeSafepoint on exactly what is needed. Note that this step
2708 // may restructure the CFG.
2709 for (CallBase *Call : ToUpdate) {
2710 auto *II = dyn_cast<InvokeInst>(Call);
2711 if (!II)
2712 continue;
2713 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2714 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2715 }
2716
2717 // A list of dummy calls added to the IR to keep various values obviously
2718 // live in the IR. We'll remove all of these when done.
2719 SmallVector<CallInst *, 64> Holders;
2720
2721 // Insert a dummy call with all of the deopt operands we'll need for the
2722 // actual safepoint insertion as arguments. This ensures reference operands
2723 // in the deopt argument list are considered live through the safepoint (and
2724 // thus makes sure they get relocated.)
2725 for (CallBase *Call : ToUpdate) {
2726 SmallVector<Value *, 64> DeoptValues;
2727
2728 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2729 assert(!isUnhandledGCPointerType(Arg->getType(), GC.get()) &&
2730 "support for FCA unimplemented");
2731 if (isHandledGCPointerType(Arg->getType(), GC.get()))
2732 DeoptValues.push_back(Arg);
2733 }
2734
2735 insertUseHolderAfter(Call, DeoptValues, Holders);
2736 }
2737
2738 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2739
2740 // A) Identify all gc pointers which are statically live at the given call
2741 // site.
2742 findLiveReferences(F, DT, ToUpdate, Records, GC.get());
2743
2744 /// Global mapping from live pointers to a base-defining-value.
2745 PointerToBaseTy PointerToBase;
2746
2747 // B) Find the base pointers for each live pointer
2748 for (size_t i = 0; i < Records.size(); i++) {
2749 PartiallyConstructedSafepointRecord &info = Records[i];
2750 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase, KnownBases);
2751 }
2752 if (PrintBasePointers) {
2753 errs() << "Base Pairs (w/o Relocation):\n";
2754 for (auto &Pair : PointerToBase) {
2755 errs() << " derived ";
2756 Pair.first->printAsOperand(errs(), false);
2757 errs() << " base ";
2758 Pair.second->printAsOperand(errs(), false);
2759 errs() << "\n";
2760
2761 }
2762 }
2763
2764 // The base phi insertion logic (for any safepoint) may have inserted new
2765 // instructions which are now live at some safepoint. The simplest such
2766 // example is:
2767 // loop:
2768 // phi a <-- will be a new base_phi here
2769 // safepoint 1 <-- that needs to be live here
2770 // gep a + 1
2771 // safepoint 2
2772 // br loop
2773 // We insert some dummy calls after each safepoint to definitely hold live
2774 // the base pointers which were identified for that safepoint. We'll then
2775 // ask liveness for _every_ base inserted to see what is now live. Then we
2776 // remove the dummy calls.
2777 Holders.reserve(Holders.size() + Records.size());
2778 for (size_t i = 0; i < Records.size(); i++) {
2779 PartiallyConstructedSafepointRecord &Info = Records[i];
2780
2781 SmallVector<Value *, 128> Bases;
2782 for (auto *Derived : Info.LiveSet) {
2783 assert(PointerToBase.count(Derived) && "Missed base for derived pointer");
2784 Bases.push_back(PointerToBase[Derived]);
2785 }
2786
2787 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2788 }
2789
2790 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2791 // need to rerun liveness. We may *also* have inserted new defs, but that's
2792 // not the key issue.
2793 recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase, GC.get());
2794
2795 if (PrintBasePointers) {
2796 errs() << "Base Pairs: (w/Relocation)\n";
2797 for (auto Pair : PointerToBase) {
2798 errs() << " derived ";
2799 Pair.first->printAsOperand(errs(), false);
2800 errs() << " base ";
2801 Pair.second->printAsOperand(errs(), false);
2802 errs() << "\n";
2803 }
2804 }
2805
2806 // It is possible that non-constant live variables have a constant base. For
2807 // example, a GEP with a variable offset from a global. In this case we can
2808 // remove it from the liveset. We already don't add constants to the liveset
2809 // because we assume they won't move at runtime and the GC doesn't need to be
2810 // informed about them. The same reasoning applies if the base is constant.
2811 // Note that the relocation placement code relies on this filtering for
2812 // correctness as it expects the base to be in the liveset, which isn't true
2813 // if the base is constant.
2814 for (auto &Info : Records) {
2815 Info.LiveSet.remove_if([&](Value *LiveV) {
2816 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2817 return isa<Constant>(PointerToBase[LiveV]);
2818 });
2819 }
2820
2821 for (CallInst *CI : Holders)
2822 CI->eraseFromParent();
2823
2824 Holders.clear();
2825
2826 // Compute the cost of possible re-materialization of derived pointers.
2827 RematCandTy RematerizationCandidates;
2828 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2829
2830 // In order to reduce the live set of a statepoint we might choose to rematerialize
2831 // some values instead of relocating them. This is purely an optimization and
2832 // does not influence correctness.
2833 // First try rematerialization at uses, then after statepoints.
2834 rematerializeLiveValuesAtUses(RematerizationCandidates, Records,
2835 PointerToBase);
2836 for (size_t i = 0; i < Records.size(); i++)
2837 rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase,
2838 RematerizationCandidates, TTI);
2839
2840 // We need this to safely RAUW and delete call or invoke return values that
2841 // may themselves be live over a statepoint. For details, please see usage in
2842 // makeStatepointExplicitImpl.
2843 std::vector<DeferredReplacement> Replacements;
2844
2845 // Now run through and replace the existing statepoints with new ones with
2846 // the live variables listed. We do not yet update uses of the values being
2847 // relocated. We have references to live variables that need to
2848 // survive to the last iteration of this loop. (By construction, the
2849 // previous statepoint cannot be a live variable, thus we can and do remove
2850 // the old statepoint calls as we go.)
2851 for (size_t i = 0; i < Records.size(); i++)
2852 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements,
2853 PointerToBase, GC.get());
2854
2855 ToUpdate.clear(); // prevent accidental use of invalid calls.
2856
2857 for (auto &PR : Replacements)
2858 PR.doReplacement();
2859
2860 Replacements.clear();
2861
2862 for (auto &Info : Records) {
2863 // These live sets may contain stale Value pointers, since we replaced calls
2864 // with operand bundles with calls wrapped in gc.statepoint, and some of
2865 // those calls may have been def'ing live gc pointers. Clear these out to
2866 // avoid accidentally using them.
2867 //
2868 // TODO: We should create a separate data structure that does not contain
2869 // these live sets, and migrate to using that data structure from this point
2870 // onward.
2871 Info.LiveSet.clear();
2872 }
2873 PointerToBase.clear();
2874
2875 // Do all the fixups of the original live variables to their relocated selves
2876 SmallVector<Value *, 128> Live;
2877 for (const PartiallyConstructedSafepointRecord &Info : Records) {
2878 // We can't simply save the live set from the original insertion. One of
2879 // the live values might be the result of a call which needs a safepoint.
2880 // That Value* no longer exists and we need to use the new gc_result.
2881 // Thankfully, the live set is embedded in the statepoint (and updated), so
2882 // we just grab that.
2883 llvm::append_range(Live, Info.StatepointToken->gc_args());
2884#ifndef NDEBUG
2885 // Do some basic validation checking on our liveness results before
2886 // performing relocation. Relocation can and will turn mistakes in liveness
2887 // results into nonsensical code which is much harder to debug.
2888 // TODO: It would be nice to test consistency as well
2889 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2890 "statepoint must be reachable or liveness is meaningless");
2891 for (Value *V : Info.StatepointToken->gc_args()) {
2892 if (!isa<Instruction>(V))
2893 // Non-instruction values trivially dominate all possible uses
2894 continue;
2895 auto *LiveInst = cast<Instruction>(V);
2896 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2897 "unreachable values should never be live");
2898 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2899 "basic SSA liveness expectation violated by liveness analysis");
2900 }
2901#endif
2902 }
2903 unique_unsorted(Live);
2904
2905#ifndef NDEBUG
2906 // Validation check
2907 for (auto *Ptr : Live)
2908 assert(isHandledGCPointerType(Ptr->getType(), GC.get()) &&
2909 "must be a gc pointer type");
2910#endif
2911
2912 relocationViaAlloca(F, DT, Live, Records);
2913 return !Records.empty();
2914}
2915
2916// List of all parameter and return attributes which must be stripped when
2917// lowering from the abstract machine model. Note that we list attributes
2918// here which aren't valid as return attributes; that is okay.
2919static AttributeMask getParamAndReturnAttributesToRemove() {
2920 AttributeMask R;
2921 R.addAttribute(Attribute::Dereferenceable);
2922 R.addAttribute(Attribute::DereferenceableOrNull);
2923 R.addAttribute(Attribute::ReadNone);
2924 R.addAttribute(Attribute::ReadOnly);
2925 R.addAttribute(Attribute::WriteOnly);
2926 R.addAttribute(Attribute::NoAlias);
2927 R.addAttribute(Attribute::NoFree);
2928 return R;
2929}
2930
2931static void stripNonValidAttributesFromPrototype(Function &F) {
2932 LLVMContext &Ctx = F.getContext();
2933
2934 // Intrinsics are very delicate. Lowering sometimes depends on the presence
2935 // of certain attributes for correctness, but we may have also inferred
2936 // additional ones in the abstract machine model which need to be stripped.
2937 // This assumes that the attributes defined in Intrinsic.td are
2938 // conservatively correct for both the physical and abstract model.
2939 if (Intrinsic::ID id = F.getIntrinsicID()) {
2940 F.setAttributes(Intrinsic::getAttributes(Ctx, id));
2941 return;
2942 }
2943
2944 AttributeMask R = getParamAndReturnAttributesToRemove();
2945 for (Argument &A : F.args())
2946 if (isa<PointerType>(A.getType()))
2947 F.removeParamAttrs(A.getArgNo(), R);
2948
2949 if (isa<PointerType>(F.getReturnType()))
2950 F.removeRetAttrs(R);
2951
2952 for (auto Attr : FnAttrsToStrip)
2953 F.removeFnAttr(Attr);
2954}
2955
2956/// Certain metadata on instructions are invalid after running RS4GC.
2957/// Optimizations that run after RS4GC can incorrectly use this metadata to
2958/// optimize functions. We drop such metadata on the instruction.
2959static void stripInvalidMetadataFromInstruction(Instruction &I) {
2960 if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
2961 return;
2962 // These are the metadata kinds that are still valid on loads and stores
2963 // after RS4GC.
2964 // The metadata implying dereferenceability and noalias are (conservatively)
2965 // dropped. This is because semantically, after RewriteStatepointsForGC runs,
2966 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can
2967 // touch the entire heap including noalias objects. Note: The reasoning is
2968 // the same as stripping the dereferenceability and noalias attributes that
2969 // are analogous to the metadata counterparts.
2970 // We also drop the invariant.load metadata on the load because that metadata
2971 // implies the address operand to the load points to memory that is never
2972// changed once it becomes dereferenceable. This is no longer true after RS4GC.
2973 // Similar reasoning applies to invariant.group metadata, which applies to
2974 // loads within a group.
2975 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
2976 LLVMContext::MD_range,
2977 LLVMContext::MD_alias_scope,
2978 LLVMContext::MD_nontemporal,
2979 LLVMContext::MD_nonnull,
2980 LLVMContext::MD_align,
2981 LLVMContext::MD_type};
2982
2983 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
2984 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
2985}
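// For illustration only (hypothetical IR): a load such as
//
//   %v = load i32, i32 addrspace(1)* %p, !invariant.load !0, !tbaa !1
//
// keeps its !tbaa tag but loses !invariant.load once this pass has run.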
2986
2987static void stripNonValidDataFromBody(Function &F) {
2988 if (F.empty())
2989 return;
2990
2991 LLVMContext &Ctx = F.getContext();
2992 MDBuilder Builder(Ctx);
2993
2994 // Set of invariant.start instructions that we need to remove.
2995 // Use this to avoid invalidating the instruction iterator.
2996 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
2997
2998 for (Instruction &I : instructions(F)) {
2999 // invariant.start on memory location implies that the referenced memory
3000 // location is constant and unchanging. This is no longer true after
3001 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
3002 // which frees the entire heap and the presence of invariant.start allows
3003 // the optimizer to sink the load of a memory location past a statepoint,
3004 // which is incorrect.
3005 if (auto *II = dyn_cast<IntrinsicInst>(&I))
3006 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
3007 InvariantStartInstructions.push_back(II);
3008 continue;
3009 }
3010
3011 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
3012 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
3013 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
3014 }
3015
3016 stripInvalidMetadataFromInstruction(I);
3017
3018 AttributeMask R = getParamAndReturnAttributesToRemove();
3019 if (auto *Call = dyn_cast<CallBase>(&I)) {
3020 for (int i = 0, e = Call->arg_size(); i != e; i++)
3021 if (isa<PointerType>(Call->getArgOperand(i)->getType()))
3022 Call->removeParamAttrs(i, R);
3023 if (isa<PointerType>(Call->getType()))
3024 Call->removeRetAttrs(R);
3025 }
3026 }
3027
3028 // Delete the invariant.start instructions and RAUW them with undef.
3029 for (auto *II : InvariantStartInstructions) {
3030 II->replaceAllUsesWith(UndefValue::get(II->getType()));
3031 II->eraseFromParent();
3032 }
3033}
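// A sketch of the hazard (hypothetical IR, @do_safepoint is illustrative):
//
//   %t = call ptr @llvm.invariant.start.p0(i64 8, ptr %p)
//   call void @do_safepoint()          ; becomes a gc.statepoint
//   %v = load i64, ptr %p
//
// invariant.start would let a later pass treat %p's memory as unchanging
// across the call, but the statepoint may relocate the object, so the
// intrinsic is deleted and its uses replaced with undef above.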
3034
3035/// Looks up the GC strategy for a given function, returning null if the
3036/// function doesn't have a GC tag.
3037static std::unique_ptr<GCStrategy> findGCStrategy(Function &F) {
3038 if (!F.hasGC())
3039 return nullptr;
3040
3041 return getGCStrategy(F.getGC());
3042}
3043
3044/// Returns true if this function should be rewritten by this pass. The main
3045/// point of this function is as an extension point for custom logic.
3046static bool shouldRewriteStatepointsIn(Function &F) {
3047 if (!F.hasGC())
3048 return false;
3049
3050 std::unique_ptr<GCStrategy> Strategy = findGCStrategy(F);
3051
3052 assert(Strategy && "GC strategy is required by function, but was not found");
3053
3054 return Strategy->useRS4GC();
3055}
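// Opt-in is keyed off the function's "gc" attribute; illustratively:
//
//   define void @foo() gc "statepoint-example" { ... }
//
// "statepoint-example" is a strategy whose useRS4GC() answers true, so
// @foo is rewritten here, while functions without a GC tag are skipped.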
3056
3057static void stripNonValidData(Module &M) {
3058#ifndef NDEBUG
3059 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
3060#endif
3061
3062 for (Function &F : M)
3063 stripNonValidAttributesFromPrototype(F);
3064
3065 for (Function &F : M)
3066 stripNonValidDataFromBody(F);
3067}
3068
3069bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
3070 TargetTransformInfo &TTI,
3071 const TargetLibraryInfo &TLI) {
3072 assert(!F.isDeclaration() && !F.empty() &&
3073 "need function body to rewrite statepoints in");
3074 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");
3075
3076 auto NeedsRewrite = [&TLI](Instruction &I) {
3077 if (const auto *Call = dyn_cast<CallBase>(&I)) {
3078 if (isa<GCStatepointInst>(Call))
3079 return false;
3080 if (callsGCLeafFunction(Call, TLI))
3081 return false;
3082
3083 // Normally it's up to the frontend to make sure that non-leaf calls also
3084 // have proper deopt state if it is required. We make an exception for
3085 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics
3086 // these are non-leaf by default. They might be generated by the optimizer
3087 // which doesn't know how to produce a proper deopt state. So if we see a
3088 // non-leaf memcpy/memmove without deopt state just treat it as a leaf
3089 // copy and don't produce a statepoint.
3090 if (!AllowStatepointWithNoDeoptInfo &&
3091 !Call->getOperandBundle(LLVMContext::OB_deopt)) {
3092 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
3093 "Don't expect any other calls here!");
3094 return false;
3095 }
3096 return true;
3097 }
3098 return false;
3099 };
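// Illustrative inputs for NeedsRewrite (hypothetical IR): a non-leaf call
// carrying deopt state,
//
//   call void @bar() [ "deopt"(i32 %state) ]
//
// gets wrapped in a gc.statepoint, while an element atomic memcpy with no
// "deopt" bundle falls under the exception above and stays a plain call.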
3100
3101 // Delete any unreachable statepoints so that we don't have unrewritten
3102 // statepoints surviving this pass. This makes testing easier and the
3103 // resulting IR less confusing to human readers.
3104 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
3105 bool MadeChange = removeUnreachableBlocks(F, &DTU);
3106 // Flush the Dominator Tree.
3107 DTU.getDomTree();
3108
3109 // Gather all the statepoints which need to be rewritten. Be careful to only
3110 // consider those in reachable code since we need to ask dominance queries
3111 // when rewriting. We'll delete the unreachable ones in a moment.
3112 SmallVector<CallBase *, 64> ParsePointNeeded;
3113 SmallVector<CallInst *, 64> Intrinsics;
3114 for (Instruction &I : instructions(F)) {
3115 // TODO: only the ones with the flag set!
3116 if (NeedsRewrite(I)) {
3117 // NOTE removeUnreachableBlocks() is stronger than
3118 // DominatorTree::isReachableFromEntry(). In other words
3119 // removeUnreachableBlocks can remove some blocks for which
3120 // isReachableFromEntry() returns true.
3121 assert(DT.isReachableFromEntry(I.getParent()) &&
3122 "no unreachable blocks expected");
3123 ParsePointNeeded.push_back(cast<CallBase>(&I));
3124 }
3125 if (auto *CI = dyn_cast<CallInst>(&I))
3126 if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
3127 CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
3128 Intrinsics.emplace_back(CI);
3129 }
3130
3131 // Return early if no work to do.
3132 if (ParsePointNeeded.empty() && Intrinsics.empty())
3133 return MadeChange;
3134
3135 // As a prepass, go ahead and aggressively destroy single entry phi nodes.
3136 // These are created by LCSSA. They have the effect of increasing the size
3137 // of liveness sets for no good reason. It may be harder to do this post
3138 // insertion since relocations and base phis can confuse things.
3139 for (BasicBlock &BB : F)
3140 if (BB.getUniquePredecessor())
3141 MadeChange |= FoldSingleEntryPHINodes(&BB);
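// E.g. (illustrative IR): an LCSSA-created phi such as
//
//   exit:                              ; single predecessor: %loop
//     %p.lcssa = phi ptr [ %p, %loop ]
//
// folds away to a direct use of %p, so liveness tracks one value, not two.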
3142
3143 // Before we start introducing relocations, we want to tweak the IR a bit to
3144 // avoid unfortunate code generation effects. The main example is that we
3145 // want to try to make sure the comparison feeding a branch is after any
3146 // safepoints. Otherwise, we end up with a comparison of pre-relocation
3147 // values feeding a branch after relocation. This is semantically correct,
3148 // but results in extra register pressure since both the pre-relocation and
3149 // post-relocation copies must be available in registers. For code without
3150 // relocations this is handled elsewhere, but teaching the scheduler to
3151 // reverse the transform we're about to do would be slightly complex.
3152 // Note: This may extend the live range of the inputs to the icmp and thus
3153 // increase the liveset of any statepoint we move over. This is profitable
3154 // as long as all statepoints are in rare blocks. If we had in-register
3155 // lowering for live values this would be a much safer transform.
3156 auto getConditionInst = [](Instruction *TI) -> Instruction * {
3157 if (auto *BI = dyn_cast<BranchInst>(TI))
3158 if (BI->isConditional())
3159 return dyn_cast<Instruction>(BI->getCondition());
3160 // TODO: Extend this to handle switches
3161 return nullptr;
3162 };
3163 for (BasicBlock &BB : F) {
3164 Instruction *TI = BB.getTerminator();
3165 if (auto *Cond = getConditionInst(TI))
3166 // TODO: Handle more than just ICmps here. We should be able to move
3167 // most instructions without side effects or memory access.
3168 if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
3169 MadeChange = true;
3170 Cond->moveBefore(TI);
3171 }
3172 }
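// Illustrative before/after (hypothetical IR). Before:
//
//   %c = icmp eq ptr %a, %b
//   call void @f() [ "deopt"() ]   ; will become a statepoint
//   br i1 %c, label %t, label %e
//
// After, %c sits next to the branch, so it compares relocated values and
// only one copy of %a/%b has to survive the safepoint:
//
//   call void @f() [ "deopt"() ]
//   %c = icmp eq ptr %a, %b
//   br i1 %c, label %t, label %e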
3173
3174 // Nasty workaround - The base computation code in the main algorithm doesn't
3175 // consider the fact that a GEP can be used to convert a scalar to a vector.
3176 // The right fix for this is to integrate GEPs into the base rewriting
3177 // algorithm properly, this is just a short term workaround to prevent
3178 // crashes by canonicalizing such GEPs into fully vector GEPs.
3179 for (Instruction &I : instructions(F)) {
3180 if (!isa<GetElementPtrInst>(I))
3181 continue;
3182
3183 unsigned VF = 0;
3184 for (unsigned i = 0; i < I.getNumOperands(); i++)
3185 if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
3186 assert(VF == 0 ||
3187 VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
3188 VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
3189 }
3190
3191 // It's the vector to scalar traversal through the pointer operand which
3192 // confuses base pointer rewriting, so limit ourselves to that case.
3193 if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
3194 IRBuilder<> B(&I);
3195 auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
3196 I.setOperand(0, Splat);
3197 MadeChange = true;
3198 }
3199 }
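// Illustrative (hypothetical IR): a scalar-base GEP with a vector index,
//
//   %g = getelementptr i32, ptr %base, <2 x i64> %idx
//
// is canonicalized by splatting the base first,
//
//   %s = ... ; splat of %base to <2 x ptr>, via insertelement/shufflevector
//   %g = getelementptr i32, <2 x ptr> %s, <2 x i64> %idx
//
// so base-pointer rewriting only ever sees fully-vector GEPs.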
3200
3201 // Cache the 'defining value' relation used in the computation and
3202 // insertion of base phis and selects. This ensures that we don't insert
3203 // large numbers of duplicate base_phis. Use one cache for both
3204 // inlineGetBaseAndOffset() and insertParsePoints().
3205 DefiningValueMapTy DVCache;
3206
3207 // Mapping between base values and a flag indicating whether it's a known
3208 // base or not.
3209 IsKnownBaseMapTy KnownBases;
3210
3211 if (!Intrinsics.empty())
3212 // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before finding
3213 // live references.
3214 MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache, KnownBases);
3215
3216 if (!ParsePointNeeded.empty())
3217 MadeChange |=
3218 insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache, KnownBases);
3219
3220 return MadeChange;
3221}
3222
3223// liveness computation via standard dataflow
3224// -------------------------------------------------------------------
3225
3226// TODO: Consider using bitvectors for liveness, the set of potentially
3227// interesting values should be small and easy to pre-compute.
3228
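// Concretely, the fixed point being solved is the usual backward problem
// (per basic block BB, over the handled GC pointer values):
//
//   LiveOut(BB) = union of LiveIn(S) for each successor S of BB
//   LiveIn(BB)  = (LiveOut(BB) u Gen(BB)) \ Kill(BB)
//
// with Gen(BB) the uses seeded into LiveSet and Kill(BB) the defs in BB.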
3229/// Compute the live-in set for the reverse instruction range [Begin, End),
3230/// starting from the live-out set of the basic block.
3231static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
3232 BasicBlock::reverse_iterator End,
3233 SetVector<Value *> &LiveTmp, GCStrategy *GC) {
3234 for (auto &I : make_range(Begin, End)) {
3235 // KILL/Def - Remove this definition from LiveIn
3236 LiveTmp.remove(&I);
3237
3238 // Don't consider *uses* in PHI nodes, we handle their contribution to
3239 // predecessor blocks when we seed the LiveOut sets
3240 if (isa<PHINode>(I))
3241 continue;
3242
3243 // USE - Add to the LiveIn set for this instruction
3244 for (Value *V : I.operands()) {
3245 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3246 "support for FCA unimplemented");
3247 if (isHandledGCPointerType(V->getType(), GC) && !isa<Constant>(V)) {
3248 // The choice to exclude all things constant here is slightly subtle.
3249 // There are two independent reasons:
3250 // - We assume that things which are constant (from LLVM's definition)
3251 // do not move at runtime. For example, the address of a global
3252 // variable is fixed, even though its contents may not be.
3253 // - Second, we can't disallow arbitrary inttoptr constants even
3254 // if the language frontend does. Optimization passes are free to
3255 // locally exploit facts without respect to global reachability. This
3256 // can create sections of code which are dynamically unreachable and
3257 // contain just about anything. (see constants.ll in tests)
3258 LiveTmp.insert(V);
3259 }
3260 }
3261 }
3262}
3263
3264static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp,
3265 GCStrategy *GC) {
3266 for (BasicBlock *Succ : successors(BB)) {
3267 for (auto &I : *Succ) {
3268 PHINode *PN = dyn_cast<PHINode>(&I);
3269 if (!PN)
3270 break;
3271
3272 Value *V = PN->getIncomingValueForBlock(BB);
3273 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3274 "support for FCA unimplemented");
3275 if (isHandledGCPointerType(V->getType(), GC) && !isa<Constant>(V))
3276 LiveTmp.insert(V);
3277 }
3278 }
3279}
3280
3281static SetVector<Value *> computeKillSet(BasicBlock *BB, GCStrategy *GC) {
3282 SetVector<Value *> KillSet;
3283 for (Instruction &I : *BB)
3284 if (isHandledGCPointerType(I.getType(), GC))
3285 KillSet.insert(&I);
3286 return KillSet;
3287}
3288
3289#ifndef NDEBUG
3290/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
3291/// validation check for the liveness computation.
3292static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
3293 Instruction *TI, bool TermOkay = false) {
3294 for (Value *V : Live) {
3295 if (auto *I = dyn_cast<Instruction>(V)) {
3296 // The terminator can be a member of the LiveOut set. LLVM's definition
3297 // of instruction dominance states that V does not dominate itself. As
3298 // such, we need to special case this to allow it.
3299 if (TermOkay && TI == I)
3300 continue;
3301 assert(DT.dominates(I, TI) &&
3302 "basic SSA liveness expectation violated by liveness analysis");
3303 }
3304 }
3305}
3306
3307/// Check that all the liveness sets used during the computation of liveness
3308/// obey basic SSA properties. This is useful for finding cases where we miss
3309/// a def.
3310static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
3311 BasicBlock &BB) {
3312 checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
3313 checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
3314 checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
3315}
3316#endif
3317
3318static void computeLiveInValues(DominatorTree &DT, Function &F,
3319 GCPtrLivenessData &Data, GCStrategy *GC) {
3320 SmallSetVector<BasicBlock *, 32> Worklist;
3321
3322 // Seed the liveness for each individual block
3323 for (BasicBlock &BB : F) {
3324 Data.KillSet[&BB] = computeKillSet(&BB, GC);
3325 Data.LiveSet[&BB].clear();
3326 computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB], GC);
3327
3328#ifndef NDEBUG
3329 for (Value *Kill : Data.KillSet[&BB])
3330 assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
3331#endif
3332
3333 Data.LiveOut[&BB] = SetVector<Value *>();
3334 computeLiveOutSeed(&BB, Data.LiveOut[&BB], GC);
3335 Data.LiveIn[&BB] = Data.LiveSet[&BB];
3336 Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
3337 Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
3338 if (!Data.LiveIn[&BB].empty())
3339 Worklist.insert(pred_begin(&BB), pred_end(&BB));
3340 }
3341
3342 // Propagate that liveness until stable
3343 while (!Worklist.empty()) {
3344 BasicBlock *BB = Worklist.pop_back_val();
3345
3346 // Compute our new liveout set, then exit early if it hasn't changed despite
3347 // the contributions of our successors.
3348 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3349 const auto OldLiveOutSize = LiveOut.size();
3350 for (BasicBlock *Succ : successors(BB)) {
3351 assert(Data.LiveIn.count(Succ));
3352 LiveOut.set_union(Data.LiveIn[Succ]);
3353 }
3354 // assert: the old LiveOut is a subset of the new LiveOut
3355 if (OldLiveOutSize == LiveOut.size()) {
3356 // If the sets are the same size, then we didn't actually add anything
3357 // when unioning our successors' LiveIn. Thus, the LiveIn of this block
3358 // hasn't changed.
3359 continue;
3360 }
3361 Data.LiveOut[BB] = LiveOut;
3362
3363 // Apply the effects of this basic block
3364 SetVector<Value *> LiveTmp = LiveOut;
3365 LiveTmp.set_union(Data.LiveSet[BB]);
3366 LiveTmp.set_subtract(Data.KillSet[BB]);
3367
3368 assert(Data.LiveIn.count(BB));
3369 const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
3370 // assert: OldLiveIn is a subset of LiveTmp
3371 if (OldLiveIn.size() != LiveTmp.size()) {
3372 Data.LiveIn[BB] = LiveTmp;
3373 Worklist.insert(pred_begin(BB), pred_end(BB));
3374 }
3375 } // while (!Worklist.empty())
3376
3377#ifndef NDEBUG
3378 // Verify our output against SSA properties. This helps catch any
3379 // missing kills during the above iteration.
3380 for (BasicBlock &BB : F)
3381 checkBasicSSA(DT, Data, BB);
3382#endif
3383}
3384
3385static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
3386 StatepointLiveSetTy &Out, GCStrategy *GC) {
3387 BasicBlock *BB = Inst->getParent();
3388
3389 // Note: The copy is intentional and required
3390 assert(Data.LiveOut.count(BB));
3391 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3392
3393 // We want to handle the statepoint itself oddly. Its
3394 // call result is not live (normal), nor are its arguments
3395 // (unless they're used again later). This adjustment is
3396 // specifically what we need to relocate.
3397 computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(), LiveOut,
3398 GC);
3399 LiveOut.remove(Inst);
3400 Out.insert(LiveOut.begin(), LiveOut.end());
3401}
3402
3403static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
3404 CallBase *Call,
3405 PartiallyConstructedSafepointRecord &Info,
3406 PointerToBaseTy &PointerToBase,
3407 GCStrategy *GC) {
3408 StatepointLiveSetTy Updated;
3409 findLiveSetAtInst(Call, RevisedLivenessData, Updated, GC);
3410
3411 // We may have base pointers which are now live that weren't before. We need
3412 // to update the PointerToBase structure to reflect this.
3413 for (auto *V : Updated)
3414 PointerToBase.insert({ V, V });
3415
3416 Info.LiveSet = Updated;
3417}