LLVM  13.0.0git
PlaceSafepoints.cpp
//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit. That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call. A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call. Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable. The (GC & runtime specific) logic of a poll is
// assumed to be provided in a function named "gc.safepoint_poll".
//
// We aim to insert polls such that running code can quickly be brought to a
// well defined state for inspection by the collector. In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops. We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites. This is not
// because the poll itself is expensive in the generated code; it's not. Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable. The callee might execute a
// poll (or otherwise be inspected by the GC). If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard. They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them. Patches welcome.
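//
// For illustration only: a minimal gc.safepoint_poll implementation (supplied
// by the frontend, not by this pass) could be a single unconditional call
// into the runtime's slow path. The @do_safepoint callee here is a
// hypothetical runtime helper, not something this pass provides:
//
//   define void @gc.safepoint_poll() {
//   entry:
//     call void @do_safepoint()
//     ret void
//   }
//
// InsertSafepointPoll (below) inlines the body of gc.safepoint_poll at each
// chosen poll site and records the calls it contains as needing to be made
// parseable.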
//
//===----------------------------------------------------------------------===//

#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "safepoint-placement"

STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop,
          "Number of loops without safepoints due to calls in loop");
STATISTIC(FiniteExecution,
          "Number of loops without safepoints due to finite execution");

using namespace llvm;

// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));

/// How narrow does the trip count of a loop have to be to be considered
/// "counted"? Counted loops do not get safepoints at backedges.
static cl::opt<int> CountedLoopTripWidth("spp-counted-loop-trip-width",
                                         cl::Hidden, cl::init(32));

// If true, split the backedge of a loop when placing the safepoint, otherwise
// split the latch block itself. Both are useful to support for
// experimentation, but in practice, it looks like splitting the backedge
// optimizes better.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));

namespace {

/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - gives a list of each backedge (described by
  /// pointing at the branch) which need a poll inserted.
  std::vector<Instruction *> PollLocations;

  /// True unless we're running spp-no-calls in which case we need to disable
  /// the call-dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;
  TargetLibraryInfo *TLI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (Loop *I : *L)
      runOnLoopAndSubLoops(I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    for (Loop *I : *LI) {
      runOnLoopAndSubLoops(I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    // We no longer modify the IR at all in this pass. Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
}

static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));

namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc). We
    // preserve nothing at the moment. We could potentially preserve dom tree
    // if that was worth doing
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};
}

// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call; that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI);

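// Returns true if the given call needs to be wrapped in a statepoint (i.e.
// made parseable); returns false for calls to known GC-leaf functions, for
// inline asm, and for the gc.statepoint / gc.relocate / gc.result intrinsics
// themselves.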
static bool needsStatepoint(CallBase *Call, const TargetLibraryInfo &TLI) {
  if (callsGCLeafFunction(Call, TLI))
    return false;
  if (auto *CI = dyn_cast<CallInst>(Call)) {
    if (CI->isInlineAsm())
      return false;
  }

  return !(isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
           isa<GCResultInst>(Call));
}

/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred. Returns a conservatively
/// correct answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT,
                                               const TargetLibraryInfo &TLI) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
  // of such dominating blocks gets substantially more occurrences than just
  // checking the Pred and Header blocks themselves. This may be due to the
  // density of loop exit conditions caused by range and null checks.
  // TODO: structure this as an analysis pass, cache the result for subloops,
  // avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto *Call = dyn_cast<CallBase>(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here. We should instead be checking if the target method
        // has an unconditional poll. In practice, this is only a theoretical
        // concern since we don't have any methods with conditional-only
        // safepoint polls.
        if (needsStatepoint(Call, TLI))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}

/// Returns true if this loop is known to terminate in a finite number of
/// iterations. Note that this function may return false for a loop which
/// does actually terminate in a finite constant number of iterations due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getConstantMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(MaxTrips) &&
      SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(
          CountedLoopTripWidth))
    return true;

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only. TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (!isa<SCEVCouldNotCompute>(MaxExec) &&
        SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(
            CountedLoopTripWidth))
      return true;
  }

  return /* not finite */ false;
}

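// Scan a single basic block starting at Start, recording every CallInst
// encountered into Calls. Stops early if End is reached within this block;
// otherwise, on reaching the terminator, queues any not-yet-seen successor
// blocks onto Worklist.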
static void scanOneBB(Instruction *Start, Instruction *End,
                      std::vector<CallInst *> &Calls,
                      DenseSet<BasicBlock *> &Seen,
                      std::vector<BasicBlock *> &Worklist) {
  for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
                            BBE1 = BasicBlock::iterator(End);
       BBI != BBE0 && BBI != BBE1; BBI++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
      Calls.push_back(CI);

    // FIXME: This code does not handle invokes
    assert(!isa<InvokeInst>(&*BBI) &&
           "support for invokes in poll code needed");

    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (BBI->isTerminator()) {
      BasicBlock *BB = BBI->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (Seen.insert(Succ).second) {
          Worklist.push_back(Succ);
        }
      }
    }
  }
}

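// Collect every call between Start (the first instruction of the freshly
// inlined poll body) and End, walking across all of the newly created basic
// blocks; Seen records the blocks already visited.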
static void scanInlinedCode(Instruction *Start, Instruction *End,
                            std::vector<CallInst *> &Calls,
                            DenseSet<BasicBlock *> &Seen) {
  Calls.clear();
  std::vector<BasicBlock *> Worklist;
  Seen.insert(Start->getParent());
  scanOneBB(Start, End, Calls, Seen, Worklist);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.back();
    Worklist.pop_back();
    scanOneBB(&*BB->begin(), End, Calls, Seen, Worklist);
  }
}

bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges). We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *Header = L->getHeader();
  SmallVector<BasicBlock *, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *Pred : LoopLatches) {
    assert(L->contains(Pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not. Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, Pred)) {
        LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, Header, Pred, *DT, *TLI)) {
        // Note: This is only semantically legal since we won't do any further
        // IPO or inlining before the actual call insertion. If we did, we
        // might later lose this call safepoint.
        LLVM_DEBUG(
            dbgs()
            << "skipping safepoint placement due to unconditional call\n");
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint. This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header
    Instruction *Term = Pred->getTerminator();

    LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);

    PollLocations.push_back(Term);
  }

  return false;
}

/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(CallBase *Call) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an unbounded
      // amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics are things which do not expand to actual calls, or
      // at least if they do, are leaf functions that cause only finite stack
      // growth. In particular, the optimizer likes to form things like memsets
      // out of stores in the original IR. Another important example is
      // llvm.localescape which must occur in the entry block. Inserting a
      // safepoint before it is not legal since it could push the localescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}

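/// Find the location for the entry safepoint poll: as late as possible along
/// the "straight line" prefix of the function, but still dominating every
/// call that may grow the stack or fail to return.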
static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible. We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack. This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // hasNextInstruction and nextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto HasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator())
      return true;

    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto NextInstruction = [&](Instruction *I) {
    assert(HasNextInstruction(I) &&
           "first check if there is a next instruction!");

    if (I->isTerminator())
      return &I->getParent()->getUniqueSuccessor()->front();
    return &*++I->getIterator();
  };

  Instruction *Cursor = nullptr;
  for (Cursor = &F.getEntryBlock().front(); HasNextInstruction(Cursor);
       Cursor = NextInstruction(Cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call. The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each take
    // a safepoint. Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount. This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto *Call = dyn_cast<CallBase>(Cursor)) {
      if (doesNotRequireEntrySafepointBefore(Call))
        continue;
      break;
    }
  }

  assert((HasNextInstruction(Cursor) || Cursor->isTerminator()) &&
         "either we stopped because of a call, or because of terminator");

  return Cursor;
}

const char GCSafepointPollName[] = "gc.safepoint_poll";

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}

/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites. The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}

458 
459 // TODO: These should become properties of the GCStrategy, possibly with
460 // command line overrides.
461 static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
462 static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
463 static bool enableCallSafepoints(Function &F) { return !NoCall; }
464 
466  if (F.isDeclaration() || F.empty()) {
467  // This is a declaration, nothing to do. Must exit early to avoid crash in
468  // dom tree calculation
469  return false;
470  }
471 
472  if (isGCSafepointPoll(F)) {
473  // Given we're inlining this inside of safepoint poll insertion, this
474  // doesn't make any sense. Note that we do make any contained calls
475  // parseable after we inline a poll.
476  return false;
477  }
478 
479  if (!shouldRewriteFunction(F))
480  return false;
481 
482  const TargetLibraryInfo &TLI =
483  getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
484 
485  bool Modified = false;
486 
487  // In various bits below, we rely on the fact that uses are reachable from
488  // defs. When there are basic blocks unreachable from the entry, dominance
489  // and reachablity queries return non-sensical results. Thus, we preprocess
490  // the function to ensure these properties hold.
492 
493  // STEP 1 - Insert the safepoint polling locations. We do not need to
494  // actually insert parse points yet. That will be done for all polls and
495  // calls in a single pass.
496 
497  DominatorTree DT;
498  DT.recalculate(F);
499 
500  SmallVector<Instruction *, 16> PollsNeeded;
501  std::vector<CallBase *> ParsePointNeeded;
502 
504  // Construct a pass manager to run the LoopPass backedge logic. We
505  // need the pass manager to handle scheduling all the loop passes
506  // appropriately. Doing this by hand is painful and just not worth messing
507  // with for the moment.
508  legacy::FunctionPassManager FPM(F.getParent());
509  bool CanAssumeCallSafepoints = enableCallSafepoints(F);
510  auto *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
511  FPM.add(PBS);
512  FPM.run(F);
513 
514  // We preserve dominance information when inserting the poll, otherwise
515  // we'd have to recalculate this on every insert
516  DT.recalculate(F);
517 
518  auto &PollLocations = PBS->PollLocations;
519 
520  auto OrderByBBName = [](Instruction *a, Instruction *b) {
521  return a->getParent()->getName() < b->getParent()->getName();
522  };
523  // We need the order of list to be stable so that naming ends up stable
524  // when we split edges. This makes test cases much easier to write.
525  llvm::sort(PollLocations, OrderByBBName);
526 
527  // We can sometimes end up with duplicate poll locations. This happens if
528  // a single loop is visited more than once. The fact this happens seems
529  // wrong, but it does happen for the split-backedge.ll test case.
530  PollLocations.erase(std::unique(PollLocations.begin(),
531  PollLocations.end()),
532  PollLocations.end());
533 
534  // Insert a poll at each point the analysis pass identified
535  // The poll location must be the terminator of a loop latch block.
536  for (Instruction *Term : PollLocations) {
537  // We are inserting a poll, the function is modified
538  Modified = true;
539 
540  if (SplitBackedge) {
541  // Split the backedge of the loop and insert the poll within that new
542  // basic block. This creates a loop with two latches per original
543  // latch (which is non-ideal), but this appears to be easier to
544  // optimize in practice than inserting the poll immediately before the
545  // latch test.
546 
547  // Since this is a latch, at least one of the successors must dominate
548  // it. Its possible that we have a) duplicate edges to the same header
549  // and b) edges to distinct loop headers. We need to insert pools on
550  // each.
551  SetVector<BasicBlock *> Headers;
552  for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
553  BasicBlock *Succ = Term->getSuccessor(i);
554  if (DT.dominates(Succ, Term->getParent())) {
555  Headers.insert(Succ);
556  }
557  }
558  assert(!Headers.empty() && "poll location is not a loop latch?");
559 
560  // The split loop structure here is so that we only need to recalculate
561  // the dominator tree once. Alternatively, we could just keep it up to
562  // date and use a more natural merged loop.
563  SetVector<BasicBlock *> SplitBackedges;
564  for (BasicBlock *Header : Headers) {
565  BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
566  PollsNeeded.push_back(NewBB->getTerminator());
567  NumBackedgeSafepoints++;
568  }
569  } else {
570  // Split the latch block itself, right before the terminator.
571  PollsNeeded.push_back(Term);
572  NumBackedgeSafepoints++;
573  }
574  }
575  }
576 
577  if (enableEntrySafepoints(F)) {
578  if (Instruction *Location = findLocationForEntrySafepoint(F, DT)) {
579  PollsNeeded.push_back(Location);
580  Modified = true;
581  NumEntrySafepoints++;
582  }
583  // TODO: else we should assert that there was, in fact, a policy choice to
584  // not insert a entry safepoint poll.
585  }
586 
587  // Now that we've identified all the needed safepoint poll locations, insert
588  // safepoint polls themselves.
589  for (Instruction *PollLocation : PollsNeeded) {
590  std::vector<CallBase *> RuntimeCalls;
591  InsertSafepointPoll(PollLocation, RuntimeCalls, TLI);
592  llvm::append_range(ParsePointNeeded, RuntimeCalls);
593  }
594 
595  return Modified;
596 }

char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}

INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)

static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI) {
  BasicBlock *OrigBB = InsertBefore->getParent();
  Module *M = InsertBefore->getModule();
  assert(M && "must be part of a module");

  // Inline the safepoint poll implementation - this will get all the branch,
  // control flow, etc. Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parsepoint).

  auto *F = M->getFunction(GCSafepointPollName);
  assert(F && "gc.safepoint_poll function is missing");
  assert(F->getValueType() ==
             FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
         "gc.safepoint_poll declared with wrong type");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *PollCall = CallInst::Create(F, "", InsertBefore);

  // Record some information about the call site we're replacing
  BasicBlock::iterator Before(PollCall), After(PollCall);
  bool IsBegin = false;
  if (Before == OrigBB->begin())
    IsBegin = true;
  else
    Before--;

  After++;
  assert(After != OrigBB->end() && "must have successor");

  // Do the actual inlining
  InlineFunctionInfo IFI;
  bool InlineStatus = InlineFunction(*PollCall, IFI).isSuccess();
  assert(InlineStatus && "inline must succeed");
  (void)InlineStatus; // suppress warning in release-asserts

  // Check post-conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");

  std::vector<CallInst *> Calls; // new calls
  DenseSet<BasicBlock *> BBs;    // new BBs + insertee

  // Include only the newly inserted instructions. Note: begin may not be valid
  // if we inserted to the beginning of the basic block
  BasicBlock::iterator Start = IsBegin ? OrigBB->begin() : std::next(Before);

  // If your poll function includes an unreachable at the end, that's not
  // valid. Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*Start, &*After) &&
         "malformed poll function");

  scanInlinedCode(&*Start, &*After, Calls, BBs);
  assert(!Calls.empty() && "slow path not found for safepoint poll");

  // Record the fact we need a parsable state at the runtime call contained in
  // the poll function. This is required so that the runtime knows how to
  // parse the last frame when we actually take the safepoint (i.e. execute
  // the slow path)
  assert(ParsePointsNeeded.empty());
  for (auto *CI : Calls) {
    // No safepoint needed or wanted
    if (!needsStatepoint(CI, TLI))
      continue;

    // These are likely runtime calls. Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CI);
  }
  assert(ParsePointsNeeded.size() <= Calls.size());
}