LLVM 17.0.0git
TailRecursionElimination.cpp
Go to the documentation of this file.
1//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file transforms calls of the current function (self recursion) followed
10// by a return instruction with a branch to the entry of the function, creating
11// a loop. This pass also implements the following extensions to the basic
12// algorithm:
13//
14// 1. Trivial instructions between the call and return do not prevent the
15// transformation from taking place, though currently the analysis cannot
16// support moving any really useful instructions (only dead ones).
17// 2. This pass transforms functions that are prevented from being tail
18// recursive by an associative and commutative expression to use an
19// accumulator variable, thus compiling the typical naive factorial or
20// 'fib' implementation into efficient code.
21// 3. TRE is performed if the function returns void, if the return
22// returns the result returned by the call, or if the function returns a
23// run-time constant on all exits from the function. It is possible, though
24// unlikely, that the return returns something else (like constant 0), and
25// can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
26// the function return the exact same value.
27// 4. If it can prove that callees do not access their caller stack frame,
28// they are marked as eligible for tail call elimination (by the code
29// generator).
30//
31// There are several improvements that could be made:
32//
33// 1. If the function has any alloca instructions, these instructions will be
34// moved out of the entry block of the function, causing them to be
35// evaluated each time through the tail recursion. Safely keeping allocas
36// in the entry block requires analysis to prove that the tail-called
37// function does not read or write the stack object.
38// 2. Tail recursion is only performed if the call immediately precedes the
39// return instruction. It's possible that there could be a jump between
40// the call and the return.
41// 3. There can be intervening operations between the call and the return that
42// prevent the TRE from occurring. For example, there could be GEP's and
43// stores to memory that will not be read or written by the call. This
44// requires some substantial analysis (such as with DSA) to prove safe to
45// move ahead of the call, but doing so could allow many more TREs to be
46// performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
47// 4. The algorithm we use to detect if callees access their caller stack
48// frames is very primitive.
49//
50//===----------------------------------------------------------------------===//
51
53#include "llvm/ADT/STLExtras.h"
55#include "llvm/ADT/Statistic.h"
59#include "llvm/Analysis/Loads.h"
64#include "llvm/IR/CFG.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
69#include "llvm/IR/Dominators.h"
70#include "llvm/IR/Function.h"
71#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/Module.h"
77#include "llvm/Pass.h"
78#include "llvm/Support/Debug.h"
82using namespace llvm;
83
84#define DEBUG_TYPE "tailcallelim"
85
86STATISTIC(NumEliminated, "Number of tail calls removed");
87STATISTIC(NumRetDuped, "Number of return duplicated");
88STATISTIC(NumAccumAdded, "Number of accumulators introduced");
89
90/// Scan the specified function for alloca instructions.
91/// If it contains any dynamic allocas, returns false.
// Returns true when the function contains no dynamic (non-static) allocas,
// i.e. TRE is permissible from the stack-allocation point of view. Dynamic
// allocas would have to be deallocated before each new loop iteration, which
// this pass does not implement.
// NOTE(review): doxygen line 97 — the enclosing llvm::all_of(...) call over
// the function's instructions that this lambda body belongs to — is missing
// from this extraction; confirm against the upstream file.
92static bool canTRE(Function &F) {
93 // TODO: We don't do TRE if dynamic allocas are used.
94 // Dynamic allocas allocate stack space which should be
95 // deallocated before new iteration started. That is
96 // currently not implemented.
98    auto *AI = dyn_cast<AllocaInst>(&I);
99    return !AI || AI->isStaticAlloca();
100  });
101}
102
103namespace {
// Local escape analysis over alloca-derived pointers. For each root value
// (an alloca or byval argument) it records which calls receive the value
// (AllocaUsers) and which instructions may let it escape the current frame
// (EscapePoints). Both sets are consumed by markTails below.
104struct AllocaDerivedValueTracker {
105 // Start at a root value and walk its use-def chain to mark calls that use the
106 // value or a derived value in AllocaUsers, and places where it may escape in
107 // EscapePoints.
108 void walk(Value *Root) {
109 SmallVector<Use *, 32> Worklist;
// NOTE(review): doxygen line 110 is missing here — presumably the
// declaration of the `Visited` use-set (a SmallPtrSet<Use *, ...>) that the
// lambda below inserts into; confirm against the upstream file.
111
112 auto AddUsesToWorklist = [&](Value *V) {
113 for (auto &U : V->uses()) {
// De-duplicate: only enqueue a use the first time it is seen.
114 if (!Visited.insert(&U).second)
115 continue;
116 Worklist.push_back(&U);
117 }
118 };
119
120 AddUsesToWorklist(Root);
121
122 while (!Worklist.empty()) {
123 Use *U = Worklist.pop_back_val();
124 Instruction *I = cast<Instruction>(U->getUser());
125
126 switch (I->getOpcode()) {
127 case Instruction::Call:
128 case Instruction::Invoke: {
129 auto &CB = cast<CallBase>(*I);
130 // If the alloca-derived argument is passed byval it is not an escape
131 // point, or a use of an alloca. Calling with byval copies the contents
132 // of the alloca into argument registers or stack slots, which exist
133 // beyond the lifetime of the current frame.
134 if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
135 continue;
136 bool IsNocapture =
137 CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
138 callUsesLocalStack(CB, IsNocapture);
139 if (IsNocapture) {
140 // If the alloca-derived argument is passed in as nocapture, then it
141 // can't propagate to the call's return. That would be capturing.
142 continue;
143 }
// Otherwise the call result may itself be alloca-derived: fall
// through and walk the call's users too.
144 break;
145 }
146 case Instruction::Load: {
147 // The result of a load is not alloca-derived (unless an alloca has
148 // otherwise escaped, but this is a local analysis).
149 continue;
150 }
151 case Instruction::Store: {
// Storing the pointer itself (operand 0 is the value operand)
// publishes it to memory — that is an escape.
152 if (U->getOperandNo() == 0)
153 EscapePoints.insert(I);
154 continue; // Stores have no users to analyze.
155 }
156 case Instruction::BitCast:
157 case Instruction::GetElementPtr:
158 case Instruction::PHI:
159 case Instruction::Select:
160 case Instruction::AddrSpaceCast:
// Pure pointer-propagating instructions: the result is still
// alloca-derived, keep walking.
161 break;
162 default:
// Conservatively treat any other user as a potential escape.
163 EscapePoints.insert(I);
164 break;
165 }
166
167 AddUsesToWorklist(I);
168 }
169 }
170
171 void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
172 // Add it to the list of alloca users.
173 AllocaUsers.insert(&CB);
174
175 // If it's nocapture then it can't capture this alloca.
176 if (IsNocapture)
177 return;
178
179 // If it can write to memory, it can leak the alloca value.
180 if (!CB.onlyReadsMemory())
181 EscapePoints.insert(&CB);
182 }
183
// NOTE(review): doxygen lines 184-185 are missing here — the member
// declarations of the AllocaUsers and EscapePoints sets used above;
// confirm their exact types against the upstream file.
186};
187}
188
// Marks call instructions as `tail` when it can be shown they do not access
// the caller's stack frame (implements item 4 of the file header). Emits an
// optimization remark for each readnone call marked. Returns true if any
// call was modified.
// NOTE(review): the signature line (doxygen 189) is missing from this
// extraction; per this page's index it is
//   static bool markTails(Function &F, OptimizationRemarkEmitter *ORE).
190 if (F.callsFunctionThatReturnsTwice())
191 return false;
192
193 // The local stack holds all alloca instructions and all byval arguments.
194 AllocaDerivedValueTracker Tracker;
195 for (Argument &Arg : F.args()) {
196 if (Arg.hasByValAttr())
197 Tracker.walk(&Arg);
198 }
199 for (auto &BB : F) {
200 for (auto &I : BB)
201 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
202 Tracker.walk(AI);
203 }
204
205 bool Modified = false;
206
207 // Track whether a block is reachable after an alloca has escaped. Blocks that
208 // contain the escaping instruction will be marked as being visited without an
209 // escaped alloca, since that is how the block began.
210 enum VisitType {
211 UNVISITED,
212 UNESCAPED,
213 ESCAPED
214 };
// NOTE(review): doxygen line 215 is missing here — presumably the
// declaration of `Visited`, a map from BasicBlock* to VisitType (it is
// indexed as Visited[SuccBB] below); confirm against the upstream file.
216
217 // We propagate the fact that an alloca has escaped from block to successor.
218 // Visit the blocks that are propagating the escapedness first. To do this, we
219 // maintain two worklists.
220 SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;
221
222 // We may enter a block and visit it thinking that no alloca has escaped yet,
223 // then see an escape point and go back around a loop edge and come back to
224 // the same block twice. Because of this, we defer setting tail on calls when
225 // we first encounter them in a block. Every entry in this list does not
226 // statically use an alloca via use-def chain analysis, but may find an alloca
227 // through other means if the block turns out to be reachable after an escape
228 // point.
229 SmallVector<CallInst *, 32> DeferredTails;
230
231 BasicBlock *BB = &F.getEntryBlock();
232 VisitType Escaped = UNESCAPED;
233 do {
234 for (auto &I : *BB) {
235 if (Tracker.EscapePoints.count(&I))
236 Escaped = ESCAPED;
237
238 CallInst *CI = dyn_cast<CallInst>(&I);
239 // A PseudoProbeInst has the IntrInaccessibleMemOnly tag hence it is
240 // considered accessing memory and will be marked as a tail call if we
241 // don't bail out here.
242 if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I) ||
243 isa<PseudoProbeInst>(&I))
244 continue;
245
246 // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
247 // "kcfi".
248 bool IsNoTail = CI->isNoTailCall() ||
// NOTE(review): doxygen lines 249-251 are missing here — the remaining
// disjuncts of the IsNoTail condition (per the comment above, checks for
// the "clang.arc.attachedcall"/"ptrauth"/"kcfi" operand bundles);
// confirm against the upstream file.
252
253 if (!IsNoTail && CI->doesNotAccessMemory()) {
254 // A call to a readnone function whose arguments are all things computed
255 // outside this function can be marked tail. Even if you stored the
256 // alloca address into a global, a readnone function can't load the
257 // global anyhow.
258 //
259 // Note that this runs whether we know an alloca has escaped or not. If
260 // it has, then we can't trust Tracker.AllocaUsers to be accurate.
261 bool SafeToTail = true;
262 for (auto &Arg : CI->args()) {
263 if (isa<Constant>(Arg.getUser()))
264 continue;
265 if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
266 if (!A->hasByValAttr())
267 continue;
268 SafeToTail = false;
269 break;
270 }
271 if (SafeToTail) {
272 using namespace ore;
273 ORE->emit([&]() {
274 return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
275 << "marked as tail call candidate (readnone)";
276 });
277 CI->setTailCall();
278 Modified = true;
279 continue;
280 }
281 }
282
// Defer marking: a later escape on a loop edge may make this block
// ESCAPED, which is re-checked after the traversal completes.
283 if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI))
284 DeferredTails.push_back(CI);
285 }
286
// Propagate the escaped-ness of this block to its successors, queueing
// each successor whose state strictly increases.
287 for (auto *SuccBB : successors(BB)) {
288 auto &State = Visited[SuccBB];
289 if (State < Escaped) {
290 State = Escaped;
291 if (State == ESCAPED)
292 WorklistEscaped.push_back(SuccBB);
293 else
294 WorklistUnescaped.push_back(SuccBB);
295 }
296 }
297
// Drain the ESCAPED worklist first so escaped-ness reaches blocks before
// they are visited in the UNESCAPED state.
298 if (!WorklistEscaped.empty()) {
299 BB = WorklistEscaped.pop_back_val();
300 Escaped = ESCAPED;
301 } else {
302 BB = nullptr;
303 while (!WorklistUnescaped.empty()) {
304 auto *NextBB = WorklistUnescaped.pop_back_val();
305 if (Visited[NextBB] == UNESCAPED) {
306 BB = NextBB;
307 Escaped = UNESCAPED;
308 break;
309 }
310 }
311 }
312 } while (BB);
313
314 for (CallInst *CI : DeferredTails) {
315 if (Visited[CI->getParent()] != ESCAPED) {
316 // If the escape point was part way through the block, calls after the
317 // escape point wouldn't have been put into DeferredTails.
318 LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
319 CI->setTailCall();
320 Modified = true;
321 }
322 }
323
324 return Modified;
325}
326
327/// Return true if it is safe to move the specified
328/// instruction from after the call to before the call, assuming that all
329/// instructions between the call and this instruction are movable.
330///
// NOTE(review): the signature line (doxygen 331) is missing from this
// extraction; per this page's index it is
//   static bool canMoveAboveCall(Instruction *I, CallInst *CI,
//                                AliasAnalysis *AA).
// Debug intrinsics never block motion.
332 if (isa<DbgInfoIntrinsic>(I))
333 return true;
334
// A lifetime_end of a known local alloca is also harmless to hoist.
335 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
336 if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
337 llvm::findAllocaForValue(II->getArgOperand(1)))
338 return true;
339
340 // FIXME: We can move load/store/call/free instructions above the call if the
341 // call does not mod/ref the memory location being processed.
342 if (I->mayHaveSideEffects()) // This also handles volatile loads.
343 return false;
344
345 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
346 // Loads may always be moved above calls without side effects.
347 if (CI->mayHaveSideEffects()) {
348 // Non-volatile loads may be moved above a call with side effects if it
349 // does not write to memory and the load provably won't trap.
350 // Writes to memory only matter if they may alias the pointer
351 // being loaded from.
352 const DataLayout &DL = L->getModule()->getDataLayout();
353 if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
354 !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
355 L->getAlign(), DL, L))
356 return false;
357 }
358 }
359
360 // Otherwise, if this is a side-effect free instruction, check to make sure
361 // that it does not use the return value of the call. If it doesn't use the
362 // return value of the call, it must only use things that are defined before
363 // the call, or movable instructions between the call and the instruction
364 // itself.
365 return !is_contained(I->operands(), CI);
366}
367
// Decides whether instruction I, which blocks a straightforward TRE of call
// CI, can instead be handled by accumulator-recursion elimination: it must
// be an associative+commutative binary op that combines the call result with
// one other value and feeds only the return.
// NOTE(review): the signature line (doxygen 368) is missing from this
// extraction; per this page's index it is
//   static bool canTransformAccumulatorRecursion(Instruction *I,
//                                                CallInst *CI).
369 if (!I->isAssociative() || !I->isCommutative())
370 return false;
371
372 assert(I->getNumOperands() == 2 &&
373 "Associative/commutative operations should have 2 args!");
374
375 // Exactly one operand should be the result of the call instruction.
376 if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
377 (I->getOperand(0) != CI && I->getOperand(1) != CI))
378 return false;
379
380 // The only user of this instruction we allow is a single return instruction.
381 if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
382 return false;
383
384 return true;
385}
386
// Advances past any debug intrinsics and returns the first "real"
// instruction at or after iterator I.
// NOTE(review): the signature line (doxygen 387) is missing from this
// extraction; per this page's index it is
//   static Instruction *firstNonDbg(BasicBlock::iterator I).
388 while (isa<DbgInfoIntrinsic>(I))
389 ++I;
390 return &*I;
391}
392
393namespace {
// Stateful driver for the transformation: holds the analyses handed in by
// eliminate() plus the loop-header PHIs shared between all eliminated calls
// in one function.
394class TailRecursionEliminator {
395 Function &F;
// NOTE(review): doxygen line 396 is missing here — presumably
// `const TargetTransformInfo *TTI;` (the constructor below initializes
// TTI(TTI)); confirm against the upstream file.
397 AliasAnalysis *AA;
// NOTE(review): doxygen line 398 is missing here — presumably
// `OptimizationRemarkEmitter *ORE;` (the constructor initializes ORE(ORE)).
399 DomTreeUpdater &DTU;
400
401 // The below are shared state we want to have available when eliminating any
402 // calls in the function. These values should be populated by
403 // createTailRecurseLoopHeader the first time we find a call we can eliminate.
404 BasicBlock *HeaderBB = nullptr;
405 SmallVector<PHINode *, 8> ArgumentPHIs;
406
407 // PHI node to store our return value.
408 PHINode *RetPN = nullptr;
409
410 // i1 PHI node to track if we have a valid return value stored in RetPN.
411 PHINode *RetKnownPN = nullptr;
412
413 // Vector of select instructions we inserted. These selects use RetKnownPN
414 // to either propagate RetPN or select a new return value.
// NOTE(review): doxygen line 415 is missing here — the declaration of the
// `RetSelects` vector described by the comment above (presumably a
// SmallVector<SelectInst *, ...>); confirm against the upstream file.
416
417 // The below are shared state needed when performing accumulator recursion.
418 // These values should be populated by insertAccumulator the first time we
419 // find an elimination that requires an accumulator.
420
421 // PHI node to store our current accumulated value.
422 PHINode *AccPN = nullptr;
423
424 // The instruction doing the accumulating.
425 Instruction *AccumulatorRecursionInstr = nullptr;
426
427 TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
// NOTE(review): doxygen line 428 is missing here — the middle parameter
// line of this constructor (presumably `AliasAnalysis *AA,
// OptimizationRemarkEmitter *ORE,`, matching the initializer list below).
429 DomTreeUpdater &DTU)
430 : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU) {}
431
432 CallInst *findTRECandidate(BasicBlock *BB);
433
434 void createTailRecurseLoopHeader(CallInst *CI);
435
436 void insertAccumulator(Instruction *AccRecInstr);
437
438 bool eliminateCall(CallInst *CI);
439
440 void cleanupAndFinalize();
441
442 bool processBlock(BasicBlock &BB);
443
444 void copyByValueOperandIntoLocalTemp(CallInst *CI, int OpndIdx);
445
446 void copyLocalTempOfByValueOperandIntoArguments(CallInst *CI, int OpndIdx);
447
448public:
// Sole public entry point; see the definition near the end of the file.
449 static bool eliminate(Function &F, const TargetTransformInfo *TTI,
// NOTE(review): doxygen line 450 is missing here — the middle parameter
// line of eliminate (presumably `AliasAnalysis *AA,
// OptimizationRemarkEmitter *ORE,`, matching its definition below).
451 DomTreeUpdater &DTU);
452};
454
// Scans block BB backwards from its terminator for a `tail`-marked call to
// the current function F that might be eliminable. Returns the call, or
// nullptr if there is none (or the special single-block-wrapper case below
// applies).
455CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) {
456 Instruction *TI = BB->getTerminator();
457
458 if (&BB->front() == TI) // Make sure there is something before the terminator.
459 return nullptr;
460
461 // Scan backwards from the return, checking to see if there is a tail call in
462 // this block. If so, set CI to it.
463 CallInst *CI = nullptr;
464 BasicBlock::iterator BBI(TI);
465 while (true) {
466 CI = dyn_cast<CallInst>(BBI);
467 if (CI && CI->getCalledFunction() == &F)
468 break;
469
470 if (BBI == BB->begin())
471 return nullptr; // Didn't find a potential tail call.
472 --BBI;
473 }
474
475 assert((!CI->isTailCall() || !CI->isNoTailCall()) &&
476 "Incompatible call site attributes(Tail,NoTail)");
477 if (!CI->isTailCall())
478 return nullptr;
479
480 // As a special case, detect code like this:
481 // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
482 // and disable this xform in this case, because the code generator will
483 // lower the call to fabs into inline code.
484 if (BB == &F.getEntryBlock() &&
485 firstNonDbg(BB->front().getIterator()) == CI &&
486 firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
// NOTE(review): doxygen line 487 is missing here — the final conjunct of
// this condition (presumably a TTI query checking the callee is lowered
// inline rather than as a real call, per the comment above); confirm
// against the upstream file.
488 // A single-block function with just a call and a return. Check that
489 // the arguments match.
490 auto I = CI->arg_begin(), E = CI->arg_end();
491 Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
492 for (; I != E && FI != FE; ++I, ++FI)
493 if (*I != &*FI) break;
// All arguments are passed straight through — this is the wrapper
// pattern, so refuse to transform it.
494 if (I == E && FI == FE)
495 return nullptr;
496 }
497
498 return CI;
499}
500
501void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
502 HeaderBB = &F.getEntryBlock();
503 BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
504 NewEntry->takeName(HeaderBB);
505 HeaderBB->setName("tailrecurse");
506 BranchInst *BI = BranchInst::Create(HeaderBB, NewEntry);
507 BI->setDebugLoc(CI->getDebugLoc());
508
509 // Move all fixed sized allocas from HeaderBB to NewEntry.
510 for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
511 NEBI = NewEntry->begin();
512 OEBI != E;)
513 if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
514 if (isa<ConstantInt>(AI->getArraySize()))
515 AI->moveBefore(&*NEBI);
516
517 // Now that we have created a new block, which jumps to the entry
518 // block, insert a PHI node for each argument of the function.
519 // For now, we initialize each PHI to only have the real arguments
520 // which are passed in.
521 Instruction *InsertPos = &HeaderBB->front();
522 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
523 PHINode *PN =
524 PHINode::Create(I->getType(), 2, I->getName() + ".tr", InsertPos);
525 I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
526 PN->addIncoming(&*I, NewEntry);
527 ArgumentPHIs.push_back(PN);
528 }
529
530 // If the function doen't return void, create the RetPN and RetKnownPN PHI
531 // nodes to track our return value. We initialize RetPN with poison and
532 // RetKnownPN with false since we can't know our return value at function
533 // entry.
534 Type *RetType = F.getReturnType();
535 if (!RetType->isVoidTy()) {
536 Type *BoolType = Type::getInt1Ty(F.getContext());
537 RetPN = PHINode::Create(RetType, 2, "ret.tr", InsertPos);
538 RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr", InsertPos);
539
540 RetPN->addIncoming(PoisonValue::get(RetType), NewEntry);
541 RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
542 }
543
544 // The entry block was changed from HeaderBB to NewEntry.
545 // The forward DominatorTree needs to be recalculated when the EntryBB is
546 // changed. In this corner-case we recalculate the entire tree.
547 DTU.recalculate(*NewEntry->getParent());
548}
549
// Creates the accumulator PHI (AccPN) in the loop header for accumulator
// recursion elimination and remembers the accumulating instruction. Called
// at most once per function (asserted below).
550void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
551 assert(!AccPN && "Trying to insert multiple accumulators");
552
553 AccumulatorRecursionInstr = AccRecInstr;
554
555 // Start by inserting a new PHI node for the accumulator.
556 pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
557 AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
558 "accumulator.tr", &HeaderBB->front());
559
560 // Loop over all of the predecessors of the tail recursion block. For the
561 // real entry into the function we seed the PHI with the identity constant for
562 // the accumulation operation. For any other existing branches to this block
563 // (due to other tail recursions eliminated) the accumulator is not modified.
564 // Because we haven't added the branch in the current block to HeaderBB yet,
565 // it will not show up as a predecessor.
566 for (pred_iterator PI = PB; PI != PE; ++PI) {
567 BasicBlock *P = *PI;
568 if (P == &F.getEntryBlock()) {
// NOTE(review): doxygen line 569 is missing here — the start of the
// statement defining `Identity` (presumably
// `Value *Identity = ConstantExpr::getBinOpIdentity(`, continued on the
// next line); confirm against the upstream file.
570 AccRecInstr->getOpcode(), AccRecInstr->getType());
571 AccPN->addIncoming(Identity, P);
572 } else {
573 AccPN->addIncoming(AccPN, P);
574 }
575 }
576
577 ++NumAccumAdded;
578}
579
580// Creates a copy of contents of ByValue operand of the specified
581// call instruction into the newly created temporarily variable.
582void TailRecursionEliminator::copyByValueOperandIntoLocalTemp(CallInst *CI,
583 int OpndIdx) {
584 Type *AggTy = CI->getParamByValType(OpndIdx);
585 assert(AggTy);
586 const DataLayout &DL = F.getParent()->getDataLayout();
587
588 // Get alignment of byVal operand.
589 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
590
591 // Create alloca for temporarily byval operands.
592 // Put alloca into the entry block.
593 Value *NewAlloca = new AllocaInst(
594 AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
595 CI->getArgOperand(OpndIdx)->getName(), &*F.getEntryBlock().begin());
596
// NOTE(review): doxygen line 597 is missing here — presumably
// `IRBuilder<> Builder(CI);`, which the uses of `Builder` below require;
// confirm against the upstream file.
598 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
599
600 // Copy data from byvalue operand into the temporarily variable.
601 Builder.CreateMemCpy(NewAlloca, /*DstAlign*/ Alignment,
602 CI->getArgOperand(OpndIdx),
603 /*SrcAlign*/ Alignment, Size);
// Redirect the call to read from the frame-local copy instead of the
// original byval pointer.
604 CI->setArgOperand(OpndIdx, NewAlloca);
605}
606
607// Creates a copy from temporarily variable(keeping value of ByVal argument)
608// into the corresponding function argument location.
609void TailRecursionEliminator::copyLocalTempOfByValueOperandIntoArguments(
610 CallInst *CI, int OpndIdx) {
611 Type *AggTy = CI->getParamByValType(OpndIdx);
612 assert(AggTy);
613 const DataLayout &DL = F.getParent()->getDataLayout();
614
615 // Get alignment of byVal operand.
616 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
617
// NOTE(review): doxygen line 618 is missing here — presumably
// `IRBuilder<> Builder(CI);`, which the uses of `Builder` below require;
// confirm against the upstream file.
619 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
620
621 // Copy data from the temporarily variable into corresponding
622 // function argument location.
623 Builder.CreateMemCpy(F.getArg(OpndIdx), /*DstAlign*/ Alignment,
624 CI->getArgOperand(OpndIdx),
625 /*SrcAlign*/ Alignment, Size);
626}
627
// Performs the actual transformation of one candidate tail call: verifies
// all instructions between the call and the return are movable (or form an
// accumulator), wires argument/return/accumulator PHIs, then replaces the
// call+return pair with a branch back to the loop header. Returns true on
// success.
628bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
629 ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());
630
631 // Ok, we found a potential tail call. We can currently only transform the
632 // tail call if all of the instructions between the call and the return are
633 // movable to above the call itself, leaving the call next to the return.
634 // Check that this is the case now.
635 Instruction *AccRecInstr = nullptr;
636 BasicBlock::iterator BBI(CI);
637 for (++BBI; &*BBI != Ret; ++BBI) {
638 if (canMoveAboveCall(&*BBI, CI, AA))
639 continue;
640
641 // If we can't move the instruction above the call, it might be because it
642 // is an associative and commutative operation that could be transformed
643 // using accumulator recursion elimination. Check to see if this is the
644 // case, and if so, remember which instruction accumulates for later.
645 if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
646 return false; // We cannot eliminate the tail recursion!
647
648 // Yes, this is accumulator recursion. Remember which instruction
649 // accumulates.
650 AccRecInstr = &*BBI;
651 }
652
653 BasicBlock *BB = Ret->getParent();
654
655 using namespace ore;
656 ORE->emit([&]() {
657 return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
658 << "transforming tail recursion into loop";
659 });
660
661 // OK! We can transform this tail call. If this is the first one found,
662 // create the new entry block, allowing us to branch back to the old entry.
663 if (!HeaderBB)
664 createTailRecurseLoopHeader(CI);
665
666 // Copy values of ByVal operands into local temporarily variables.
667 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
668 if (CI->isByValArgument(I))
669 copyByValueOperandIntoLocalTemp(CI, I);
670 }
671
672 // Ok, now that we know we have a pseudo-entry block WITH all of the
673 // required PHI nodes, add entries into the PHI node for the actual
674 // parameters passed into the tail-recursive call.
675 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
676 if (CI->isByValArgument(I)) {
677 copyLocalTempOfByValueOperandIntoArguments(CI, I);
678 ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);
679 } else
680 ArgumentPHIs[I]->addIncoming(CI->getArgOperand(I), BB);
681 }
682
683 if (AccRecInstr) {
684 insertAccumulator(AccRecInstr);
685
686 // Rewrite the accumulator recursion instruction so that it does not use
687 // the result of the call anymore, instead, use the PHI node we just
688 // inserted.
// The boolean index picks whichever operand currently is CI.
689 AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
690 }
691
692 // Update our return value tracking
693 if (RetPN) {
694 if (Ret->getReturnValue() == CI || AccRecInstr) {
695 // Defer selecting a return value
696 RetPN->addIncoming(RetPN, BB);
697 RetKnownPN->addIncoming(RetKnownPN, BB);
698 } else {
699 // We found a return value we want to use, insert a select instruction to
700 // select it if we don't already know what our return value will be and
701 // store the result in our return value PHI node.
// NOTE(review): doxygen line 702 is missing here — the start of the
// statement defining `SI` (presumably `SelectInst *SI =
// SelectInst::Create(`, continued on the next line, mirroring the
// analogous select construction in cleanupAndFinalize); confirm
// against the upstream file.
703 RetKnownPN, RetPN, Ret->getReturnValue(), "current.ret.tr", Ret);
704 RetSelects.push_back(SI);
705
706 RetPN->addIncoming(SI, BB);
707 RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
708 }
709
710 if (AccPN)
711 AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
712 }
713
714 // Now that all of the PHI nodes are in place, remove the call and
715 // ret instructions, replacing them with an unconditional branch.
716 BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret);
717 NewBI->setDebugLoc(CI->getDebugLoc());
718
719 Ret->eraseFromParent(); // Remove return.
720 CI->eraseFromParent(); // Remove call.
721 DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
722 ++NumEliminated;
723 return true;
724}
725
// Post-processing after all blocks have been visited: simplifies trivial
// argument PHIs, removes unused return-tracking PHIs, and materializes the
// accumulator / return-value selects in front of every remaining return.
726void TailRecursionEliminator::cleanupAndFinalize() {
727 // If we eliminated any tail recursions, it's possible that we inserted some
728 // silly PHI nodes which just merge an initial value (the incoming operand)
729 // with themselves. Check to see if we did and clean up our mess if so. This
730 // occurs when a function passes an argument straight through to its tail
731 // call.
732 for (PHINode *PN : ArgumentPHIs) {
733 // If the PHI Node is a dynamic constant, replace it with the value it is.
734 if (Value *PNV = simplifyInstruction(PN, F.getParent()->getDataLayout())) {
735 PN->replaceAllUsesWith(PNV);
736 PN->eraseFromParent();
737 }
738 }
739
740 if (RetPN) {
741 if (RetSelects.empty()) {
742 // If we didn't insert any select instructions, then we know we didn't
743 // store a return value and we can remove the PHI nodes we inserted.
744 RetPN->dropAllReferences();
745 RetPN->eraseFromParent();
746
747 RetKnownPN->dropAllReferences();
748 RetKnownPN->eraseFromParent();
749
750 if (AccPN) {
751 // We need to insert a copy of our accumulator instruction before any
752 // return in the function, and return its result instead.
753 Instruction *AccRecInstr = AccumulatorRecursionInstr;
754 for (BasicBlock &BB : F) {
755 ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
756 if (!RI)
757 continue;
758
759 Instruction *AccRecInstrNew = AccRecInstr->clone();
760 AccRecInstrNew->setName("accumulator.ret.tr");
// The boolean index picks whichever operand of the clone is AccPN and
// replaces it with the value being returned.
761 AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
762 RI->getOperand(0));
763 AccRecInstrNew->insertBefore(RI);
764 RI->setOperand(0, AccRecInstrNew);
765 }
766 }
767 } else {
768 // We need to insert a select instruction before any return left in the
769 // function to select our stored return value if we have one.
770 for (BasicBlock &BB : F) {
771 ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
772 if (!RI)
773 continue;
774
// NOTE(review): doxygen line 775 is missing here — the start of the
// statement defining `SI` (presumably `SelectInst *SI =
// SelectInst::Create(`, continued on the next line); confirm against
// the upstream file.
776 RetKnownPN, RetPN, RI->getOperand(0), "current.ret.tr", RI);
777 RetSelects.push_back(SI);
778 RI->setOperand(0, SI);
779 }
780
781 if (AccPN) {
782 // We need to insert a copy of our accumulator instruction before any
783 // of the selects we inserted, and select its result instead.
784 Instruction *AccRecInstr = AccumulatorRecursionInstr;
785 for (SelectInst *SI : RetSelects) {
786 Instruction *AccRecInstrNew = AccRecInstr->clone();
787 AccRecInstrNew->setName("accumulator.ret.tr");
788 AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
789 SI->getFalseValue());
790 AccRecInstrNew->insertBefore(SI);
791 SI->setFalseValue(AccRecInstrNew);
792 }
793 }
794 }
795 }
796}
797
798bool TailRecursionEliminator::processBlock(BasicBlock &BB) {
799 Instruction *TI = BB.getTerminator();
800
801 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
802 if (BI->isConditional())
803 return false;
804
805 BasicBlock *Succ = BI->getSuccessor(0);
806 ReturnInst *Ret = dyn_cast<ReturnInst>(Succ->getFirstNonPHIOrDbg(true));
807
808 if (!Ret)
809 return false;
810
811 CallInst *CI = findTRECandidate(&BB);
812
813 if (!CI)
814 return false;
815
816 LLVM_DEBUG(dbgs() << "FOLDING: " << *Succ
817 << "INTO UNCOND BRANCH PRED: " << BB);
818 FoldReturnIntoUncondBranch(Ret, Succ, &BB, &DTU);
819 ++NumRetDuped;
820
821 // If all predecessors of Succ have been eliminated by
822 // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
823 // because the ret instruction in there is still using a value which
824 // eliminateCall will attempt to remove. This block can only contain
825 // instructions that can't have uses, therefore it is safe to remove.
826 if (pred_empty(Succ))
827 DTU.deleteBB(Succ);
828
829 eliminateCall(CI);
830 return true;
831 } else if (isa<ReturnInst>(TI)) {
832 CallInst *CI = findTRECandidate(&BB);
833
834 if (CI)
835 return eliminateCall(CI);
836 }
837
838 return false;
839}
840
// Public driver shared by the legacy and new pass managers: marks tail
// calls, bails out for varargs / dynamic-alloca functions, then runs the
// loop transformation over every block and finalizes. Returns true if
// anything changed.
841bool TailRecursionEliminator::eliminate(Function &F,
// NOTE(review): doxygen lines 842 and 844 are missing below — the middle
// parameter lines (presumably `const TargetTransformInfo *TTI,` and
// `OptimizationRemarkEmitter *ORE,`, matching the declaration in the class
// and the uses of TTI/ORE in this body); confirm against the upstream file.
843 AliasAnalysis *AA,
845 DomTreeUpdater &DTU) {
846 if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
847 return false;
848
849 bool MadeChange = false;
850 MadeChange |= markTails(F, ORE);
851
852 // If this function is a varargs function, we won't be able to PHI the args
853 // right, so don't even try to convert it...
854 if (F.getFunctionType()->isVarArg())
855 return MadeChange;
856
857 if (!canTRE(F))
858 return MadeChange;
859
860 // Change any tail recursive calls to loops.
861 TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU);
862
863 for (BasicBlock &BB : F)
864 MadeChange |= TRE.processBlock(BB);
865
866 TRE.cleanupAndFinalize();
867
868 return MadeChange;
869}
870
871namespace {
// Legacy pass-manager wrapper: gathers the analyses and forwards to
// TailRecursionEliminator::eliminate.
872struct TailCallElim : public FunctionPass {
873 static char ID; // Pass identification, replacement for typeid
874 TailCallElim() : FunctionPass(ID) {
// NOTE(review): doxygen line 875 is missing here — presumably the
// initializeTailCallElimPass(*PassRegistry::...) registration call that
// legacy-pass constructors conventionally make; confirm against upstream.
876 }
877
878 void getAnalysisUsage(AnalysisUsage &AU) const override {
// NOTE(review): doxygen lines 879-884 are missing here — the
// AU.addRequired/addPreserved declarations for the analyses fetched in
// runOnFunction below (TTI, AA results, ORE, and dominator-tree passes);
// confirm against the upstream file.
885 }
886
887 bool runOnFunction(Function &F) override {
888 if (skipFunction(F))
889 return false;
890
// The (post)dominator trees are optional for this pass: use them when
// already available, otherwise pass nullptr to the updater.
891 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
892 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
893 auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
894 auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
895 // There is no noticeable performance difference here between Lazy and Eager
896 // UpdateStrategy based on some test results. It is feasible to switch the
897 // UpdateStrategy to Lazy if we find it profitable later.
898 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
899
900 return TailRecursionEliminator::eliminate(
901 F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
902 &getAnalysis<AAResultsWrapperPass>().getAAResults(),
903 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
904 }
905};
906}
907
// Legacy pass registration and the public factory function.
908char TailCallElim::ID = 0;
909INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
910 false, false)
// NOTE(review): doxygen lines 911-914 are missing here — per this page's
// index they are the INITIALIZE_PASS_DEPENDENCY(...) lines and the matching
// INITIALIZE_PASS_END for this pass; confirm against the upstream file.
915
916// Public interface to the TailCallElimination pass
// NOTE(review): doxygen line 917 is missing here — the factory's signature
// (presumably `FunctionPass *llvm::createTailCallEliminationPass() {`);
// confirm against the upstream file.
918 return new TailCallElim();
919}
920
// New pass-manager entry point (TailCallElimPass::run).
// NOTE(review): this extraction is heavily gapped — doxygen lines 920-922
// (the run() signature), 924-928 (fetching TTI/AA/ORE and the dominator
// trees from the analysis manager), 932 (the DomTreeUpdater construction),
// and 937-939 (building the PreservedAnalyses set returned as PA) are all
// missing; confirm every reconstructed detail against the upstream file.
923
929 // There is no noticeable performance difference here between Lazy and Eager
930 // UpdateStrategy based on some test results. It is feasible to switch the
931 // UpdateStrategy to Lazy if we find it profitable later.
933 bool Changed = TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU);
934
// Nothing changed: every analysis is trivially preserved.
935 if (!Changed)
936 return PreservedAnalyses::all();
940 return PA;
941}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
assume Assume Builder
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Module.h This file contains the declarations for the Module class.
print must be executed print the must be executed context for all instructions
#define P(N)
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static bool canTRE(Function &F)
Scan the specified function for alloca instructions.
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA)
Return true if it is safe to move the specified instruction from after the call to before the call,...
Tail Call Elimination
static Instruction * firstNonDbg(BasicBlock::iterator I)
#define DEBUG_TYPE
static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI)
static bool markTails(Function &F, OptimizationRemarkEmitter *ORE)
This pass exposes codegen information to IR-level passes.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
an instruction to allocate memory on the stack
Definition: Instructions.h:58
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:793
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:326
const Instruction & front() const
Definition: BasicBlock.h:338
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:223
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1190
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1412
bool doesNotAccessMemory(unsigned OpNo) const
Definition: InstrTypes.h:1735
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1332
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1699
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: InstrTypes.h:1763
bool onlyReadsMemory(unsigned OpNo) const
Definition: InstrTypes.h:1741
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition: InstrTypes.h:1772
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2129
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1357
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1362
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1338
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1348
unsigned arg_size() const
Definition: InstrTypes.h:1355
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
bool isTailCall() const
void setTailCall(bool IsTc=true)
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2672
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:833
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:840
This is an important base class in LLVM.
Definition: Constant.h:41
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:314
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
virtual bool runOnFunction(Function &F)=0
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:174
Legacy wrapper pass to provide the GlobalsAAResult object.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2564
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:88
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:365
const BasicBlock * getParent() const
Definition: Instruction.h:90
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:168
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:82
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:362
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
An instruction for reading from memory.
Definition: Instructions.h:177
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: Pass.cpp:98
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1743
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
Return a value (possibly void), from a function.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
bool empty() const
Definition: SmallVector.h:94
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Analysis pass providing the TargetTransformInfo.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt1Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:378
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:535
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:384
self_iterator getIterator()
Definition: ilist_node.h:82
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
FunctionPass * createTailCallEliminationPass()
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
auto successors(const MachineBasicBlock *BB)
void initializeTailCallElimPass(PassRegistry &)
ReturnInst * FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred, DomTreeUpdater *DTU=nullptr)
This method duplicates the specified return instruction into a predecessor which ends in an unconditi...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1976
bool isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size, const DataLayout &DL, Instruction *ScanFrom=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if we know that executing a load from this value cannot trap.
Definition: Loads.cpp:352
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)