1//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file defines ObjC ARC optimizations. ARC stands for Automatic
11/// Reference Counting and is a system for managing reference counts for objects
12/// in Objective C.
13///
14/// The optimizations performed include elimination of redundant, partially
15/// redundant, and inconsequential reference count operations, elimination of
16/// redundant weak pointer operations, and numerous minor simplifications.
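///
/// For example (illustrative IR, not taken from any particular test case,
/// and assuming @use can neither release nor autorelease the object), a
/// retain/release pair with nothing in between that can decrement the count:
///
///     call i8* @objc_retain(i8* %x)
///     call void @use(i8* %x)
///     call void @objc_release(i8* %x)
///
/// may be removed entirely, leaving only the call to @use.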
17///
18/// WARNING: This file knows about certain library functions. It recognizes them
19/// by name, and hardwires knowledge of their semantics.
20///
21/// WARNING: This file knows about how certain Objective-C library functions are
22/// used. Naive LLVM IR transformations which would otherwise be
23/// behavior-preserving may break these assumptions.
24//
25//===----------------------------------------------------------------------===//
26
28#include "BlotMapVector.h"
29#include "DependencyAnalysis.h"
30#include "ObjCARC.h"
31#include "ProvenanceAnalysis.h"
32#include "PtrState.h"
33#include "llvm/ADT/DenseMap.h"
34#include "llvm/ADT/STLExtras.h"
37#include "llvm/ADT/Statistic.h"
42#include "llvm/IR/BasicBlock.h"
43#include "llvm/IR/CFG.h"
44#include "llvm/IR/Constant.h"
45#include "llvm/IR/Constants.h"
48#include "llvm/IR/Function.h"
51#include "llvm/IR/InstrTypes.h"
52#include "llvm/IR/Instruction.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Type.h"
57#include "llvm/IR/User.h"
58#include "llvm/IR/Value.h"
62#include "llvm/Support/Debug.h"
66#include <cassert>
67#include <iterator>
68#include <utility>
69
70using namespace llvm;
71using namespace llvm::objcarc;
72
73#define DEBUG_TYPE "objc-arc-opts"
74
75static cl::opt<unsigned> MaxPtrStates("arc-opt-max-ptr-states",
77 cl::desc("Maximum number of ptr states the optimizer keeps track of"),
78 cl::init(4095));
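// For example (illustrative invocation), running the pass through opt with
// "-arc-opt-max-ptr-states=500" lowers this cap, making the pass give up on
// retain/release pairing sooner for pathologically large functions.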
79
80/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
81/// @{
82
83/// This is similar to GetRCIdentityRoot but it stops as soon
84/// as it finds a value with multiple uses.
85static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
86 // ConstantData (like ConstantPointerNull and UndefValue) is used across
87 // modules. It's never a single-use value.
88 if (isa<ConstantData>(Arg))
89 return nullptr;
90
91 if (Arg->hasOneUse()) {
92 if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
93 return FindSingleUseIdentifiedObject(BC->getOperand(0));
94 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
95 if (GEP->hasAllZeroIndices())
96 return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
97 if (IsForwarding(GetBasicARCInstKind(Arg)))
98 return FindSingleUseIdentifiedObject(
99 cast<CallInst>(Arg)->getArgOperand(0));
100 if (!IsObjCIdentifiedObject(Arg))
101 return nullptr;
102 return Arg;
103 }
104
105 // If we found an identifiable object that has multiple uses, but they are
106 // all trivial uses, we can still consider this to be a single-use value.
107 if (IsObjCIdentifiedObject(Arg)) {
108 for (const User *U : Arg->users())
109 if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
110 return nullptr;
111
112 return Arg;
113 }
114
115 return nullptr;
116}
117
118/// @}
119///
120/// \defgroup ARCOpt ARC Optimization.
121/// @{
122
123// TODO: On code like this:
124//
125// objc_retain(%x)
126// stuff_that_cannot_release()
127// objc_autorelease(%x)
128// stuff_that_cannot_release()
129// objc_retain(%x)
130// stuff_that_cannot_release()
131// objc_autorelease(%x)
132//
133// The second retain and autorelease can be deleted.
134
135// TODO: It should be possible to delete
136// objc_autoreleasePoolPush and objc_autoreleasePoolPop
137// pairs if nothing is actually autoreleased between them. Also, autorelease
138// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
139// after inlining) can be turned into plain release calls.
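//
// For example (illustrative), a pool into which nothing is autoreleased:
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ... code that performs no autoreleases ...
//   call void @objc_autoreleasePoolPop(i8* %pool)
//
// could have both calls deleted.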
140
141 // TODO: Critical-edge splitting. If the optimal insertion point is
142// a critical edge, the current algorithm has to fail, because it doesn't
143// know how to split edges. It should be possible to make the optimizer
144// think in terms of edges, rather than blocks, and then split critical
145// edges on demand.
146
147 // TODO: OptimizeSequences could be generalized to be interprocedural.
148
149// TODO: Recognize that a bunch of other objc runtime calls have
150// non-escaping arguments and non-releasing arguments, and may be
151// non-autoreleasing.
152
153// TODO: Sink autorelease calls as far as possible. Unfortunately we
154// usually can't sink them past other calls, which would be the main
155// case where it would be useful.
156
157// TODO: The pointer returned from objc_loadWeakRetained is retained.
158
159// TODO: Delete release+retain pairs (rare).
160
161STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
162STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
163STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
164STATISTIC(NumRets, "Number of return value forwarding "
165 "retain+autoreleases eliminated");
166STATISTIC(NumRRs, "Number of retain+release paths eliminated");
167STATISTIC(NumPeeps, "Number of calls peephole-optimized");
168#ifndef NDEBUG
169STATISTIC(NumRetainsBeforeOpt,
170 "Number of retains before optimization");
171STATISTIC(NumReleasesBeforeOpt,
172 "Number of releases before optimization");
173STATISTIC(NumRetainsAfterOpt,
174 "Number of retains after optimization");
175STATISTIC(NumReleasesAfterOpt,
176 "Number of releases after optimization");
177#endif
178
179namespace {
180
181 /// Per-BasicBlock state.
182 class BBState {
183 /// The number of unique control paths from the entry which can reach this
184 /// block.
185 unsigned TopDownPathCount = 0;
186
187 /// The number of unique control paths to exits from this block.
188 unsigned BottomUpPathCount = 0;
189
190 /// The top-down traversal uses this to record information known about a
191 /// pointer at the bottom of each block.
192 BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;
193
194 /// The bottom-up traversal uses this to record information known about a
195 /// pointer at the top of each block.
196 BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;
197
198 /// Effective predecessors of the current block ignoring ignorable edges and
199 /// ignored backedges.
200 SmallVector<BasicBlock *, 2> Preds;
201
202 /// Effective successors of the current block ignoring ignorable edges and
203 /// ignored backedges.
204 SmallVector<BasicBlock *, 2> Succs;
205
206 public:
207 static const unsigned OverflowOccurredValue;
208
209 BBState() = default;
210
211 using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
212 using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;
213
214 top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
215 top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
216 const_top_down_ptr_iterator top_down_ptr_begin() const {
217 return PerPtrTopDown.begin();
218 }
219 const_top_down_ptr_iterator top_down_ptr_end() const {
220 return PerPtrTopDown.end();
221 }
222 bool hasTopDownPtrs() const {
223 return !PerPtrTopDown.empty();
224 }
225
226 unsigned top_down_ptr_list_size() const {
227 return std::distance(top_down_ptr_begin(), top_down_ptr_end());
228 }
229
230 using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
231 using const_bottom_up_ptr_iterator =
232 decltype(PerPtrBottomUp)::const_iterator;
233
234 bottom_up_ptr_iterator bottom_up_ptr_begin() {
235 return PerPtrBottomUp.begin();
236 }
237 bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
238 const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
239 return PerPtrBottomUp.begin();
240 }
241 const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
242 return PerPtrBottomUp.end();
243 }
244 bool hasBottomUpPtrs() const {
245 return !PerPtrBottomUp.empty();
246 }
247
248 unsigned bottom_up_ptr_list_size() const {
249 return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
250 }
251
252 /// Mark this block as being an entry block, which has one path from the
253 /// entry by definition.
254 void SetAsEntry() { TopDownPathCount = 1; }
255
256 /// Mark this block as being an exit block, which has one path to an exit by
257 /// definition.
258 void SetAsExit() { BottomUpPathCount = 1; }
259
260 /// Attempt to find the PtrState object describing the top down state for
261 /// pointer Arg. Return a new initialized PtrState describing the top down
262 /// state for Arg if we do not find one.
263 TopDownPtrState &getPtrTopDownState(const Value *Arg) {
264 return PerPtrTopDown[Arg];
265 }
266
267 /// Attempt to find the PtrState object describing the bottom up state for
268 /// pointer Arg. Return a new initialized PtrState describing the bottom up
269 /// state for Arg if we do not find one.
270 BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
271 return PerPtrBottomUp[Arg];
272 }
273
274 /// Attempt to find the PtrState object describing the bottom up state for
275 /// pointer Arg.
276 bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
277 return PerPtrBottomUp.find(Arg);
278 }
279
280 void clearBottomUpPointers() {
281 PerPtrBottomUp.clear();
282 }
283
284 void clearTopDownPointers() {
285 PerPtrTopDown.clear();
286 }
287
288 void InitFromPred(const BBState &Other);
289 void InitFromSucc(const BBState &Other);
290 void MergePred(const BBState &Other);
291 void MergeSucc(const BBState &Other);
292
293 /// Compute the number of possible unique paths from an entry to an exit
294 /// which pass through this block. This is only valid after both the
295 /// top-down and bottom-up traversals are complete.
296 ///
297 /// Returns true if overflow occurred. Returns false if overflow did not
298 /// occur.
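/// For example, a block reachable along 3 distinct paths from the entry,
/// with 2 distinct paths from it to an exit, lies on 3 * 2 = 6
/// entry-to-exit paths (illustrative arithmetic only).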
299 bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
300 if (TopDownPathCount == OverflowOccurredValue ||
301 BottomUpPathCount == OverflowOccurredValue)
302 return true;
303 unsigned long long Product =
304 (unsigned long long)TopDownPathCount*BottomUpPathCount;
305 // Overflow occurred if any of the upper bits of Product are set or if all
306 // of the lower bits of Product are set.
307 return (Product >> 32) ||
308 ((PathCount = Product) == OverflowOccurredValue);
309 }
310
311 // Specialized CFG utilities.
312 using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;
313
314 edge_iterator pred_begin() const { return Preds.begin(); }
315 edge_iterator pred_end() const { return Preds.end(); }
316 edge_iterator succ_begin() const { return Succs.begin(); }
317 edge_iterator succ_end() const { return Succs.end(); }
318
319 void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
320 void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
321
322 bool isExit() const { return Succs.empty(); }
323 };
324
325} // end anonymous namespace
326
327const unsigned BBState::OverflowOccurredValue = 0xffffffff;
328
329namespace llvm {
330
331 raw_ostream &operator<<(raw_ostream &OS,
332 BBState &BBState) LLVM_ATTRIBUTE_UNUSED;
333
334} // end namespace llvm
335
336void BBState::InitFromPred(const BBState &Other) {
337 PerPtrTopDown = Other.PerPtrTopDown;
338 TopDownPathCount = Other.TopDownPathCount;
339}
340
341void BBState::InitFromSucc(const BBState &Other) {
342 PerPtrBottomUp = Other.PerPtrBottomUp;
343 BottomUpPathCount = Other.BottomUpPathCount;
344}
345
346/// The top-down traversal uses this to merge information about predecessors to
347/// form the initial state for a new block.
348void BBState::MergePred(const BBState &Other) {
349 if (TopDownPathCount == OverflowOccurredValue)
350 return;
351
352 // Other.TopDownPathCount can be 0, in which case it is either dead or a
353 // loop backedge. Loop backedges are special.
354 TopDownPathCount += Other.TopDownPathCount;
355
356 // In order to be consistent, we clear the top down pointers when adding
357 // Other.TopDownPathCount causes TopDownPathCount to become
358 // OverflowOccurredValue, even though "true" overflow has not occurred.
359 if (TopDownPathCount == OverflowOccurredValue) {
360 clearTopDownPointers();
361 return;
362 }
363
364 // Check for overflow. If we have overflow, fall back to conservative
365 // behavior.
366 if (TopDownPathCount < Other.TopDownPathCount) {
367 TopDownPathCount = OverflowOccurredValue;
368 clearTopDownPointers();
369 return;
370 }
371
372 // For each entry in the other set, if our set has an entry with the same key,
373 // merge the entries. Otherwise, copy the entry and merge it with an empty
374 // entry.
375 for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
376 MI != ME; ++MI) {
377 auto Pair = PerPtrTopDown.insert(*MI);
378 Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
379 /*TopDown=*/true);
380 }
381
382 // For each entry in our set, if the other set doesn't have an entry with the
383 // same key, force it to merge with an empty entry.
384 for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
385 if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
386 MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
387}
388
389/// The bottom-up traversal uses this to merge information about successors to
390/// form the initial state for a new block.
391void BBState::MergeSucc(const BBState &Other) {
392 if (BottomUpPathCount == OverflowOccurredValue)
393 return;
394
395 // Other.BottomUpPathCount can be 0, in which case it is either dead or a
396 // loop backedge. Loop backedges are special.
397 BottomUpPathCount += Other.BottomUpPathCount;
398
399 // In order to be consistent, we clear the bottom up pointers when adding
400 // Other.BottomUpPathCount causes BottomUpPathCount to become
401 // OverflowOccurredValue, even though "true" overflow has not occurred.
402 if (BottomUpPathCount == OverflowOccurredValue) {
403 clearBottomUpPointers();
404 return;
405 }
406
407 // Check for overflow. If we have overflow, fall back to conservative
408 // behavior.
409 if (BottomUpPathCount < Other.BottomUpPathCount) {
410 BottomUpPathCount = OverflowOccurredValue;
411 clearBottomUpPointers();
412 return;
413 }
414
415 // For each entry in the other set, if our set has an entry with the
416 // same key, merge the entries. Otherwise, copy the entry and merge
417 // it with an empty entry.
418 for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
419 MI != ME; ++MI) {
420 auto Pair = PerPtrBottomUp.insert(*MI);
421 Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
422 /*TopDown=*/false);
423 }
424
425 // For each entry in our set, if the other set doesn't have an entry
426 // with the same key, force it to merge with an empty entry.
427 for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
428 ++MI)
429 if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
430 MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
431}
432
433 raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
434 // Dump the pointers we are tracking.
435 OS << " TopDown State:\n";
436 if (!BBInfo.hasTopDownPtrs()) {
437 LLVM_DEBUG(dbgs() << " NONE!\n");
438 } else {
439 for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
440 I != E; ++I) {
441 const PtrState &P = I->second;
442 OS << " Ptr: " << *I->first
443 << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
444 << "\n ImpreciseRelease: "
445 << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
446 << " HasCFGHazards: "
447 << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
448 << " KnownPositive: "
449 << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
450 << " Seq: "
451 << P.GetSeq() << "\n";
452 }
453 }
454
455 OS << " BottomUp State:\n";
456 if (!BBInfo.hasBottomUpPtrs()) {
457 LLVM_DEBUG(dbgs() << " NONE!\n");
458 } else {
459 for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
460 I != E; ++I) {
461 const PtrState &P = I->second;
462 OS << " Ptr: " << *I->first
463 << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
464 << "\n ImpreciseRelease: "
465 << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
466 << " HasCFGHazards: "
467 << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
468 << " KnownPositive: "
469 << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
470 << " Seq: "
471 << P.GetSeq() << "\n";
472 }
473 }
474
475 return OS;
476}
477
478namespace {
479
480 /// The main ARC optimization pass.
481class ObjCARCOpt {
482 bool Changed = false;
483 bool CFGChanged = false;
484 ProvenanceAnalysis PA;
485
486 /// A cache of references to runtime entry point constants.
487 ARCRuntimeEntryPoints EP;
488
489 /// A cache of MDKinds that can be passed into other functions to propagate
490 /// MDKind identifiers.
491 ARCMDKindCache MDKindCache;
492
493 BundledRetainClaimRVs *BundledInsts = nullptr;
494
495 /// A flag indicating whether the optimization that removes or moves
496 /// retain/release pairs should be performed.
497 bool DisableRetainReleasePairing = false;
498
499 /// Flags which determine whether each of the interesting runtime functions
500 /// is in fact used in the current function.
501 unsigned UsedInThisFunction;
502
503 DenseMap<BasicBlock *, ColorVector> BlockEHColors;
504
505 bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
506 void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
507 ARCInstKind &Class);
508 void OptimizeIndividualCalls(Function &F);
509
510 /// Optimize an individual call, optionally passing the
511 /// GetArgRCIdentityRoot if it has already been computed.
512 void OptimizeIndividualCallImpl(Function &F, Instruction *Inst,
513 ARCInstKind Class, const Value *Arg);
514
515 /// Try to optimize an AutoreleaseRV with a RetainRV or UnsafeClaimRV. If the
516 /// optimization occurs, returns true to indicate that the caller should
517 /// assume the instructions are dead.
518 bool OptimizeInlinedAutoreleaseRVCall(Function &F, Instruction *Inst,
519 const Value *&Arg, ARCInstKind Class,
520 Instruction *AutoreleaseRV,
521 const Value *&AutoreleaseRVArg);
522
523 void CheckForCFGHazards(const BasicBlock *BB,
524 DenseMap<const BasicBlock *, BBState> &BBStates,
525 BBState &MyStates) const;
526 bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
527 BlotMapVector<Value *, RRInfo> &Retains,
528 BBState &MyStates);
529 bool VisitBottomUp(BasicBlock *BB,
530 DenseMap<const BasicBlock *, BBState> &BBStates,
531 BlotMapVector<Value *, RRInfo> &Retains);
532 bool VisitInstructionTopDown(
533 Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
534 const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
535 &ReleaseInsertPtToRCIdentityRoots);
536 bool VisitTopDown(
537 BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
538 DenseMap<Value *, RRInfo> &Releases,
539 const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
540 &ReleaseInsertPtToRCIdentityRoots);
541 bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
542 BlotMapVector<Value *, RRInfo> &Retains,
543 DenseMap<Value *, RRInfo> &Releases);
544
545 void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
546 BlotMapVector<Value *, RRInfo> &Retains,
547 DenseMap<Value *, RRInfo> &Releases,
548 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);
549
550 bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
551 BlotMapVector<Value *, RRInfo> &Retains,
552 DenseMap<Value *, RRInfo> &Releases, Module *M,
553 Instruction *Retain,
554 SmallVectorImpl<Instruction *> &DeadInsts,
555 RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
556 Value *Arg, bool KnownSafe,
557 bool &AnyPairsCompletelyEliminated);
558
559 bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
560 BlotMapVector<Value *, RRInfo> &Retains,
561 DenseMap<Value *, RRInfo> &Releases, Module *M);
562
563 void OptimizeWeakCalls(Function &F);
564
565 bool OptimizeSequences(Function &F);
566
567 void OptimizeReturns(Function &F);
568
569 template <typename PredicateT>
570 static void cloneOpBundlesIf(CallBase *CI,
571 SmallVectorImpl<OperandBundleDef> &OpBundles,
572 PredicateT Predicate) {
573 for (unsigned I = 0, E = CI->getNumOperandBundles(); I != E; ++I) {
574 OperandBundleUse B = CI->getOperandBundleAt(I);
575 if (Predicate(B))
576 OpBundles.emplace_back(B);
577 }
578 }
579
580 void addOpBundleForFunclet(BasicBlock *BB,
581 SmallVectorImpl<OperandBundleDef> &OpBundles) {
582 if (!BlockEHColors.empty()) {
583 const ColorVector &CV = BlockEHColors.find(BB)->second;
584 assert(CV.size() > 0 && "Uncolored block");
585 for (BasicBlock *EHPadBB : CV)
586 if (auto *EHPad = dyn_cast<FuncletPadInst>(EHPadBB->getFirstNonPHI())) {
587 OpBundles.emplace_back("funclet", EHPad);
588 return;
589 }
590 }
591 }
592
593#ifndef NDEBUG
594 void GatherStatistics(Function &F, bool AfterOptimization = false);
595#endif
596
597 public:
598 void init(Function &F);
599 bool run(Function &F, AAResults &AA);
600 bool hasCFGChanged() const { return CFGChanged; }
601};
602} // end anonymous namespace
603
604/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
605/// not a return value.
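/// For example (illustrative IR; %p and %x are placeholder names), when the
/// operand is a loaded value rather than the return value of the
/// immediately preceding call:
///
///   %x = load i8*, i8** %p
///   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
///
/// the call is rewritten to use plain @objc_retain instead.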
606bool
607ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
608 // Check for the argument being from an immediately preceding call or invoke.
609 const Value *Arg = GetArgRCIdentityRoot(RetainRV);
610 if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
611 if (Call->getParent() == RetainRV->getParent()) {
612 BasicBlock::const_iterator I = Call->getIterator();
613 ++I;
614 while (IsNoopInstruction(&*I))
615 ++I;
616 if (&*I == RetainRV)
617 return false;
618 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
619 BasicBlock *RetainRVParent = RetainRV->getParent();
620 if (II->getNormalDest() == RetainRVParent) {
621 BasicBlock::const_iterator I = RetainRVParent->begin();
622 while (IsNoopInstruction(&*I))
623 ++I;
624 if (&*I == RetainRV)
625 return false;
626 }
627 }
628 }
629
630 assert(!BundledInsts->contains(RetainRV) &&
631 "a bundled retainRV's argument should be a call");
632
633 // Turn it to a plain objc_retain.
634 Changed = true;
635 ++NumPeeps;
636
637 LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
638 "objc_retain since the operand is not a return value.\n"
639 "Old = "
640 << *RetainRV << "\n");
641
642 Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
643 cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
644
645 LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");
646
647 return false;
648}
649
650bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
651 Function &F, Instruction *Inst, const Value *&Arg, ARCInstKind Class,
652 Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
653 if (BundledInsts->contains(Inst))
654 return false;
655
656 // Must be in the same basic block.
657 assert(Inst->getParent() == AutoreleaseRV->getParent());
658
659 // Must operate on the same root.
660 Arg = GetArgRCIdentityRoot(Inst);
661 AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
662 if (Arg != AutoreleaseRVArg) {
663 // If there isn't an exact match, check if we have equivalent PHIs.
664 const PHINode *PN = dyn_cast<PHINode>(Arg);
665 if (!PN)
666 return false;
667
668 SmallVector<const Value *, 4> ArgUsers;
669 getEquivalentPHIs(*PN, ArgUsers);
670 if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
671 return false;
672 }
673
674 // Okay, this is a match. Merge them.
675 ++NumPeeps;
676 LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
677 << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");
678
679 // Delete the RV pair, starting with the AutoreleaseRV.
680 AutoreleaseRV->replaceAllUsesWith(
681 cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
682 Changed = true;
684 if (Class == ARCInstKind::RetainRV) {
685 // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
686 Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
687 EraseInstruction(Inst);
688 return true;
689 }
690
691 // UnsafeClaimRV is a frontend peephole for RetainRV + Release. Since the
692 // AutoreleaseRV and RetainRV cancel out, replace UnsafeClaimRV with Release.
693 assert(Class == ARCInstKind::UnsafeClaimRV);
694 Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
695 CallInst *Release =
696 CallInst::Create(EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "",
697 Inst->getIterator());
698 assert(IsAlwaysTail(ARCInstKind::UnsafeClaimRV) &&
699 "Expected UnsafeClaimRV to be safe to tail call");
700 Release->setTailCall();
701 Inst->replaceAllUsesWith(CallArg);
702 EraseInstruction(Inst);
703
704 // Run the normal optimizations on Release.
705 OptimizeIndividualCallImpl(F, Release, ARCInstKind::Release, Arg);
706 return true;
707}
708
709/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
710/// used as a return value.
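/// For example (illustrative IR), when the autoreleased value is stored
/// rather than returned:
///
///   %0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
///   store i8* %0, i8** %slot
///   ret void
///
/// the call is rewritten to use plain @objc_autorelease instead.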
711void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
712 Instruction *AutoreleaseRV,
713 ARCInstKind &Class) {
714 // Check for a return of the pointer value.
715 const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
716
717 // If the argument is ConstantPointerNull or UndefValue, its other users
718 // aren't actually interesting to look at.
719 if (isa<ConstantData>(Ptr))
720 return;
721
722 SmallVector<const Value *, 2> Users;
723 Users.push_back(Ptr);
724
725 // Add PHIs that are equivalent to Ptr to Users.
726 if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
727 getEquivalentPHIs(*PN, Users);
728
729 do {
730 Ptr = Users.pop_back_val();
731 for (const User *U : Ptr->users()) {
732 if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
733 return;
734 if (isa<BitCastInst>(U))
735 Users.push_back(U);
736 }
737 } while (!Users.empty());
738
739 Changed = true;
740 ++NumPeeps;
741
742 LLVM_DEBUG(
743 dbgs() << "Transforming objc_autoreleaseReturnValue => "
744 "objc_autorelease since its operand is not used as a return "
745 "value.\n"
746 "Old = "
747 << *AutoreleaseRV << "\n");
748
749 CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
750 Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
751 AutoreleaseRVCI->setCalledFunction(NewDecl);
752 AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
753 Class = ARCInstKind::Autorelease;
754
755 LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
756}
757
758/// Visit each call, one at a time, and make simplifications without doing any
759/// additional analysis.
760void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
761 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
762 // Reset all the flags in preparation for recomputing them.
763 UsedInThisFunction = 0;
764
765 // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
766 // with RetainRV and UnsafeClaimRV.
767 Instruction *DelayedAutoreleaseRV = nullptr;
768 const Value *DelayedAutoreleaseRVArg = nullptr;
769 auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
770 assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
771 DelayedAutoreleaseRV = AutoreleaseRV;
772 DelayedAutoreleaseRVArg = nullptr;
773 };
774 auto optimizeDelayedAutoreleaseRV = [&]() {
775 if (!DelayedAutoreleaseRV)
776 return;
777 OptimizeIndividualCallImpl(F, DelayedAutoreleaseRV,
778 ARCInstKind::AutoreleaseRV,
779 DelayedAutoreleaseRVArg);
780 setDelayedAutoreleaseRV(nullptr);
781 };
782 auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
783 // Nothing to delay, but we may as well skip the logic below.
784 if (!DelayedAutoreleaseRV)
785 return true;
786
787 // If we hit the end of the basic block we're not going to find an RV-pair.
788 // Stop delaying.
789 if (NonARCInst->isTerminator())
790 return false;
791
792 // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
793 // UnsafeClaimRV, it's probably safe to skip over even opaque function calls
794 // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
795 // have the same RCIdentityRoot. However, what really matters is
796 // skipping instructions or intrinsics that the inliner could leave behind;
797 // be conservative for now and don't skip over opaque calls, which could
798 // potentially include other ARC calls.
799 auto *CB = dyn_cast<CallBase>(NonARCInst);
800 if (!CB)
801 return true;
802 return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
803 };
804
805 // Visit all objc_* calls in F.
806 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
807 Instruction *Inst = &*I++;
808
809 if (auto *CI = dyn_cast<CallInst>(Inst))
810 if (objcarc::hasAttachedCallOpBundle(CI)) {
811 BundledInsts->insertRVCall(I->getIterator(), CI);
812 Changed = true;
813 }
814
815 ARCInstKind Class = GetBasicARCInstKind(Inst);
816
817 // Skip this loop if this instruction isn't itself an ARC intrinsic.
818 const Value *Arg = nullptr;
819 switch (Class) {
820 default:
821 optimizeDelayedAutoreleaseRV();
822 break;
823 case ARCInstKind::CallOrUser:
824 case ARCInstKind::User:
825 case ARCInstKind::None:
826 // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
827 // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
828 // now.
829 if (!shouldDelayAutoreleaseRV(Inst))
830 optimizeDelayedAutoreleaseRV();
831 continue;
832 case ARCInstKind::AutoreleaseRV:
833 optimizeDelayedAutoreleaseRV();
834 setDelayedAutoreleaseRV(Inst);
835 continue;
836 case ARCInstKind::RetainRV:
837 case ARCInstKind::UnsafeClaimRV:
838 if (DelayedAutoreleaseRV) {
839 // We have a potential RV pair. Check if they cancel out.
840 if (OptimizeInlinedAutoreleaseRVCall(F, Inst, Arg, Class,
841 DelayedAutoreleaseRV,
842 DelayedAutoreleaseRVArg)) {
843 setDelayedAutoreleaseRV(nullptr);
844 continue;
845 }
846 optimizeDelayedAutoreleaseRV();
847 }
848 break;
849 }
850
851 OptimizeIndividualCallImpl(F, Inst, Class, Arg);
852 }
853
854 // Catch the final delayed AutoreleaseRV.
855 optimizeDelayedAutoreleaseRV();
856}
857
858/// This function returns true if the value is inert. An ObjC ARC runtime call
859/// taking an inert operand can be safely deleted.
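/// For example (illustrative IR), a runtime call whose operand is null:
///
///   call void @objc_release(i8* null)
///
/// is inert and can simply be deleted.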
860static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
861 V = V->stripPointerCasts();
862
863 if (IsNullOrUndef(V))
864 return true;
865
866 // See if this is a global attribute annotated with an 'objc_arc_inert'.
867 if (auto *GV = dyn_cast<GlobalVariable>(V))
868 if (GV->hasAttribute("objc_arc_inert"))
869 return true;
870
871 if (auto PN = dyn_cast<PHINode>(V)) {
872 // Ignore this phi if it has already been discovered.
873 if (!VisitedPhis.insert(PN).second)
874 return true;
875 // Look through phis's operands.
876 for (Value *Opnd : PN->incoming_values())
877 if (!isInertARCValue(Opnd, VisitedPhis))
878 return false;
879 return true;
880 }
881
882 return false;
883}
884
885void ObjCARCOpt::OptimizeIndividualCallImpl(Function &F, Instruction *Inst,
886 ARCInstKind Class,
887 const Value *Arg) {
888 LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
889
890 // We can delete this call if it takes an inert value.
891 SmallPtrSet<Value *, 1> VisitedPhis;
892
893 if (BundledInsts->contains(Inst)) {
894 UsedInThisFunction |= 1 << unsigned(Class);
895 return;
896 }
897
898 if (IsNoopOnGlobal(Class))
899 if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
900 if (!Inst->getType()->isVoidTy())
901 Inst->replaceAllUsesWith(Inst->getOperand(0));
902 Inst->eraseFromParent();
903 Changed = true;
904 return;
905 }
906
907 switch (Class) {
908 default:
909 break;
910
911 // Delete no-op casts. These function calls have special semantics, but
912 // the semantics are entirely implemented via lowering in the front-end,
913 // so by the time they reach the optimizer, they are just no-op calls
914 // which return their argument.
915 //
916 // There are gray areas here, as the ability to cast reference-counted
917 // pointers to raw void* and back allows code to break ARC assumptions,
918 // however these are currently considered to be unimportant.
919 case ARCInstKind::NoopCast:
920 Changed = true;
921 ++NumNoops;
922 LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
923 EraseInstruction(Inst);
924 return;
925
926 // If the pointer-to-weak-pointer is null, it's undefined behavior.
927 case ARCInstKind::StoreWeak:
928 case ARCInstKind::LoadWeak:
929 case ARCInstKind::LoadWeakRetained:
930 case ARCInstKind::InitWeak:
931 case ARCInstKind::DestroyWeak: {
932 CallInst *CI = cast<CallInst>(Inst);
933 if (IsNullOrUndef(CI->getArgOperand(0))) {
934 Changed = true;
935 new StoreInst(ConstantInt::getTrue(CI->getContext()),
936 PoisonValue::get(PointerType::getUnqual(CI->getContext())),
937 CI->getIterator());
938 Value *NewValue = PoisonValue::get(CI->getType());
939 LLVM_DEBUG(
940 dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
941 "\nOld = "
942 << *CI << "\nNew = " << *NewValue << "\n");
943 CI->replaceAllUsesWith(NewValue);
944 CI->eraseFromParent();
945 return;
946 }
947 break;
948 }
949 case ARCInstKind::CopyWeak:
950 case ARCInstKind::MoveWeak: {
951 CallInst *CI = cast<CallInst>(Inst);
952 if (IsNullOrUndef(CI->getArgOperand(0)) ||
953 IsNullOrUndef(CI->getArgOperand(1))) {
954 Changed = true;
955 new StoreInst(ConstantInt::getTrue(CI->getContext()),
956 PoisonValue::get(PointerType::getUnqual(CI->getContext())),
957 CI->getIterator());
958
959 Value *NewValue = PoisonValue::get(CI->getType());
960 LLVM_DEBUG(
961 dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
962 "\nOld = "
963 << *CI << "\nNew = " << *NewValue << "\n");
964
965 CI->replaceAllUsesWith(NewValue);
966 CI->eraseFromParent();
967 return;
968 }
969 break;
970 }
971 case ARCInstKind::RetainRV:
972 if (OptimizeRetainRVCall(F, Inst))
973 return;
974 break;
975 case ARCInstKind::AutoreleaseRV:
976 OptimizeAutoreleaseRVCall(F, Inst, Class);
977 break;
978 }
979
980 // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
981 if (IsAutorelease(Class) && Inst->use_empty()) {
982 CallInst *Call = cast<CallInst>(Inst);
983 const Value *Arg = Call->getArgOperand(0);
984 Arg = FindSingleUseIdentifiedObject(Arg);
985 if (Arg) {
986 Changed = true;
987 ++NumAutoreleases;
988
989 // Create the declaration lazily.
990 LLVMContext &C = Inst->getContext();
991
992 Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
993 CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
994 Call->getIterator());
995 NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
996 MDNode::get(C, {}));
997
998 LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
999 "since x is otherwise unused.\nOld: "
1000 << *Call << "\nNew: " << *NewCall << "\n");
1001
1002 EraseInstruction(Call);
1003 Inst = NewCall;
1004 Class = ARCInstKind::Release;
1005 }
1006 }
1007
1008 // For functions which can never be passed stack arguments, add
1009 // a tail keyword.
1010 if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
1011 Changed = true;
1012 LLVM_DEBUG(
1013 dbgs() << "Adding tail keyword to function since it can never be "
1014 "passed stack args: "
1015 << *Inst << "\n");
1016 cast<CallInst>(Inst)->setTailCall();
1017 }
1018
1019 // Ensure that functions that can never have a "tail" keyword due to the
1020 // semantics of ARC truly do not do so.
1021 if (IsNeverTail(Class)) {
1022 Changed = true;
1023 LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
1024 << "\n");
1025 cast<CallInst>(Inst)->setTailCall(false);
1026 }
1027
1028 // Set nounwind as needed.
1029 if (IsNoThrow(Class)) {
1030 Changed = true;
1031 LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
1032 << "\n");
1033 cast<CallInst>(Inst)->setDoesNotThrow();
1034 }
1035
1036 // Note: This catches instructions unrelated to ARC.
1037 if (!IsNoopOnNull(Class)) {
1038 UsedInThisFunction |= 1 << unsigned(Class);
1039 return;
1040 }
1041
1042 // If we haven't already looked up the root, look it up now.
1043 if (!Arg)
1044 Arg = GetArgRCIdentityRoot(Inst);
1045
1046 // ARC calls with null are no-ops. Delete them.
1047 if (IsNullOrUndef(Arg)) {
1048 Changed = true;
1049 ++NumNoops;
1050 LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
1051 << "\n");
1052 EraseInstruction(Inst);
1053 return;
1054 }
1055
1056 // Keep track of which of retain, release, autorelease, and retain_block
1057 // are actually present in this function.
1058 UsedInThisFunction |= 1 << unsigned(Class);
1059
1060 // If Arg is a PHI, and one or more incoming values to the
1061 // PHI are null, and the call is control-equivalent to the PHI, and there
1062 // are no relevant side effects between the PHI and the call, and the call
1063 // is not a release that doesn't have the clang.imprecise_release tag, the
1064 // call could be pushed up to just those paths with non-null incoming
1065 // values. For now, don't bother splitting critical edges for this.
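// For example (illustrative IR; block and value names are placeholders),
// given
//
//   %v = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
//   call void @objc_release(i8* %v), !clang.imprecise_release !0
//
// the release could be cloned into %bb2, where the incoming value is
// non-null, and the original call deleted, since releasing null is a no-op.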
1066 if (Class == ARCInstKind::Release &&
1067 !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
1068 return;
1069
1070 SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
1071 Worklist.push_back(std::make_pair(Inst, Arg));
1072 do {
1073 std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
1074 Inst = Pair.first;
1075 Arg = Pair.second;
1076
1077 const PHINode *PN = dyn_cast<PHINode>(Arg);
1078 if (!PN)
1079 continue;
1080
1081 // Determine if the PHI has any null operands, or any incoming
1082 // critical edges.
1083 bool HasNull = false;
1084 bool HasCriticalEdges = false;
1085 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1086 Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
1087 if (IsNullOrUndef(Incoming))
1088 HasNull = true;
1089 else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
1090 1) {
1091 HasCriticalEdges = true;
1092 break;
1093 }
1094 }
1095 // If we have null operands and no critical edges, optimize.
1096 if (HasCriticalEdges)
1097 continue;
1098 if (!HasNull)
1099 continue;
1100
1101 Instruction *DepInst = nullptr;
1102
1103 // Check that there is nothing that cares about the reference
1104 // count between the call and the phi.
1105 switch (Class) {
1106 case ARCInstKind::Retain:
1107 case ARCInstKind::RetainBlock:
1108 // These can always be moved up.
1109 break;
1110 case ARCInstKind::Release:
1111 // These can't be moved across things that care about the retain
1112 // count.
1113 DepInst = findSingleDependency(CanChangeRetainCount, Arg,
1114 Inst->getParent(), Inst, PA);
1115 break;
1116 case ARCInstKind::Autorelease:
1117 // These can't be moved across autorelease pool scope boundaries.
1118 DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
1119 Inst->getParent(), Inst, PA);
1120 break;
1121 case ARCInstKind::UnsafeClaimRV:
1122 case ARCInstKind::RetainRV:
1123 case ARCInstKind::AutoreleaseRV:
1124 // Don't move these; the RV optimization depends on the autoreleaseRV
1125 // being tail called, and the retainRV being immediately after a call
1126 // (which might still happen if we get lucky with codegen layout, but
1127 // it's not worth taking the chance).
1128 continue;
1129 default:
1130 llvm_unreachable("Invalid dependence flavor");
1131 }
1132
1133 if (DepInst != PN)
1134 continue;
1135
1136 Changed = true;
1137 ++NumPartialNoops;
1138 // Clone the call into each predecessor that has a non-null value.
1139 CallInst *CInst = cast<CallInst>(Inst);
1140 Type *ParamTy = CInst->getArgOperand(0)->getType();
1141 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1142 Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
1143 if (IsNullOrUndef(Incoming))
1144 continue;
1145 Value *Op = PN->getIncomingValue(i);
1146 BasicBlock::iterator InsertPos =
1147 PN->getIncomingBlock(i)->back().getIterator();
1148 SmallVector<OperandBundleDef, 1> OpBundles;
1149 cloneOpBundlesIf(CInst, OpBundles, [](const OperandBundleUse &B) {
1150 return B.getTagID() != LLVMContext::OB_funclet;
1151 });
1152 addOpBundleForFunclet(InsertPos->getParent(), OpBundles);
1153 CallInst *Clone = CallInst::Create(CInst, OpBundles);
1154 if (Op->getType() != ParamTy)
1155 Op = new BitCastInst(Op, ParamTy, "", InsertPos);
1156 Clone->setArgOperand(0, Op);
1157 Clone->insertBefore(*InsertPos->getParent(), InsertPos);
1158
1159 LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
1160 "And inserting clone at "
1161 << *InsertPos << "\n");
1162 Worklist.push_back(std::make_pair(Clone, Incoming));
1163 }
1164 // Erase the original call.
1165 LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
1166 EraseInstruction(CInst);
1167 } while (!Worklist.empty());
1168}
1169
1170/// If we have a top down pointer in the S_Use state, make sure that there are
1171/// no CFG hazards by checking the states of various bottom up pointers.
1172static void CheckForUseCFGHazard(const Sequence SuccSSeq,
1173 const bool SuccSRRIKnownSafe,
1174 TopDownPtrState &S,
1175 bool &SomeSuccHasSame,
1176 bool &AllSuccsHaveSame,
1177 bool &NotAllSeqEqualButKnownSafe,
1178 bool &ShouldContinue) {
1179 switch (SuccSSeq) {
1180 case S_CanRelease: {
1181 if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
1182 S.ClearSequenceProgress();
1183 break;
1184 }
1185 S.SetCFGHazardAfflicted(true);
1186 ShouldContinue = true;
1187 break;
1188 }
1189 case S_Use:
1190 SomeSuccHasSame = true;
1191 break;
1192 case S_Stop:
1193 case S_MovableRelease:
1194 if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
1195 AllSuccsHaveSame = false;
1196 else
1197 NotAllSeqEqualButKnownSafe = true;
1198 break;
1199 case S_Retain:
1200 llvm_unreachable("bottom-up pointer in retain state!");
1201 case S_None:
1202 llvm_unreachable("This should have been handled earlier.");
1203 }
1204}
1205
1206/// If we have a Top Down pointer in the S_CanRelease state, make sure that
1207/// there are no CFG hazards by checking the states of various bottom up
1208/// pointers.
1209static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
1210 const bool SuccSRRIKnownSafe,
1211 TopDownPtrState &S,
1212 bool &SomeSuccHasSame,
1213 bool &AllSuccsHaveSame,
1214 bool &NotAllSeqEqualButKnownSafe) {
1215 switch (SuccSSeq) {
1216 case S_CanRelease:
1217 SomeSuccHasSame = true;
1218 break;
1219 case S_Stop:
1220 case S_MovableRelease:
1221 case S_Use:
1222 if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
1223 AllSuccsHaveSame = false;
1224 else
1225 NotAllSeqEqualButKnownSafe = true;
1226 break;
1227 case S_Retain:
1228 llvm_unreachable("bottom-up pointer in retain state!");
1229 case S_None:
1230 llvm_unreachable("This should have been handled earlier.");
1231 }
1232}
1233
1234/// Check for critical edges, loop boundaries, irreducible control flow, or
1235/// other CFG structures where moving code across the edge would result in it
1236/// being executed more.
1237void
1238ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
1240 BBState &MyStates) const {
1241 // If any top-down local-use or possible-dec has a succ which is earlier in
1242 // the sequence, forget it.
1243 for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
1244 I != E; ++I) {
1245 TopDownPtrState &S = I->second;
1246 const Sequence Seq = I->second.GetSeq();
1247
1248 // We only care about S_Retain, S_CanRelease, and S_Use.
1249 if (Seq == S_None)
1250 continue;
1251
1252 // Make sure that if extra top down states are added in the future that this
1253 // code is updated to handle it.
1254 assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
1255 "Unknown top down sequence state.");
1256
1257 const Value *Arg = I->first;
1258 bool SomeSuccHasSame = false;
1259 bool AllSuccsHaveSame = true;
1260 bool NotAllSeqEqualButKnownSafe = false;
1261
1262 for (const BasicBlock *Succ : successors(BB)) {
1263 // If VisitBottomUp has pointer information for this successor, take
1264 // what we know about it.
1265 DenseMap<const BasicBlock *, BBState>::const_iterator BBI =
1266 BBStates.find(Succ);
1267 assert(BBI != BBStates.end());
1268 const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
1269 const Sequence SuccSSeq = SuccS.GetSeq();
1270
1271 // If, bottom up, the pointer is in the S_None state, clear the sequence
1272 // progress, since the bottom-up sequence has finished, suggesting a
1273 // mismatch between retains and releases. This is true for all three
1274 // cases that we are handling here: S_Retain, S_Use, and
1275 // S_CanRelease.
1276 if (SuccSSeq == S_None) {
1277 S.ClearSequenceProgress();
1278 continue;
1279 }
1280
1281 // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
1283 const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
1284
1285 // *NOTE* We do not use Seq from above here since we are allowing for
1286 // S.GetSeq() to change while we are visiting basic blocks.
1287 switch(S.GetSeq()) {
1288 case S_Use: {
1289 bool ShouldContinue = false;
1290 CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
1291 AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
1292 ShouldContinue);
1293 if (ShouldContinue)
1294 continue;
1295 break;
1296 }
1297 case S_CanRelease:
1298 CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
1299 SomeSuccHasSame, AllSuccsHaveSame,
1300 NotAllSeqEqualButKnownSafe);
1301 break;
1302 case S_Retain:
1303 case S_None:
1304 case S_Stop:
1305 case S_MovableRelease:
1306 break;
1307 }
1308 }
1309
1310 // If the state at the other end of any of the successor edges
1311 // matches the current state, require all edges to match. This
1312 // guards against loops in the middle of a sequence.
1313 if (SomeSuccHasSame && !AllSuccsHaveSame) {
1314 S.ClearSequenceProgress();
1315 } else if (NotAllSeqEqualButKnownSafe) {
1316 // If, setting aside the fact that we are known safe, we would have cleared
1317 // the state, stop code motion. This is because whether or not it is safe to
1318 // remove RR pairs via KnownSafe is an orthogonal concept to whether we
1319 // are allowed to perform code motion.
1320 S.SetCFGHazardAfflicted(true);
1321 }
1322 }
1323}
1324
1325bool ObjCARCOpt::VisitInstructionBottomUp(
1326 Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
1327 BBState &MyStates) {
1328 bool NestingDetected = false;
1329 ARCInstKind Class = GetBasicARCInstKind(Inst);
1330 const Value *Arg = nullptr;
1331
1332 LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");
1333
1334 switch (Class) {
1335 case ARCInstKind::Release: {
1336 Arg = GetArgRCIdentityRoot(Inst);
1337
1338 BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1339 NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
1340 break;
1341 }
1342 case ARCInstKind::RetainBlock:
1343 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1344 // objc_retainBlocks to objc_retains. Thus at this point any
1345 // objc_retainBlocks that we see are not optimizable.
1346 break;
1347 case ARCInstKind::Retain:
1348 case ARCInstKind::RetainRV: {
1349 Arg = GetArgRCIdentityRoot(Inst);
1350 BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1351 if (S.MatchWithRetain()) {
1352 // Don't do retain+release tracking for ARCInstKind::RetainRV, because
1353 // it's better to let it remain as the first instruction after a call.
1354 if (Class != ARCInstKind::RetainRV) {
1355 LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
1356 Retains[Inst] = S.GetRRInfo();
1357 }
1358 S.ClearSequenceProgress();
1359 }
1360 // A retain moving bottom up can be a use.
1361 break;
1362 }
1363 case ARCInstKind::AutoreleasepoolPop:
1364 // Conservatively, clear MyStates for all known pointers.
1365 MyStates.clearBottomUpPointers();
1366 return NestingDetected;
1367 case ARCInstKind::AutoreleasepoolPush:
1368 case ARCInstKind::None:
1369 // These are irrelevant.
1370 return NestingDetected;
1371 default:
1372 break;
1373 }
1374
1375 // Consider any other possible effects of this instruction on each
1376 // pointer being tracked.
1377 for (auto MI = MyStates.bottom_up_ptr_begin(),
1378 ME = MyStates.bottom_up_ptr_end();
1379 MI != ME; ++MI) {
1380 const Value *Ptr = MI->first;
1381 if (Ptr == Arg)
1382 continue; // Handled above.
1383 BottomUpPtrState &S = MI->second;
1384
1385 if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
1386 continue;
1387
1388 S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
1389 }
1390
1391 return NestingDetected;
1392}
1393
1394 bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1395 DenseMap<const BasicBlock *, BBState> &BBStates,
1396 BlotMapVector<Value *, RRInfo> &Retains) {
1397 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
1398
1399 bool NestingDetected = false;
1400 BBState &MyStates = BBStates[BB];
1401
1402 // Merge the states from each successor to compute the initial state
1403 // for the current block.
1404 BBState::edge_iterator SI(MyStates.succ_begin()),
1405 SE(MyStates.succ_end());
1406 if (SI != SE) {
1407 const BasicBlock *Succ = *SI;
1408 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
1409 assert(I != BBStates.end());
1410 MyStates.InitFromSucc(I->second);
1411 ++SI;
1412 for (; SI != SE; ++SI) {
1413 Succ = *SI;
1414 I = BBStates.find(Succ);
1415 assert(I != BBStates.end());
1416 MyStates.MergeSucc(I->second);
1417 }
1418 }
1419
1420 LLVM_DEBUG(dbgs() << "Before:\n"
1421 << BBStates[BB] << "\n"
1422 << "Performing Dataflow:\n");
1423
1424 // Visit all the instructions, bottom-up.
1425 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
1426 Instruction *Inst = &*std::prev(I);
1427
1428 // Invoke instructions are visited as part of their successors (below).
1429 if (isa<InvokeInst>(Inst))
1430 continue;
1431
1432 LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");
1433
1434 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
1435
1436 // Bail out if the number of pointers being tracked becomes too large so
1437 // that this pass can complete in a reasonable amount of time.
1438 if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
1439 DisableRetainReleasePairing = true;
1440 return false;
1441 }
1442 }
1443
1444 // If there's a predecessor with an invoke, visit the invoke as if it were
1445 // part of this block, since we can't insert code after an invoke in its own
1446 // block, and we don't want to split critical edges.
1447 for (BBState::edge_iterator PI(MyStates.pred_begin()),
1448 PE(MyStates.pred_end()); PI != PE; ++PI) {
1449 BasicBlock *Pred = *PI;
1450 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
1451 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
1452 }
1453
1454 LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
1455
1456 return NestingDetected;
1457}
1458
1459// Fill ReleaseInsertPtToRCIdentityRoots, which is a map from insertion points
1460// to the set of RC identity roots that would be released by the release calls
1461// moved to the insertion points.
1462 static void collectReleaseInsertPts(
1463 const BlotMapVector<Value *, RRInfo> &Retains,
1464 DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1465 &ReleaseInsertPtToRCIdentityRoots) {
1466 for (const auto &P : Retains) {
1467 // Retains is a map from an objc_retain call to a RRInfo of the RC identity
1468 // root of the call. Get the RC identity root of the objc_retain call.
1469 Instruction *Retain = cast<Instruction>(P.first);
1470 Value *Root = GetRCIdentityRoot(Retain->getOperand(0));
1471 // Collect all the insertion points of the objc_release calls that release
1472 // the RC identity root of the objc_retain call.
1473 for (const Instruction *InsertPt : P.second.ReverseInsertPts)
1474 ReleaseInsertPtToRCIdentityRoots[InsertPt].insert(Root);
1475 }
1476}
1477
1478// Get the RC identity roots from an insertion point of an objc_release call.
1479// Return nullptr if the passed instruction isn't an insertion point.
1480static const SmallPtrSet<const Value *, 2> *
1481 getRCIdentityRootsFromReleaseInsertPt(
1482 const Instruction *InsertPt,
1483 const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1484 &ReleaseInsertPtToRCIdentityRoots) {
1485 auto I = ReleaseInsertPtToRCIdentityRoots.find(InsertPt);
1486 if (I == ReleaseInsertPtToRCIdentityRoots.end())
1487 return nullptr;
1488 return &I->second;
1489}
1490
1491bool ObjCARCOpt::VisitInstructionTopDown(
1492 Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
1493 const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1494 &ReleaseInsertPtToRCIdentityRoots) {
1495 bool NestingDetected = false;
1496 ARCInstKind Class = GetBasicARCInstKind(Inst);
1497 const Value *Arg = nullptr;
1498
1499 // Make sure a call to objc_retain isn't moved past insertion points of calls
1500 // to objc_release.
1501 if (const SmallPtrSet<const Value *, 2> *Roots =
1502 getRCIdentityRootsFromReleaseInsertPt(
1503 Inst, ReleaseInsertPtToRCIdentityRoots))
1504 for (const auto *Root : *Roots) {
1505 TopDownPtrState &S = MyStates.getPtrTopDownState(Root);
1506 // Disable code motion if the current position is S_Retain to prevent
1507 // moving the objc_retain call past objc_release calls. If it's
1508 // S_CanRelease or larger, it's not necessary to disable code motion as
1509 // the insertion points that prevent the objc_retain call from moving down
1510 // should have been set already.
1511 if (S.GetSeq() == S_Retain)
1512 S.SetCFGHazardAfflicted(true);
1513 }
1514
1515 LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");
1516
1517 switch (Class) {
1518 case ARCInstKind::RetainBlock:
1519 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1520 // objc_retainBlocks to objc_retains. Thus at this point any
1521 // objc_retainBlocks that we see are not optimizable. We need to break since
1522 // a retain can be a potential use.
1523 break;
1524 case ARCInstKind::Retain:
1525 case ARCInstKind::RetainRV: {
1526 Arg = GetArgRCIdentityRoot(Inst);
1527 TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
1528 NestingDetected |= S.InitTopDown(Class, Inst);
1529 // A retain can be a potential use; proceed to the generic checking
1530 // code below.
1531 break;
1532 }
1533 case ARCInstKind::Release: {
1534 Arg = GetArgRCIdentityRoot(Inst);
1535 TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
1536 // Try to form a tentative pair in between this release instruction and the
1537 // top down pointers that we are tracking.
1538 if (S.MatchWithRelease(MDKindCache, Inst)) {
1539 // If we succeed, copy S's RRInfo into the Release -> {Retain Set
1540 // Map}. Then we clear S.
1541 LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
1542 Releases[Inst] = S.GetRRInfo();
1543 S.ClearSequenceProgress();
1544 }
1545 break;
1546 }
1547 case ARCInstKind::AutoreleasepoolPop:
1548 // Conservatively, clear MyStates for all known pointers.
1549 MyStates.clearTopDownPointers();
1550 return false;
1551 case ARCInstKind::AutoreleasepoolPush:
1552 case ARCInstKind::None:
1553 // These cannot be uses of the pointers we are tracking.
1554 return false;
1555 default:
1556 break;
1557 }
1558
1559 // Consider any other possible effects of this instruction on each
1560 // pointer being tracked.
1561 for (auto MI = MyStates.top_down_ptr_begin(),
1562 ME = MyStates.top_down_ptr_end();
1563 MI != ME; ++MI) {
1564 const Value *Ptr = MI->first;
1565 if (Ptr == Arg)
1566 continue; // Handled above.
1567 TopDownPtrState &S = MI->second;
1568 if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
1569 continue;
1570
1571 S.HandlePotentialUse(Inst, Ptr, PA, Class);
1572 }
1573
1574 return NestingDetected;
1575}
1576
1577bool ObjCARCOpt::VisitTopDown(
1578 BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
1579 DenseMap<Value *, RRInfo> &Releases,
1580 const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1581 &ReleaseInsertPtToRCIdentityRoots) {
1582 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
1583 bool NestingDetected = false;
1584 BBState &MyStates = BBStates[BB];
1585
1586 // Merge the states from each predecessor to compute the initial state
1587 // for the current block.
1588 BBState::edge_iterator PI(MyStates.pred_begin()),
1589 PE(MyStates.pred_end());
1590 if (PI != PE) {
1591 const BasicBlock *Pred = *PI;
1592 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
1593 assert(I != BBStates.end());
1594 MyStates.InitFromPred(I->second);
1595 ++PI;
1596 for (; PI != PE; ++PI) {
1597 Pred = *PI;
1598 I = BBStates.find(Pred);
1599 assert(I != BBStates.end());
1600 MyStates.MergePred(I->second);
1601 }
1602 }
1603
1604 // Check that BB and MyStates have the same number of predecessors. This
1605 // prevents retain calls that live outside a loop from being moved into the
1606 // loop.
1607 if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
1608 for (auto I = MyStates.top_down_ptr_begin(),
1609 E = MyStates.top_down_ptr_end();
1610 I != E; ++I)
1611 I->second.SetCFGHazardAfflicted(true);
1612
1613 LLVM_DEBUG(dbgs() << "Before:\n"
1614 << BBStates[BB] << "\n"
1615 << "Performing Dataflow:\n");
1616
1617 // Visit all the instructions, top-down.
1618 for (Instruction &Inst : *BB) {
1619 LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");
1620
1621 NestingDetected |= VisitInstructionTopDown(
1622 &Inst, Releases, MyStates, ReleaseInsertPtToRCIdentityRoots);
1623
1624 // Bail out if the number of pointers being tracked becomes too large so
1625 // that this pass can complete in a reasonable amount of time.
1626 if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
1627 DisableRetainReleasePairing = true;
1628 return false;
1629 }
1630 }
1631
1632 LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
1633 << BBStates[BB] << "\n\n");
1634 CheckForCFGHazards(BB, BBStates, MyStates);
1635 LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
1636 return NestingDetected;
1637}
1638
1639static void
1640 ComputePostOrders(Function &F,
1641 SmallVectorImpl<BasicBlock *> &PostOrder,
1642 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
1643 unsigned NoObjCARCExceptionsMDKind,
1644 DenseMap<const BasicBlock *, BBState> &BBStates) {
1645 /// The visited set, for doing DFS walks.
1646 SmallPtrSet<const BasicBlock *, 16> Visited;
1647
1648 // Do DFS, computing the PostOrder.
1649 SmallPtrSet<BasicBlock *, 16> OnStack;
1650 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
1651
1652 // Functions always have exactly one entry block, and we don't have
1653 // any other block that we treat like an entry block.
1654 BasicBlock *EntryBB = &F.getEntryBlock();
1655 BBState &MyStates = BBStates[EntryBB];
1656 MyStates.SetAsEntry();
1657 Instruction *EntryTI = EntryBB->getTerminator();
1658 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
1659 Visited.insert(EntryBB);
1660 OnStack.insert(EntryBB);
1661 do {
1662 dfs_next_succ:
1663 BasicBlock *CurrBB = SuccStack.back().first;
1664 succ_iterator SE(CurrBB->getTerminator(), false);
1665
1666 while (SuccStack.back().second != SE) {
1667 BasicBlock *SuccBB = *SuccStack.back().second++;
1668 if (Visited.insert(SuccBB).second) {
1669 SuccStack.push_back(
1670 std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
1671 BBStates[CurrBB].addSucc(SuccBB);
1672 BBState &SuccStates = BBStates[SuccBB];
1673 SuccStates.addPred(CurrBB);
1674 OnStack.insert(SuccBB);
1675 goto dfs_next_succ;
1676 }
1677
1678 if (!OnStack.count(SuccBB)) {
1679 BBStates[CurrBB].addSucc(SuccBB);
1680 BBStates[SuccBB].addPred(CurrBB);
1681 }
1682 }
1683 OnStack.erase(CurrBB);
1684 PostOrder.push_back(CurrBB);
1685 SuccStack.pop_back();
1686 } while (!SuccStack.empty());
1687
1688 Visited.clear();
1689
1690 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
1691 // Functions may have many exits, and there may also be blocks which we treat
1692 // as exits due to ignored edges.
1693 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
1694 for (BasicBlock &ExitBB : F) {
1695 BBState &MyStates = BBStates[&ExitBB];
1696 if (!MyStates.isExit())
1697 continue;
1698
1699 MyStates.SetAsExit();
1700
1701 PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
1702 Visited.insert(&ExitBB);
1703 while (!PredStack.empty()) {
1704 reverse_dfs_next_succ:
1705 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
1706 while (PredStack.back().second != PE) {
1707 BasicBlock *BB = *PredStack.back().second++;
1708 if (Visited.insert(BB).second) {
1709 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
1710 goto reverse_dfs_next_succ;
1711 }
1712 }
1713 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
1714 }
1715 }
1716}
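// A small worked example, assuming a diamond CFG entry -> {then, else} -> exit
// and that 'then' is the first successor visited:
//
//   PostOrder           = [exit, then, else, entry]   (forward DFS above)
//   ReverseCFGPostOrder = [entry, then, else, exit]   (reverse-CFG DFS above)
//
// Visit() below walks PostOrder in reverse for the top-down pass and
// ReverseCFGPostOrder in reverse for the bottom-up pass, so each block is
// reached only after all of its non-ignored predecessors (resp. successors).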
1717
1718// Visit the function both top-down and bottom-up.
1719bool ObjCARCOpt::Visit(Function &F,
1720                        DenseMap<const BasicBlock *, BBState> &BBStates,
1721                        BlotMapVector<Value *, RRInfo> &Retains,
1722                        DenseMap<Value *, RRInfo> &Releases) {
1723 // Use reverse-postorder traversals, because we magically know that loops
1724 // will be well behaved, i.e. they won't repeatedly call retain on a single
1725 // pointer without doing a release. We can't use the ReversePostOrderTraversal
1726 // class here because we want the reverse-CFG postorder to consider each
1727 // function exit point, and we want to ignore selected cycle edges.
1728   SmallVector<BasicBlock *, 16> PostOrder;
1729   SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
1730 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
1731 MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
1732 BBStates);
1733
1734 // Use reverse-postorder on the reverse CFG for bottom-up.
1735 bool BottomUpNestingDetected = false;
1736 for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
1737 BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
1738 if (DisableRetainReleasePairing)
1739 return false;
1740 }
1741
1742   DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1743       ReleaseInsertPtToRCIdentityRoots;
1744 collectReleaseInsertPts(Retains, ReleaseInsertPtToRCIdentityRoots);
1745
1746 // Use reverse-postorder for top-down.
1747 bool TopDownNestingDetected = false;
1748 for (BasicBlock *BB : llvm::reverse(PostOrder)) {
1749 TopDownNestingDetected |=
1750 VisitTopDown(BB, BBStates, Releases, ReleaseInsertPtToRCIdentityRoots);
1751 if (DisableRetainReleasePairing)
1752 return false;
1753 }
1754
1755 return TopDownNestingDetected && BottomUpNestingDetected;
1756}
1757
1758/// Move the calls in RetainsToMove and ReleasesToMove.
1759void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
1760                            RRInfo &ReleasesToMove,
1761                            BlotMapVector<Value *, RRInfo> &Retains,
1762                            DenseMap<Value *, RRInfo> &Releases,
1763                            SmallVectorImpl<Instruction *> &DeadInsts,
1764                            Module *M) {
1765 LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
1766
1767 // Insert the new retain and release calls.
1768 for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
1769 Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
1770     SmallVector<OperandBundleDef, 1> BundleList;
1771     addOpBundleForFunclet(InsertPt->getParent(), BundleList);
1772 CallInst *Call =
1773 CallInst::Create(Decl, Arg, BundleList, "", InsertPt->getIterator());
1774 Call->setDoesNotThrow();
1775 Call->setTailCall();
1776
1777 LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
1778 << "\n"
1779 "At insertion point: "
1780 << *InsertPt << "\n");
1781 }
1782 for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
1783 Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
1784     SmallVector<OperandBundleDef, 1> BundleList;
1785     addOpBundleForFunclet(InsertPt->getParent(), BundleList);
1786 CallInst *Call =
1787 CallInst::Create(Decl, Arg, BundleList, "", InsertPt->getIterator());
1788 // Attach a clang.imprecise_release metadata tag, if appropriate.
1789 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
1790 Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
1791 Call->setDoesNotThrow();
1792 if (ReleasesToMove.IsTailCallRelease)
1793 Call->setTailCall();
1794
1795 LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
1796 << "\n"
1797 "At insertion point: "
1798 << *InsertPt << "\n");
1799 }
1800
1801 // Delete the original retain and release calls.
1802 for (Instruction *OrigRetain : RetainsToMove.Calls) {
1803 Retains.blot(OrigRetain);
1804 DeadInsts.push_back(OrigRetain);
1805 LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
1806 }
1807 for (Instruction *OrigRelease : ReleasesToMove.Calls) {
1808 Releases.erase(OrigRelease);
1809 DeadInsts.push_back(OrigRelease);
1810 LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
1811 }
1812}
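// A minimal sketch of the net effect, with hypothetical values: for the pair
// being rewritten, the loops above emit
//
//   %r = call i8* @objc_retain(i8* %arg)   ; at each ReleasesToMove insert pt
//   call void @objc_release(i8* %arg)      ; at each RetainsToMove insert pt,
//                                          ; carrying clang.imprecise_release
//                                          ; when the merged metadata survived
//
// and then blot the original retains out of Retains, erase the original
// releases from Releases, and queue both on DeadInsts for later deletion.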
1813
1814bool ObjCARCOpt::PairUpRetainsAndReleases(
1815     DenseMap<const BasicBlock *, BBState> &BBStates,
1816     BlotMapVector<Value *, RRInfo> &Retains,
1817     DenseMap<Value *, RRInfo> &Releases, Module *M,
1818     Instruction *Retain,
1819 SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
1820 RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
1821 bool &AnyPairsCompletelyEliminated) {
1822 // If a pair happens in a region where it is known that the reference count
1823 // is already incremented, we can similarly ignore possible decrements unless
1824 // we are dealing with a retainable object with multiple provenance sources.
1825 bool KnownSafeTD = true, KnownSafeBU = true;
1826 bool CFGHazardAfflicted = false;
1827
1828 // Connect the dots between the top-down-collected RetainsToMove and
1829 // bottom-up-collected ReleasesToMove to form sets of related calls.
1830 // This is an iterative process so that we connect multiple releases
1831 // to multiple retains if needed.
1832 unsigned OldDelta = 0;
1833 unsigned NewDelta = 0;
1834 unsigned OldCount = 0;
1835 unsigned NewCount = 0;
1836 bool FirstRelease = true;
1837 for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
1838     SmallVector<Instruction *, 4> NewReleases;
1839     for (Instruction *NewRetain : NewRetains) {
1840 auto It = Retains.find(NewRetain);
1841 assert(It != Retains.end());
1842 const RRInfo &NewRetainRRI = It->second;
1843 KnownSafeTD &= NewRetainRRI.KnownSafe;
1844 CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
1845 for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
1846 auto Jt = Releases.find(NewRetainRelease);
1847 if (Jt == Releases.end())
1848 return false;
1849 const RRInfo &NewRetainReleaseRRI = Jt->second;
1850
1851 // If the release does not have a reference to the retain as well,
1852 // something happened which is unaccounted for. Do not do anything.
1853 //
1854 // This can happen if we catch an additive overflow during path count
1855 // merging.
1856 if (!NewRetainReleaseRRI.Calls.count(NewRetain))
1857 return false;
1858
1859 if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
1860 // If we overflow when we compute the path count, don't remove/move
1861 // anything.
1862 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
1863 unsigned PathCount = BBState::OverflowOccurredValue;
1864 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1865 return false;
1866         assert(PathCount != BBState::OverflowOccurredValue &&
1867                "PathCount at this point can not be "
1868 "OverflowOccurredValue.");
1869 OldDelta -= PathCount;
1870
1871 // Merge the ReleaseMetadata and IsTailCallRelease values.
1872 if (FirstRelease) {
1873 ReleasesToMove.ReleaseMetadata =
1874 NewRetainReleaseRRI.ReleaseMetadata;
1875 ReleasesToMove.IsTailCallRelease =
1876 NewRetainReleaseRRI.IsTailCallRelease;
1877 FirstRelease = false;
1878 } else {
1879 if (ReleasesToMove.ReleaseMetadata !=
1880 NewRetainReleaseRRI.ReleaseMetadata)
1881 ReleasesToMove.ReleaseMetadata = nullptr;
1882 if (ReleasesToMove.IsTailCallRelease !=
1883 NewRetainReleaseRRI.IsTailCallRelease)
1884 ReleasesToMove.IsTailCallRelease = false;
1885 }
1886
1887 // Collect the optimal insertion points.
1888 if (!KnownSafe)
1889 for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
1890 if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
1891 // If we overflow when we compute the path count, don't
1892 // remove/move anything.
1893 const BBState &RIPBBState = BBStates[RIP->getParent()];
1894             PathCount = BBState::OverflowOccurredValue;
1895             if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1896               return false;
1897             assert(PathCount != BBState::OverflowOccurredValue &&
1898                    "PathCount at this point can not be "
1899 "OverflowOccurredValue.");
1900 NewDelta -= PathCount;
1901 }
1902 }
1903 NewReleases.push_back(NewRetainRelease);
1904 }
1905 }
1906 }
1907 NewRetains.clear();
1908 if (NewReleases.empty()) break;
1909
1910 // Back the other way.
1911 for (Instruction *NewRelease : NewReleases) {
1912 auto It = Releases.find(NewRelease);
1913 assert(It != Releases.end());
1914 const RRInfo &NewReleaseRRI = It->second;
1915 KnownSafeBU &= NewReleaseRRI.KnownSafe;
1916 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
1917 for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
1918 auto Jt = Retains.find(NewReleaseRetain);
1919 if (Jt == Retains.end())
1920 return false;
1921 const RRInfo &NewReleaseRetainRRI = Jt->second;
1922
1923 // If the retain does not have a reference to the release as well,
1924 // something happened which is unaccounted for. Do not do anything.
1925 //
1926 // This can happen if we catch an additive overflow during path count
1927 // merging.
1928 if (!NewReleaseRetainRRI.Calls.count(NewRelease))
1929 return false;
1930
1931 if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
1932 // If we overflow when we compute the path count, don't remove/move
1933 // anything.
1934 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
1935 unsigned PathCount = BBState::OverflowOccurredValue;
1936 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1937 return false;
1938           assert(PathCount != BBState::OverflowOccurredValue &&
1939                  "PathCount at this point can not be "
1940 "OverflowOccurredValue.");
1941 OldDelta += PathCount;
1942 OldCount += PathCount;
1943
1944 // Collect the optimal insertion points.
1945 if (!KnownSafe)
1946 for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
1947 if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
1948 // If we overflow when we compute the path count, don't
1949 // remove/move anything.
1950 const BBState &RIPBBState = BBStates[RIP->getParent()];
1951
1952               PathCount = BBState::OverflowOccurredValue;
1953               if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1954                 return false;
1955               assert(PathCount != BBState::OverflowOccurredValue &&
1956                      "PathCount at this point can not be "
1957 "OverflowOccurredValue.");
1958 NewDelta += PathCount;
1959 NewCount += PathCount;
1960 }
1961 }
1962 NewRetains.push_back(NewReleaseRetain);
1963 }
1964 }
1965 }
1966 if (NewRetains.empty()) break;
1967 }
1968
1969 // We can only remove pointers if we are known safe in both directions.
1970 bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
1971 if (UnconditionallySafe) {
1972 RetainsToMove.ReverseInsertPts.clear();
1973 ReleasesToMove.ReverseInsertPts.clear();
1974 NewCount = 0;
1975 } else {
1976 // Determine whether the new insertion points we computed preserve the
1977 // balance of retain and release calls through the program.
1978 // TODO: If the fully aggressive solution isn't valid, try to find a
1979 // less aggressive solution which is.
1980 if (NewDelta != 0)
1981 return false;
1982
1983 // At this point, we are not going to remove any RR pairs, but we still are
1984 // able to move RR pairs. If one of our pointers is afflicted with
1985 // CFGHazards, we cannot perform such code motion so exit early.
1986 const bool WillPerformCodeMotion =
1987 !RetainsToMove.ReverseInsertPts.empty() ||
1988 !ReleasesToMove.ReverseInsertPts.empty();
1989 if (CFGHazardAfflicted && WillPerformCodeMotion)
1990 return false;
1991 }
1992
1993 // Determine whether the original call points are balanced in the retain and
1994 // release calls through the program. If not, conservatively don't touch
1995 // them.
1996 // TODO: It's theoretically possible to do code motion in this case, as
1997 // long as the existing imbalances are maintained.
1998 if (OldDelta != 0)
1999 return false;
2000
2001 Changed = true;
2002 assert(OldCount != 0 && "Unreachable code?");
2003 NumRRs += OldCount - NewCount;
2004 // Set to true if we completely removed any RR pairs.
2005 AnyPairsCompletelyEliminated = NewCount == 0;
2006
2007 // We can move calls!
2008 return true;
2009}
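// A worked example of the balance bookkeeping above, assuming a diamond CFG
// entry -> {then, else} -> exit, one objc_retain in entry, and one
// objc_release in each arm (path count = paths-from-entry * paths-to-exit):
//
//   retain in entry:  1 * 2 = 2   -> OldDelta += 2
//   release in then:  1 * 1 = 1   -> OldDelta -= 1
//   release in else:  1 * 1 = 1   -> OldDelta -= 1
//
// OldDelta ends at 0, so the candidate set is balanced; if the release in
// else were missing, OldDelta would stay at +1 and the pairing is rejected.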
2010
2011/// Identify pairings between the retains and releases, and delete and/or move
2012/// them.
2013bool ObjCARCOpt::PerformCodePlacement(
2014     DenseMap<const BasicBlock *, BBState> &BBStates,
2015     BlotMapVector<Value *, RRInfo> &Retains,
2016     DenseMap<Value *, RRInfo> &Releases, Module *M) {
2017 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2018
2019 bool AnyPairsCompletelyEliminated = false;
2020   SmallVector<Instruction *, 8> DeadInsts;
2021 
2022   // Visit each retain.
2023   for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2024                                                       E = Retains.end();
2025 I != E; ++I) {
2026 Value *V = I->first;
2027 if (!V) continue; // blotted
2028
2029 Instruction *Retain = cast<Instruction>(V);
2030
2031 LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2032 
2033     Value *Arg = GetArgRCIdentityRoot(Retain);
2034 
2035 // If the object being released is in static or stack storage, we know it's
2036 // not being managed by ObjC reference counting, so we can delete pairs
2037 // regardless of what possible decrements or uses lie between them.
2038 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2039
2040 // A constant pointer can't be pointing to an object on the heap. It may
2041 // be reference-counted, but it won't be deleted.
2042 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2043 if (const GlobalVariable *GV =
2044 dyn_cast<GlobalVariable>(
2045 GetRCIdentityRoot(LI->getPointerOperand())))
2046 if (GV->isConstant())
2047 KnownSafe = true;
2048
2049 // Connect the dots between the top-down-collected RetainsToMove and
2050 // bottom-up-collected ReleasesToMove to form sets of related calls.
2051 RRInfo RetainsToMove, ReleasesToMove;
2052
2053 bool PerformMoveCalls = PairUpRetainsAndReleases(
2054 BBStates, Retains, Releases, M, Retain, DeadInsts,
2055 RetainsToMove, ReleasesToMove, Arg, KnownSafe,
2056 AnyPairsCompletelyEliminated);
2057
2058 if (PerformMoveCalls) {
2059 // Ok, everything checks out and we're all set. Let's move/delete some
2060 // code!
2061 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2062 Retains, Releases, DeadInsts, M);
2063 }
2064 }
2065
2066 // Now that we're done moving everything, we can delete the newly dead
2067 // instructions, as we no longer need them as insert points.
2068 while (!DeadInsts.empty())
2069 EraseInstruction(DeadInsts.pop_back_val());
2070
2071 return AnyPairsCompletelyEliminated;
2072}
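// A minimal sketch of a pair this placement can delete outright, assuming the
// hypothetical @use callee neither releases nor stores its argument:
//
//   %obj = alloca i8                        ; KnownSafe: stack storage
//   %0 = call i8* @objc_retain(i8* %obj)
//   call void @use(i8* %obj)
//   call void @objc_release(i8* %obj)
//
// With KnownSafe set, no ReverseInsertPts are collected, so MoveCalls inserts
// nothing, deletes both calls, and AnyPairsCompletelyEliminated becomes true.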
2073
2074/// Weak pointer optimizations.
2075void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2076 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2077
2078 // First, do memdep-style RLE and S2L optimizations. We can't use memdep
2079 // itself because it uses AliasAnalysis and we need to do provenance
2080 // queries instead.
2081 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2082 Instruction *Inst = &*I++;
2083
2084 LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2085 
2086     ARCInstKind Class = GetBasicARCInstKind(Inst);
2087 if (Class != ARCInstKind::LoadWeak &&
2088 Class != ARCInstKind::LoadWeakRetained)
2089 continue;
2090
2091 // Delete objc_loadWeak calls with no users.
2092 if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
2093 Inst->eraseFromParent();
2094 Changed = true;
2095 continue;
2096 }
2097
2098 // TODO: For now, just look for an earlier available version of this value
2099 // within the same block. Theoretically, we could do memdep-style non-local
2100 // analysis too, but that would want caching. A better approach would be to
2101 // use the technique that EarlyCSE uses.
2102 inst_iterator Current = std::prev(I);
2103 BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
2104 for (BasicBlock::iterator B = CurrentBB->begin(),
2105 J = Current.getInstructionIterator();
2106 J != B; --J) {
2107 Instruction *EarlierInst = &*std::prev(J);
2108 ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2109 switch (EarlierClass) {
2110 case ARCInstKind::LoadWeak:
2111 case ARCInstKind::LoadWeakRetained: {
2112 // If this is loading from the same pointer, replace this load's value
2113 // with that one.
2114 CallInst *Call = cast<CallInst>(Inst);
2115 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2116 Value *Arg = Call->getArgOperand(0);
2117 Value *EarlierArg = EarlierCall->getArgOperand(0);
2118 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2119         case AliasResult::MustAlias:
2120           Changed = true;
2121 // If the load has a builtin retain, insert a plain retain for it.
2122 if (Class == ARCInstKind::LoadWeakRetained) {
2123 Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2124 CallInst *CI =
2125 CallInst::Create(Decl, EarlierCall, "", Call->getIterator());
2126 CI->setTailCall();
2127 }
2128 // Zap the fully redundant load.
2129 Call->replaceAllUsesWith(EarlierCall);
2130 Call->eraseFromParent();
2131 goto clobbered;
2132         case AliasResult::PartialAlias:
2133         case AliasResult::MayAlias:
2134           goto clobbered;
2135         case AliasResult::NoAlias:
2136           break;
2137 }
2138 break;
2139 }
2140 case ARCInstKind::StoreWeak:
2141 case ARCInstKind::InitWeak: {
2142 // If this is storing to the same pointer and has the same size etc.
2143 // replace this load's value with the stored value.
2144 CallInst *Call = cast<CallInst>(Inst);
2145 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2146 Value *Arg = Call->getArgOperand(0);
2147 Value *EarlierArg = EarlierCall->getArgOperand(0);
2148 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2149         case AliasResult::MustAlias:
2150           Changed = true;
2151 // If the load has a builtin retain, insert a plain retain for it.
2152 if (Class == ARCInstKind::LoadWeakRetained) {
2153 Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2154 CallInst *CI =
2155 CallInst::Create(Decl, EarlierCall, "", Call->getIterator());
2156 CI->setTailCall();
2157 }
2158 // Zap the fully redundant load.
2159 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2160 Call->eraseFromParent();
2161 goto clobbered;
2162         case AliasResult::PartialAlias:
2163         case AliasResult::MayAlias:
2164           goto clobbered;
2165         case AliasResult::NoAlias:
2166           break;
2167 }
2168 break;
2169 }
2170 case ARCInstKind::MoveWeak:
2171 case ARCInstKind::CopyWeak:
2172         // TODO: Grab the copied value.
2173 goto clobbered;
2174 case ARCInstKind::AutoreleasepoolPush:
2175 case ARCInstKind::None:
2176 case ARCInstKind::IntrinsicUser:
2177 case ARCInstKind::User:
2178 // Weak pointers are only modified through the weak entry points
2179 // (and arbitrary calls, which could call the weak entry points).
2180 break;
2181 default:
2182 // Anything else could modify the weak pointer.
2183 goto clobbered;
2184 }
2185 }
2186 clobbered:;
2187 }
2188
2189 // Then, for each destroyWeak with an alloca operand, check to see if
2190 // the alloca and all its users can be zapped.
2191   for (Instruction &Inst : llvm::make_early_inc_range(instructions(F))) {
2192     ARCInstKind Class = GetBasicARCInstKind(&Inst);
2193     if (Class != ARCInstKind::DestroyWeak)
2194 continue;
2195
2196 CallInst *Call = cast<CallInst>(&Inst);
2197 Value *Arg = Call->getArgOperand(0);
2198 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2199 for (User *U : Alloca->users()) {
2200 const Instruction *UserInst = cast<Instruction>(U);
2201 switch (GetBasicARCInstKind(UserInst)) {
2202 case ARCInstKind::InitWeak:
2203 case ARCInstKind::StoreWeak:
2204 case ARCInstKind::DestroyWeak:
2205 continue;
2206 default:
2207 goto done;
2208 }
2209 }
2210 Changed = true;
2211 for (User *U : llvm::make_early_inc_range(Alloca->users())) {
2212 CallInst *UserInst = cast<CallInst>(U);
2213 switch (GetBasicARCInstKind(UserInst)) {
2214 case ARCInstKind::InitWeak:
2215 case ARCInstKind::StoreWeak:
2216 // These functions return their second argument.
2217 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2218 break;
2219 case ARCInstKind::DestroyWeak:
2220 // No return value.
2221 break;
2222 default:
2223 llvm_unreachable("alloca really is used!");
2224 }
2225 UserInst->eraseFromParent();
2226 }
2227 Alloca->eraseFromParent();
2228 done:;
2229 }
2230 }
2231}
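// A minimal sketch of the store-to-load forwarding above, assuming nothing
// between the two calls can modify the weak slot (hypothetical names):
//
//   call i8* @objc_storeWeak(i8** %slot, i8* %v)
//   %w = call i8* @objc_loadWeak(i8** %slot)   ; %w is replaced by %v
//
// The redundant load is erased after its uses are rewritten to the stored
// value (the second argument of the earlier call); when the load was
// objc_loadWeakRetained, a plain objc_retain is inserted first.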
2232
2233/// Identify program paths which execute sequences of retains and releases which
2234/// can be eliminated.
2235bool ObjCARCOpt::OptimizeSequences(Function &F) {
2236 // Releases, Retains - These are used to store the results of the main flow
2237 // analysis. These use Value* as the key instead of Instruction* so that the
2238 // map stays valid when we get around to rewriting code and calls get
2239 // replaced by arguments.
2240   DenseMap<Value *, RRInfo> Releases;
2241   BlotMapVector<Value *, RRInfo> Retains;
2242 
2243 // This is used during the traversal of the function to track the
2244 // states for each identified object at each block.
2245   DenseMap<const BasicBlock *, BBState> BBStates;
2246 
2247 // Analyze the CFG of the function, and all instructions.
2248 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2249
2250 if (DisableRetainReleasePairing)
2251 return false;
2252
2253 // Transform.
2254 bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2255 Releases,
2256 F.getParent());
2257
2258 return AnyPairsCompletelyEliminated && NestingDetected;
2259}
2260
2261/// Check if there is a dependent call earlier that does not have anything in
2262/// between the Retain and the call that can affect the reference count of their
2263/// shared pointer argument. Note that Retain need not be in BB.
2264 static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
2265                                               Instruction *Retain,
2266                                               ProvenanceAnalysis &PA) {
2267 auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
2268 CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));
2269
2270 // Check that the pointer is the return value of the call.
2271 if (!Call || Arg != Call)
2272 return nullptr;
2273
2274 // Check that the call is a regular call.
2275   ARCInstKind Class = GetBasicARCInstKind(Call);
2276   return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
2277 ? Call
2278 : nullptr;
2279}
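// The shape this helper accepts, sketched with hypothetical names:
//
//   %call = call i8* @something()            ; plain Call / CallOrUser
//   %0    = call i8* @objc_retain(i8* %call) ; the Retain passed in above
//
// It returns %call only when Arg is exactly the call's return value and no
// instruction between the call and the retain can change a reference count.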
2280
2281/// Find a dependent retain that precedes the given autorelease for which there
2282/// is nothing in between the two instructions that can affect the ref count of
2283/// Arg.
2284static CallInst *
2285 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2286                                   Instruction *Autorelease,
2287                                   ProvenanceAnalysis &PA) {
2288 auto *Retain = dyn_cast_or_null<CallInst>(
2289       findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));
2290 
2291 // Check that we found a retain with the same argument.
2292   if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
2293       GetArgRCIdentityRoot(Retain) != Arg) {
2294 return nullptr;
2295 }
2296
2297 return Retain;
2298}
2299
2300/// Look for an ``autorelease'' instruction dependent on Arg such that there are
2301/// no instructions dependent on Arg that need a positive ref count in between
2302/// the autorelease and the ret.
2303static CallInst *
2304 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2305                                        ReturnInst *Ret,
2306 ProvenanceAnalysis &PA) {
2308 auto *Autorelease = dyn_cast_or_null<CallInst>(
2309       findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));
2310 
2311 if (!Autorelease)
2312 return nullptr;
2313 ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
2314 if (!IsAutorelease(AutoreleaseClass))
2315 return nullptr;
2316   if (GetArgRCIdentityRoot(Autorelease) != Arg)
2317     return nullptr;
2318
2319 return Autorelease;
2320}
2321
2322/// Look for this pattern:
2323/// \code
2324/// %call = call i8* @something(...)
2325/// %2 = call i8* @objc_retain(i8* %call)
2326/// %3 = call i8* @objc_autorelease(i8* %2)
2327/// ret i8* %3
2328/// \endcode
2329/// And delete the retain and autorelease.
2330void ObjCARCOpt::OptimizeReturns(Function &F) {
2331 if (!F.getReturnType()->isPointerTy())
2332 return;
2333
2334 LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2335
2336 for (BasicBlock &BB: F) {
2337 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
2338 if (!Ret)
2339 continue;
2340
2341 LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2342
2343 const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2344
2345 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2346 // dependent on Arg such that there are no instructions dependent on Arg
2347 // that need a positive ref count in between the autorelease and Ret.
2348     CallInst *Autorelease =
2349         FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);
2350
2351 if (!Autorelease)
2352 continue;
2353
2354     CallInst *Retain = FindPredecessorRetainWithSafePath(
2355         Arg, Autorelease->getParent(), Autorelease, PA);
2356
2357 if (!Retain)
2358 continue;
2359
2360 // Check that there is nothing that can affect the reference count
2361 // between the retain and the call. Note that Retain need not be in BB.
2362     CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);
2363 
2364 // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
2365 if (!Call ||
2366 (!Call->isTailCall() &&
2367 GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
2368 GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
2369 continue;
2370
2371 // If so, we can zap the retain and autorelease.
2372 Changed = true;
2373 ++NumRets;
2374 LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
2375 << "\n");
2376 BundledInsts->eraseInst(Retain);
2377     EraseInstruction(Autorelease);
2378   }
2379}
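// After the rewrite, the pattern shown in the comment above is reduced to
// (illustrative):
//
//   %call = call i8* @something(...)
//   ret i8* %call
//
// once the retain and the autorelease on the returned value are both erased.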
2380
2381#ifndef NDEBUG
2382void
2383ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2384 Statistic &NumRetains =
2385 AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2386 Statistic &NumReleases =
2387 AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2388
2389 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2390 Instruction *Inst = &*I++;
2391 switch (GetBasicARCInstKind(Inst)) {
2392 default:
2393 break;
2394 case ARCInstKind::Retain:
2395 ++NumRetains;
2396 break;
2397 case ARCInstKind::Release:
2398 ++NumReleases;
2399 break;
2400 }
2401 }
2402}
2403#endif
2404
2405void ObjCARCOpt::init(Function &F) {
2406 if (!EnableARCOpts)
2407 return;
2408
2409   // Intuitively, objc_retain and others are nocapture; however, in practice
2410 // they are not, because they return their argument value. And objc_release
2411 // calls finalizers which can have arbitrary side effects.
2412 MDKindCache.init(F.getParent());
2413
2414 // Initialize our runtime entry point cache.
2415 EP.init(F.getParent());
2416
2417 // Compute which blocks are in which funclet.
2418 if (F.hasPersonalityFn() &&
2419 isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
2420 BlockEHColors = colorEHFunclets(F);
2421}
2422
2423bool ObjCARCOpt::run(Function &F, AAResults &AA) {
2424 if (!EnableARCOpts)
2425 return false;
2426
2427 Changed = CFGChanged = false;
2428 BundledRetainClaimRVs BRV(/*ContractPass=*/false);
2429 BundledInsts = &BRV;
2430
2431 LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
2432 << " >>>"
2433 "\n");
2434
2435 std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
2436 Changed |= R.first;
2437 CFGChanged |= R.second;
2438
2439 PA.setAA(&AA);
2440
2441#ifndef NDEBUG
2442 if (AreStatisticsEnabled()) {
2443 GatherStatistics(F, false);
2444 }
2445#endif
2446
2447 // This pass performs several distinct transformations. As a compile-time aid
2448 // when compiling code that isn't ObjC, skip these if the relevant ObjC
2449 // library functions aren't declared.
2450
2451 // Preliminary optimizations. This also computes UsedInThisFunction.
2452 OptimizeIndividualCalls(F);
2453
2454 // Optimizations for weak pointers.
2455 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
2456 (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
2457 (1 << unsigned(ARCInstKind::StoreWeak)) |
2458 (1 << unsigned(ARCInstKind::InitWeak)) |
2459 (1 << unsigned(ARCInstKind::CopyWeak)) |
2460 (1 << unsigned(ARCInstKind::MoveWeak)) |
2461 (1 << unsigned(ARCInstKind::DestroyWeak))))
2462 OptimizeWeakCalls(F);
2463
2464 // Optimizations for retain+release pairs.
2465 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
2466 (1 << unsigned(ARCInstKind::RetainRV)) |
2467 (1 << unsigned(ARCInstKind::RetainBlock))))
2468 if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
2469 // Run OptimizeSequences until it either stops making changes or
2470 // no retain+release pair nesting is detected.
2471 while (OptimizeSequences(F)) {}
2472
2473 // Optimizations if objc_autorelease is used.
2474 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
2475 (1 << unsigned(ARCInstKind::AutoreleaseRV))))
2476 OptimizeReturns(F);
2477
2478 // Gather statistics after optimization.
2479#ifndef NDEBUG
2480 if (AreStatisticsEnabled()) {
2481 GatherStatistics(F, true);
2482 }
2483#endif
2484
2485 LLVM_DEBUG(dbgs() << "\n");
2486
2487 return Changed;
2488}
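// A minimal sketch of the UsedInThisFunction gating used above, assuming it is
// an unsigned bit set that OptimizeIndividualCalls fills in as it classifies
// calls:
//
//   UsedInThisFunction |= 1 << unsigned(ARCInstKind::Retain);    // record kind
//   bool SawRelease =
//       UsedInThisFunction & (1 << unsigned(ARCInstKind::Release));
//
// Each ARCInstKind owns one bit, so a whole family of entry points (e.g. all
// of the weak ones) can be tested with a single OR'd mask.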
2489
2490/// @}
2491///
2492 
2493 PreservedAnalyses ObjCARCOptPass::run(Function &F,
2494                                       FunctionAnalysisManager &AM) {
2495   ObjCARCOpt OCAO;
2496 OCAO.init(F);
2497
2498 bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
2499 bool CFGChanged = OCAO.hasCFGChanged();
2500 if (Changed) {
2501     PreservedAnalyses PA;
2502     if (!CFGChanged)
2503       PA.preserveSet<CFGAnalyses>();
2504     return PA;
2505 }
2506 return PreservedAnalyses::all();
2507}