//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an inter-procedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
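// As an illustrative sketch (not part of the original documentation), a
// deduction with two distinct increment sites could be written as:
//  STATS_DECL(nonnull, Arguments,
//             BUILD_STAT_MSG_IR_ATTR(arguments, nonnull))
//  if (DeducedFromCallSites)
//    STATS_TRACK(nonnull, Arguments)
//  if (DeducedFromUses)
//    STATS_TRACK(nonnull, Arguments)
// where `DeducedFromCallSites` and `DeducedFromUses` are hypothetical flags.
//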
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
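//
// As a quick sanity example derived from the definitions above:
//   (ChangeStatus::CHANGED | ChangeStatus::UNCHANGED) == ChangeStatus::CHANGED
//   (ChangeStatus::CHANGED & ChangeStatus::UNCHANGED) == ChangeStatus::UNCHANGED
// i.e., `|` remembers that anything changed while `&` requires all parts to
// have changed.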

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA);

  // All values have been visited.
  return true;
}
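
// A typical invocation looks as follows (illustrative sketch only; `MyAA`,
// `MyState`, and `deduceFor` are hypothetical placeholders, the real callers
// appear further below, e.g., in AAReturnedValuesImpl::updateImpl):
//
//   auto VisitValueCB = [&](Value &V, MyState &S, bool Stripped) -> bool {
//     S &= deduceFor(V);
//     return S.isValidState();
//   };
//   genericValueTraversal<MyAA, MyState>(A, IRP, *this, S, VisitValueCB);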

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}

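// For instance (illustrative): if the IR already carries dereferenceable(16)
// and the deduced attribute is dereferenceable(8), the new attribute is
// "equal or worse" (16 >= 8) and addIfNotExistent below will not touch the
// attribute list.
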
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
static const Value *getPointerOperand(const Instruction *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (!LI->isVolatile())
      return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    if (!SI->isVolatile())
      return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    if (!CXI->isVolatile())
      return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    if (!RMWI->isVolatile())
      return RMWI->getPointerOperand();

  return nullptr;
}

static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
                                                         int64_t &BytesOffset,
                                                         const DataLayout &DL) {
  const Value *Ptr = getPointerOperand(I);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          /*AllowNonInbounds*/ false);
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code will manifest the attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}

const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  return false;
}

void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
}

void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}

namespace {
/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);

template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
                                                       const IntegerState &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
                                                       const BooleanState &R) {
  return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}
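
// To make the clamping concrete (illustrative, derived from the operators
// above): if S optimistically assumes a property (e.g., nonnull) but R does
// not, `S ^= R` clears the assumed bits in S and CHANGED is returned so that
// dependent attributes are updated again; if R agrees with S, UNCHANGED is
// returned.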

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all return values we find.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class to compose two generic deductions.
template <typename AAType, typename Base, typename StateType,
          template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
    : public F<AAType, G<AAType, Base, StateType>, StateType> {
  AAComposeTwoGenericDeduction(const IRPosition &IRP)
      : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus ChangedF =
        F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
    ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
    return ChangedF | ChangedG;
  }
};

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all we find.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const typename AAType::StateType &>(AA.getState()));
  }
};

/// Helper class for generic deduction using the must-be-executed-context.
/// The base class is required to provide the method
///   bool followUse(Attributor &A, const Use *U, const Instruction *I)
/// where \p U is the underlying use and \p I is the user of \p U.
/// `followUse` returns true if the value should be tracked transitively.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Base::initialize(A);
    IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    if (!CtxI)
      return;

    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    SetVector<const Use *> NextUses;

    for (const Use *U : Uses) {
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
        bool Found = EIt.count(UserI);
        while (!Found && ++EIt != EEnd)
          Found = EIt.getCurrentInst() == UserI;
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            NextUses.insert(&Us);
      }
    }
    for (const Use *U : NextUses)
      Uses.insert(U);

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;
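
// A concrete attribute combines these helpers by instantiation, for example
// (illustrative; `AAMyAttr` and `AAMyAttrImpl` are hypothetical, actual
// instantiations such as AANonNullArgument follow further below):
//
//   struct AAMyAttrArgument final
//       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
//             AAMyAttr, AAMyAttrImpl> {
//     AAMyAttrArgument(const IRPosition &IRP)
//         : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
//               AAMyAttr, AAMyAttrImpl>(IRP) {}
//   };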

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0 || CB.isMustTailCall())
      return ChangeStatus::UNCHANGED;
    CB.replaceAllUsesWith(&C);
    return ChangeStatus::CHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    getIRPosition() = IRPosition::argument(*UniqueRVArg);
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << static_cast<const AbstractAttribute &>(RetValAA)
                      << "\n");

    // Skip dead ends: if we do not know anything about the returned call we
    // mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // i.e., if all returned values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we keep a record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1278 /// ------------------------ NoSync Function Attribute -------------------------
1279 
1280 struct AANoSyncImpl : AANoSync {
1281  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1282 
1283  const std::string getAsStr() const override {
1284  return getAssumed() ? "nosync" : "may-sync";
1285  }
1286 
1287  /// See AbstractAttribute::updateImpl(...).
1288  ChangeStatus updateImpl(Attributor &A) override;
1289 
1290  /// Helper function used to determine whether an instruction is non-relaxed
1291  /// atomic. In other words, if an atomic instruction does not have unordered
1292  /// or monotonic ordering
1293  static bool isNonRelaxedAtomic(Instruction *I);
1294 
1295  /// Helper function used to determine whether an instruction is volatile.
1296  static bool isVolatile(Instruction *I);
1297 
1298  /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1299  /// memset).
1300  static bool isNoSyncIntrinsic(Instruction *I);
1301 };
1302 
1303 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1304  if (!I->isAtomic())
1305  return false;
1306 
1307  AtomicOrdering Ordering;
1308  switch (I->getOpcode()) {
1309  case Instruction::AtomicRMW:
1310  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1311  break;
1312  case Instruction::Store:
1313  Ordering = cast<StoreInst>(I)->getOrdering();
1314  break;
1315  case Instruction::Load:
1316  Ordering = cast<LoadInst>(I)->getOrdering();
1317  break;
1318  case Instruction::Fence: {
1319  auto *FI = cast<FenceInst>(I);
1320  if (FI->getSyncScopeID() == SyncScope::SingleThread)
1321  return false;
1322  Ordering = FI->getOrdering();
1323  break;
1324  }
1325  case Instruction::AtomicCmpXchg: {
1326  AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1327  AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1328  // Only if both are relaxed, than it can be treated as relaxed.
1329  // Otherwise it is non-relaxed.
1330  if (Success != AtomicOrdering::Unordered &&
1331  Success != AtomicOrdering::Monotonic)
1332  return true;
1333  if (Failure != AtomicOrdering::Unordered &&
1334  Failure != AtomicOrdering::Monotonic)
1335  return true;
1336  return false;
1337  }
1338  default:
1340  "New atomic operations need to be known in the attributor.");
1341  }
1342 
1343  // Relaxed.
1344  if (Ordering == AtomicOrdering::Unordered ||
1345  Ordering == AtomicOrdering::Monotonic)
1346  return false;
1347  return true;
1348 }
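
// For example (illustrative): a `load atomic i32, i32* %p monotonic` is
// relaxed and does not preclude nosync, while a `load atomic i32, i32* %p
// acquire` is non-relaxed and makes the surrounding function "may-sync".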

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
         "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    if (ICS.isBundleOperand(U))
      return 0;

    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  int64_t Offset;
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
      int64_t DerefBytes =
          Offset + (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());

      IsNonNull |= !NullPointerIsDefined;
      return DerefBytes;
    }
  }
  if (const Value *Base =
          GetPointerBaseWithConstantOffset(UseV, Offset, DL,
                                           /*AllowNonInbounds*/ false)) {
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRPosition::value(*Base));
    IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
    IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
    int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
    return std::max(int64_t(0), DerefBytes - Offset);
  }

  return 0;
}
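
// For example (illustrative): given `store i32 0, i32* %p` in a function where
// null is not a valid pointer, the use of %p as the store's pointer operand
// lets the code above conclude that %p is nonnull and dereferenceable(4).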
1589 
1590 struct AANonNullImpl : AANonNull {
1591  AANonNullImpl(const IRPosition &IRP)
1592  : AANonNull(IRP),
1593  NullIsDefined(NullPointerIsDefined(
1594  getAnchorScope(),
1595  getAssociatedValue().getType()->getPointerAddressSpace())) {}
1596 
1597  /// See AbstractAttribute::initialize(...).
1598  void initialize(Attributor &A) override {
1599  if (!NullIsDefined &&
1600  hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1601  indicateOptimisticFixpoint();
1602  else
1603  AANonNull::initialize(A);
1604  }
1605 
1606  /// See AAFromMustBeExecutedContext
1607  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1608  bool IsNonNull = false;
1609  bool TrackUse = false;
1610  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1611  IsNonNull, TrackUse);
1612  takeKnownMaximum(IsNonNull);
1613  return TrackUse;
1614  }
1615 
1616  /// See AbstractAttribute::getAsStr().
1617  const std::string getAsStr() const override {
1618  return getAssumed() ? "nonnull" : "may-null";
1619  }
1620 
1621  /// Flag to determine if the underlying value can be null and still allow
1622  /// valid accesses.
1623  const bool NullIsDefined;
1624 };
1625 
1626 /// NonNull attribute for a floating value.
1627 struct AANonNullFloating
1628  : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1629  using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1630  AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1631 
1632  /// See AbstractAttribute::initialize(...).
1633  void initialize(Attributor &A) override {
1634  Base::initialize(A);
1635 
1636  if (isAtFixpoint())
1637  return;
1638 
1639  const IRPosition &IRP = getIRPosition();
1640  const Value &V = IRP.getAssociatedValue();
1641  const DataLayout &DL = A.getDataLayout();
1642 
1643  // TODO: This context sensitive query should be removed once we can do
1644  // context sensitive queries in the genericValueTraversal below.
1645  if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
1646  /* TODO: DT */ nullptr))
1647  indicateOptimisticFixpoint();
1648  }
1649 
1650  /// See AbstractAttribute::updateImpl(...).
1651  ChangeStatus updateImpl(Attributor &A) override {
1652  ChangeStatus Change = Base::updateImpl(A);
1653  if (isKnownNonNull())
1654  return Change;
1655 
1656  if (!NullIsDefined) {
1657  const auto &DerefAA = A.getAAFor<AADereferenceable>(*this, getIRPosition());
1658  if (DerefAA.getAssumedDereferenceableBytes())
1659  return Change;
1660  }
1661 
1662  const DataLayout &DL = A.getDataLayout();
1663 
1664  auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1665  bool Stripped) -> bool {
1666  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1667  if (!Stripped && this == &AA) {
1668  if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
1669  /* CtxI */ getCtxI(),
1670  /* TODO: DT */ nullptr))
1671  T.indicatePessimisticFixpoint();
1672  } else {
1673  // Use abstract attribute information.
1674  const AANonNull::StateType &NS =
1675  static_cast<const AANonNull::StateType &>(AA.getState());
1676  T ^= NS;
1677  }
1678  return T.isValidState();
1679  };
1680 
1681  StateType T;
1682  if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1683  T, VisitValueCB))
1684  return indicatePessimisticFixpoint();
1685 
1686  return clampStateAndIndicateChange(getState(), T);
1687  }
1688 
1689  /// See AbstractAttribute::trackStatistics()
1690  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1691 };
1692 
1693 /// NonNull attribute for function return value.
1694 struct AANonNullReturned final
1695  : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1696  AANonNullReturned(const IRPosition &IRP)
1697  : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1698 
1699  /// See AbstractAttribute::trackStatistics()
1700  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1701 };
1702 
1703 /// NonNull attribute for function argument.
1704 struct AANonNullArgument final
1705  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1706  AANonNullImpl> {
1707  AANonNullArgument(const IRPosition &IRP)
1708  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1709  AANonNullImpl>(
1710  IRP) {}
1711 
1712  /// See AbstractAttribute::trackStatistics()
1713  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1714 };
1715 
1716 struct AANonNullCallSiteArgument final : AANonNullFloating {
1717  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1718 
1719  /// See AbstractAttribute::trackStatistics()
1720  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1721 };
1722 
1723 /// NonNull attribute for a call site return position.
1724 struct AANonNullCallSiteReturned final
1725  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1726  AANonNullImpl> {
1727  AANonNullCallSiteReturned(const IRPosition &IRP)
1728  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1729  AANonNullImpl>(
1730  IRP) {}
1731 
1732  /// See AbstractAttribute::trackStatistics()
1733  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1734 };
1735 
1736 /// ------------------------ No-Recurse Attributes ----------------------------
1737 
1738 struct AANoRecurseImpl : public AANoRecurse {
1739  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1740 
1741  /// See AbstractAttribute::getAsStr()
1742  const std::string getAsStr() const override {
1743  return getAssumed() ? "norecurse" : "may-recurse";
1744  }
1745 };
1746 
1747 struct AANoRecurseFunction final : AANoRecurseImpl {
1748  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1749 
1750  /// See AbstractAttribute::initialize(...).
1751  void initialize(Attributor &A) override {
1752  AANoRecurseImpl::initialize(A);
1753  if (const Function *F = getAnchorScope())
1754  if (A.getInfoCache().getSccSize(*F) == 1)
1755  return;
1756  indicatePessimisticFixpoint();
1757  }
1758 
1759  /// See AbstractAttribute::updateImpl(...).
1760  ChangeStatus updateImpl(Attributor &A) override {
1761 
1762  auto CheckForNoRecurse = [&](Instruction &I) {
1763  ImmutableCallSite ICS(&I);
1764  if (ICS.hasFnAttr(Attribute::NoRecurse))
1765  return true;
1766 
1767  const auto &NoRecurseAA =
1768  A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1769  if (!NoRecurseAA.isAssumedNoRecurse())
1770  return false;
1771 
1772  // Recursion to the same function
1773  if (ICS.getCalledFunction() == getAnchorScope())
1774  return false;
1775 
1776  return true;
1777  };
1778 
1779  if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1780  return indicatePessimisticFixpoint();
1781  return ChangeStatus::UNCHANGED;
1782  }
1783 
1784  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1785 };
1786 
1787 /// NoRecurse attribute deduction for a call site.
1788 struct AANoRecurseCallSite final : AANoRecurseImpl {
1789  AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1790 
1791  /// See AbstractAttribute::initialize(...).
1792  void initialize(Attributor &A) override {
1793  AANoRecurseImpl::initialize(A);
1794  Function *F = getAssociatedFunction();
1795  if (!F)
1796  indicatePessimisticFixpoint();
1797  }
1798 
1799  /// See AbstractAttribute::updateImpl(...).
1800  ChangeStatus updateImpl(Attributor &A) override {
1801  // TODO: Once we have call site specific value information we can provide
1802  // call site specific liveness information and then it makes
1803  // sense to specialize attributes for call site arguments instead of
1804  // redirecting requests to the callee argument.
1805  Function *F = getAssociatedFunction();
1806  const IRPosition &FnPos = IRPosition::function(*F);
1807  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1808  return clampStateAndIndicateChange(
1809  getState(),
1810  static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1811  }
1812 
1813  /// See AbstractAttribute::trackStatistics()
1814  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1815 };
1816 
1817 /// ------------------------ Will-Return Attributes ----------------------------
1818 
1819 // Helper function that checks whether a function has any cycle.
1820 // TODO: Replace with more efficient code
1821 static bool containsCycle(Function &F) {
1822  SmallPtrSet<BasicBlock *, 32> Visited;
1823 
1824  // Traverse BBs by DFS and check whether a successor is already visited.
1825  for (BasicBlock *BB : depth_first(&F)) {
1826  Visited.insert(BB);
1827  for (auto *SuccBB : successors(BB)) {
1828  if (Visited.count(SuccBB))
1829  return true;
1830  }
1831  }
1832  return false;
1833 }
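// Editorial note: the check above flags any edge into an already-visited
// block, not just DFS back edges, so it also fires on acyclic joins. For a
// diamond CFG A->B, A->C, B->D, C->D the DFS visits D via B first and then
// reports the C->D edge as a "cycle" even though the graph is acyclic. That
// is conservative but safe for the willreturn deduction below.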
1834 
1835 // Helper function that checks whether the function has a loop which might
1836 // become an endless loop.
1837 // FIXME: Any cycle is regarded as endless loop for now.
1838 // We have to allow some patterns.
1839 static bool containsPossiblyEndlessLoop(Function *F) {
1840  return !F || !F->hasExactDefinition() || containsCycle(*F);
1841 }
1842 
1843 struct AAWillReturnImpl : public AAWillReturn {
1844  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1845 
1846  /// See AbstractAttribute::initialize(...).
1847  void initialize(Attributor &A) override {
1848  AAWillReturn::initialize(A);
1849 
1850  Function *F = getAssociatedFunction();
1851  if (containsPossiblyEndlessLoop(F))
1852  indicatePessimisticFixpoint();
1853  }
1854 
1855  /// See AbstractAttribute::updateImpl(...).
1856  ChangeStatus updateImpl(Attributor &A) override {
1857  auto CheckForWillReturn = [&](Instruction &I) {
1858  IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1859  const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1860  if (WillReturnAA.isKnownWillReturn())
1861  return true;
1862  if (!WillReturnAA.isAssumedWillReturn())
1863  return false;
1864  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
1865  return NoRecurseAA.isAssumedNoRecurse();
1866  };
1867 
1868  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
1869  return indicatePessimisticFixpoint();
1870 
1871  return ChangeStatus::UNCHANGED;
1872  }
1873 
1874  /// See AbstractAttribute::getAsStr()
1875  const std::string getAsStr() const override {
1876  return getAssumed() ? "willreturn" : "may-noreturn";
1877  }
1878 };
1879 
1880 struct AAWillReturnFunction final : AAWillReturnImpl {
1881  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
1882 
1883  /// See AbstractAttribute::trackStatistics()
1884  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
1885 };
1886 
1887 /// WillReturn attribute deduction for a call site.
1888 struct AAWillReturnCallSite final : AAWillReturnImpl {
1889  AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
1890 
1891  /// See AbstractAttribute::initialize(...).
1892  void initialize(Attributor &A) override {
1893  AAWillReturnImpl::initialize(A);
1894  Function *F = getAssociatedFunction();
1895  if (!F)
1896  indicatePessimisticFixpoint();
1897  }
1898 
1899  /// See AbstractAttribute::updateImpl(...).
1900  ChangeStatus updateImpl(Attributor &A) override {
1901  // TODO: Once we have call site specific value information we can provide
1902  // call site specific liveness information and then it makes
1903  // sense to specialize attributes for call site arguments instead of
1904  // redirecting requests to the callee argument.
1905  Function *F = getAssociatedFunction();
1906  const IRPosition &FnPos = IRPosition::function(*F);
1907  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
1908  return clampStateAndIndicateChange(
1909  getState(),
1910  static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
1911  }
1912 
1913  /// See AbstractAttribute::trackStatistics()
1914  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
1915 };
1916 
1917 /// ------------------------ NoAlias Argument Attribute ------------------------
1918 
1919 struct AANoAliasImpl : AANoAlias {
1920  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
1921 
1922  const std::string getAsStr() const override {
1923  return getAssumed() ? "noalias" : "may-alias";
1924  }
1925 };
1926 
1927 /// NoAlias attribute for a floating value.
1928 struct AANoAliasFloating final : AANoAliasImpl {
1929  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1930 
1931  /// See AbstractAttribute::initialize(...).
1932  void initialize(Attributor &A) override {
1933  AANoAliasImpl::initialize(A);
1934  Value &Val = getAssociatedValue();
1935  if (isa<AllocaInst>(Val))
1936  indicateOptimisticFixpoint();
1937  if (isa<ConstantPointerNull>(Val) &&
1938  Val.getType()->getPointerAddressSpace() == 0)
1939  indicateOptimisticFixpoint();
1940  }
1941 
1942  /// See AbstractAttribute::updateImpl(...).
1943  ChangeStatus updateImpl(Attributor &A) override {
1944  // TODO: Implement this.
1945  return indicatePessimisticFixpoint();
1946  }
1947 
1948  /// See AbstractAttribute::trackStatistics()
1949  void trackStatistics() const override {
1950  STATS_DECLTRACK_FLOATING_ATTR(noalias)
1951  }
1952 };
1953 
1954 /// NoAlias attribute for an argument.
1955 struct AANoAliasArgument final
1956  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
1957  AANoAliasArgument(const IRPosition &IRP)
1958  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
1959 
1960  /// See AbstractAttribute::trackStatistics()
1961  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
1962 };
1963 
1964 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
1965  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1966 
1967  /// See AbstractAttribute::initialize(...).
1968  void initialize(Attributor &A) override {
1969  // See callsite argument attribute and callee argument attribute.
1970  ImmutableCallSite ICS(&getAnchorValue());
1971  if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
1972  indicateOptimisticFixpoint();
1973  }
1974 
1975  /// See AbstractAttribute::updateImpl(...).
1976  ChangeStatus updateImpl(Attributor &A) override {
1977  // We can deduce "noalias" if the following conditions hold.
1978  // (i) Associated value is assumed to be noalias in the definition.
1979  // (ii) Associated value is assumed to be no-capture in all the uses
1980  // possibly executed before this callsite.
1981  // (iii) There is no other pointer argument which could alias with the
1982  // value.
1983 
1984  const Value &V = getAssociatedValue();
1985  const IRPosition IRP = IRPosition::value(V);
1986 
1987  // (i) Check whether noalias holds in the definition.
1988 
1989  auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
1990 
1991  if (!NoAliasAA.isAssumedNoAlias())
1992  return indicatePessimisticFixpoint();
1993 
1994  LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
1995  << " is assumed NoAlias in the definition\n");
1996 
1997  // (ii) Check whether the value is captured in the scope using AANoCapture.
1998  // FIXME: This is conservative though, it is better to look at CFG and
1999  // check only uses possibly executed before this callsite.
2000 
2001  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2002  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2003  LLVM_DEBUG(
2004  dbgs() << "[Attributor][AANoAliasCSArg] " << V
2005  << " cannot be noalias as it is potentially captured\n");
2006  return indicatePessimisticFixpoint();
2007  }
2008 
2009  // (iii) Check there is no other pointer argument which could alias with the
2010  // value.
2011  ImmutableCallSite ICS(&getAnchorValue());
2012  for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2013  if (getArgNo() == (int)i)
2014  continue;
2015  const Value *ArgOp = ICS.getArgOperand(i);
2016  if (!ArgOp->getType()->isPointerTy())
2017  continue;
2018 
2019  if (const Function *F = getAnchorScope()) {
2020  if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2021  bool IsNoAlias = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2022  LLVM_DEBUG(dbgs()
2023  << "[Attributor][NoAliasCSArg] Check alias between "
2024  "callsite arguments "
2025  << IsNoAlias << " " << getAssociatedValue() << " "
2026  << *ArgOp << " => " << (IsNoAlias ? "no-" : "")
2027  << "alias\n");
2028 
2029  if (IsNoAlias)
2030  continue;
2031  }
2032  }
2033  return indicatePessimisticFixpoint();
2034  }
2035 
2036  return ChangeStatus::UNCHANGED;
2037  }
2038 
2039  /// See AbstractAttribute::trackStatistics()
2040  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2041 };
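// Editorial example: for a call like "foo(p, p)" where p is noalias at its
// definition, conditions (i) and (ii) may hold, but step (iii) rejects the
// deduction because AAResults cannot prove the two identical pointer
// arguments do not alias. Two distinct heap allocations passed as the two
// arguments, in contrast, pass the isNoAlias check and keep the callsite
// argument noalias.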
2042 
2043 /// NoAlias attribute for function return value.
2044 struct AANoAliasReturned final : AANoAliasImpl {
2045  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2046 
2047  /// See AbstractAttribute::updateImpl(...).
2048  virtual ChangeStatus updateImpl(Attributor &A) override {
2049 
2050  auto CheckReturnValue = [&](Value &RV) -> bool {
2051  if (Constant *C = dyn_cast<Constant>(&RV))
2052  if (C->isNullValue() || isa<UndefValue>(C))
2053  return true;
2054 
2055  /// For now, we can only deduce noalias if we have call sites.
2056  /// FIXME: add more support.
2057  ImmutableCallSite ICS(&RV);
2058  if (!ICS)
2059  return false;
2060 
2061  const IRPosition &RVPos = IRPosition::value(RV);
2062  const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2063  if (!NoAliasAA.isAssumedNoAlias())
2064  return false;
2065 
2066  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2067  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2068  };
2069 
2070  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2071  return indicatePessimisticFixpoint();
2072 
2073  return ChangeStatus::UNCHANGED;
2074  }
2075 
2076  /// See AbstractAttribute::trackStatistics()
2077  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2078 };
2079 
2080 /// NoAlias attribute deduction for a call site return value.
2081 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2082  AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2083 
2084  /// See AbstractAttribute::initialize(...).
2085  void initialize(Attributor &A) override {
2086  AANoAliasImpl::initialize(A);
2087  Function *F = getAssociatedFunction();
2088  if (!F)
2089  indicatePessimisticFixpoint();
2090  }
2091 
2092  /// See AbstractAttribute::updateImpl(...).
2093  ChangeStatus updateImpl(Attributor &A) override {
2094  // TODO: Once we have call site specific value information we can provide
2095  // call site specific liveness information and then it makes
2096  // sense to specialize attributes for call site arguments instead of
2097  // redirecting requests to the callee argument.
2098  Function *F = getAssociatedFunction();
2099  const IRPosition &FnPos = IRPosition::returned(*F);
2100  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2101  return clampStateAndIndicateChange(
2102  getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2103  }
2104 
2105  /// See AbstractAttribute::trackStatistics()
2106  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2107 };
2108 
2109 /// -------------------AAIsDead Function Attribute-----------------------
2110 
2111 struct AAIsDeadImpl : public AAIsDead {
2112  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2113 
2114  void initialize(Attributor &A) override {
2115  const Function *F = getAssociatedFunction();
2116  if (F && !F->isDeclaration())
2117  exploreFromEntry(A, F);
2118  }
2119 
2120  void exploreFromEntry(Attributor &A, const Function *F) {
2121  ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
2122 
2123  for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
2124  if (const Instruction *NextNoReturnI =
2125  findNextNoReturn(A, ToBeExploredPaths[i]))
2126  NoReturnCalls.insert(NextNoReturnI);
2127 
2128  // Mark the block live after we looked for no-return instructions.
2129  assumeLive(A, F->getEntryBlock());
2130  }
2131 
2132  /// Find the next assumed noreturn instruction in the block of \p I starting
2133  /// from, thus including, \p I.
2134  ///
2135  /// The caller is responsible for monitoring the ToBeExploredPaths set, as
2136  /// new instructions discovered in other basic blocks will be placed there.
2137  ///
2138  /// \returns The next assumed noreturn instruction in the block of \p I
2139  /// starting from, thus including, \p I.
2140  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);
2141 
2142  /// See AbstractAttribute::getAsStr().
2143  const std::string getAsStr() const override {
2144  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2145  std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
2146  std::to_string(NoReturnCalls.size()) + "]";
2147  }
2148 
2149  /// See AbstractAttribute::manifest(...).
2150  ChangeStatus manifest(Attributor &A) override {
2151  assert(getState().isValidState() &&
2152  "Attempted to manifest an invalid state!");
2153 
2155  Function &F = *getAssociatedFunction();
2156 
2157  if (AssumedLiveBlocks.empty()) {
2158  A.deleteAfterManifest(F);
2159  return ChangeStatus::CHANGED;
2160  }
2161 
2162  // Flag to determine if we can change an invoke to a call assuming the
2163  // callee is nounwind. This is not possible if the personality of the
2164  // function allows catching asynchronous exceptions.
2165  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2166 
2167  for (const Instruction *NRC : NoReturnCalls) {
2168  Instruction *I = const_cast<Instruction *>(NRC);
2169  BasicBlock *BB = I->getParent();
2170  Instruction *SplitPos = I->getNextNode();
2171  // TODO: mark stuff before unreachable instructions as dead.
2172 
2173  if (auto *II = dyn_cast<InvokeInst>(I)) {
2174  // If we keep the invoke the split position is at the beginning of the
2175  // normal destination block (it invokes a noreturn function after all).
2176  BasicBlock *NormalDestBB = II->getNormalDest();
2177  SplitPos = &NormalDestBB->front();
2178 
2179  /// Invoke is replaced with a call and unreachable is placed after it if
2180  /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2181  /// and only place an unreachable in the normal successor.
2182  if (Invoke2CallAllowed) {
2183  if (II->getCalledFunction()) {
2184  const IRPosition &IPos = IRPosition::callsite_function(*II);
2185  const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2186  if (AANoUnw.isAssumedNoUnwind()) {
2187  LLVM_DEBUG(dbgs()
2188  << "[AAIsDead] Replace invoke with call inst\n");
2189  // We do not need an invoke (II) but instead want a call followed
2190  // by an unreachable. However, we do not remove II as other
2191  // abstract attributes might have it cached as part of their
2192  // results. Given that we modify the CFG anyway, we simply keep II
2193  // around but in a new dead block. To avoid II being live through
2194  // a different edge we have to ensure the block we place it in is
2195  // only reached from the current block of II and then not reached
2196  // at all when we insert the unreachable.
2197  SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2198  CallInst *CI = createCallMatchingInvoke(II);
2199  CI->insertBefore(II);
2200  CI->takeName(II);
2201  II->replaceAllUsesWith(CI);
2202  SplitPos = CI->getNextNode();
2203  }
2204  }
2205  }
2206 
2207  if (SplitPos == &NormalDestBB->front()) {
2208  // If this is an invoke of a noreturn function the edge to the normal
2209  // destination block is dead but not necessarily the block itself.
2210  // TODO: We need to move to an edge based system during deduction and
2211  // also manifest.
2212  assert(!NormalDestBB->isLandingPad() &&
2213  "Expected the normal destination not to be a landingpad!");
2214  if (NormalDestBB->getUniquePredecessor() == BB) {
2215  assumeLive(A, *NormalDestBB);
2216  } else {
2217  BasicBlock *SplitBB =
2218  SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2219  // The split block is live even if it contains only an unreachable
2220  // instruction at the end.
2221  assumeLive(A, *SplitBB);
2222  SplitPos = SplitBB->getTerminator();
2223  HasChanged = ChangeStatus::CHANGED;
2224  }
2225  }
2226  }
2227 
2228  if (isa_and_nonnull<UnreachableInst>(SplitPos))
2229  continue;
2230 
2231  BB = SplitPos->getParent();
2232  SplitBlock(BB, SplitPos);
2233  changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2234  HasChanged = ChangeStatus::CHANGED;
2235  }
2236 
2237  for (BasicBlock &BB : F)
2238  if (!AssumedLiveBlocks.count(&BB))
2239  A.deleteAfterManifest(BB);
2240 
2241  return HasChanged;
2242  }
2243 
2244  /// See AbstractAttribute::updateImpl(...).
2245  ChangeStatus updateImpl(Attributor &A) override;
2246 
2247  /// See AAIsDead::isAssumedDead(BasicBlock *).
2248  bool isAssumedDead(const BasicBlock *BB) const override {
2249  assert(BB->getParent() == getAssociatedFunction() &&
2250  "BB must be in the same anchor scope function.");
2251 
2252  if (!getAssumed())
2253  return false;
2254  return !AssumedLiveBlocks.count(BB);
2255  }
2256 
2257  /// See AAIsDead::isKnownDead(BasicBlock *).
2258  bool isKnownDead(const BasicBlock *BB) const override {
2259  return getKnown() && isAssumedDead(BB);
2260  }
2261 
2262  /// See AAIsDead::isAssumed(Instruction *I).
2263  bool isAssumedDead(const Instruction *I) const override {
2264  assert(I->getParent()->getParent() == getAssociatedFunction() &&
2265  "Instruction must be in the same anchor scope function.");
2266 
2267  if (!getAssumed())
2268  return false;
2269 
2270  // If it is not in AssumedLiveBlocks then it is for sure dead.
2271  // Otherwise, it can still be after a noreturn call in a live block.
2272  if (!AssumedLiveBlocks.count(I->getParent()))
2273  return true;
2274 
2275  // If it is not after a noreturn call, then it is live.
2276  return isAfterNoReturn(I);
2277  }
2278 
2279  /// See AAIsDead::isKnownDead(Instruction *I).
2280  bool isKnownDead(const Instruction *I) const override {
2281  return getKnown() && isAssumedDead(I);
2282  }
2283 
2284  /// Check if the instruction is after a noreturn call, in other words, assumed dead.
2285  bool isAfterNoReturn(const Instruction *I) const;
2286 
2287  /// Determine if \p F might catch asynchronous exceptions.
2288  static bool mayCatchAsynchronousExceptions(const Function &F) {
2289  return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2290  }
2291 
2292  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2293  /// that internal functions called from \p BB should now be looked at.
2294  void assumeLive(Attributor &A, const BasicBlock &BB) {
2295  if (!AssumedLiveBlocks.insert(&BB).second)
2296  return;
2297 
2298  // We assume that all of BB is (probably) live now and if there are calls to
2299  // internal functions we will assume that those are now live as well. This
2300  // is a performance optimization for blocks with calls to a lot of internal
2301  // functions. It can however cause dead functions to be treated as live.
2302  for (const Instruction &I : BB)
2303  if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2304  if (const Function *F = ICS.getCalledFunction())
2305  if (F->hasLocalLinkage())
2306  A.markLiveInternalFunction(*F);
2307  }
2308 
2309  /// Collection of to be explored paths.
2310  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;
2311 
2312  /// Collection of all assumed live BasicBlocks.
2313  DenseSet<const BasicBlock *> AssumedLiveBlocks;
2314 
2315  /// Collection of calls with noreturn attribute, assumed or known.
2316  SmallSetVector<const Instruction *, 4> NoReturnCalls;
2317 };
2318 
2319 struct AAIsDeadFunction final : public AAIsDeadImpl {
2320  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
2321 
2322  /// See AbstractAttribute::trackStatistics()
2323  void trackStatistics() const override {
2324  STATS_DECL(PartiallyDeadBlocks, Function,
2325  "Number of basic blocks classified as partially dead");
2326  BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
2327  }
2328 };
2329 
2330 bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
2331  const Instruction *PrevI = I->getPrevNode();
2332  while (PrevI) {
2333  if (NoReturnCalls.count(PrevI))
2334  return true;
2335  PrevI = PrevI->getPrevNode();
2336  }
2337  return false;
2338 }
2339 
2340 const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
2341  const Instruction *I) {
2342  const BasicBlock *BB = I->getParent();
2343  const Function &F = *BB->getParent();
2344 
2345  // Flag to determine if we can change an invoke to a call assuming the callee
2346  // is nounwind. This is not possible if the personality of the function allows
2347  // catching asynchronous exceptions.
2348  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2349 
2350  // TODO: We should have a function that determines if an "edge" is dead.
2351  // Edges could be from an instruction to the next or from a terminator
2352  // to the successor. For now, we need to special case the unwind block
2353  // of InvokeInst below.
2354 
2355  while (I) {
2356  ImmutableCallSite ICS(I);
2357 
2358  if (ICS) {
2359  const IRPosition &IPos = IRPosition::callsite_function(ICS);
2360  // Regardless of the no-return property of an invoke instruction we only
2361  // learn that the regular successor is not reachable through this
2362  // instruction but the unwind block might still be.
2363  if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
2364  // Use nounwind to justify the unwind block is dead as well.
2365  const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2366  if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
2367  assumeLive(A, *Invoke->getUnwindDest());
2368  ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
2369  }
2370  }
2371 
2372  const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
2373  if (NoReturnAA.isAssumedNoReturn())
2374  return I;
2375  }
2376 
2377  I = I->getNextNode();
2378  }
2379 
2380  // get new paths (reachable blocks).
2381  for (const BasicBlock *SuccBB : successors(BB)) {
2382  assumeLive(A, *SuccBB);
2383  ToBeExploredPaths.insert(&SuccBB->front());
2384  }
2385 
2386  // No noreturn instruction found.
2387  return nullptr;
2388 }
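// Editorial walk-through: given a block "call void @f() ; br label %next"
// where @f is assumed noreturn, the loop above returns the call before the
// terminator is reached, so %next is neither assumed live nor enqueued.
// Only when no noreturn call is found does control reach the successor loop
// below, which marks and enqueues all successor blocks.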
2389 
2390 ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
2391  ChangeStatus Status = ChangeStatus::UNCHANGED;
2392 
2393  // Temporary collection to iterate over existing noreturn instructions. This
2394  // will allow easier modification of the NoReturnCalls collection
2395  SmallVector<const Instruction *, 8> NoReturnChanged;
2396 
2397  for (const Instruction *I : NoReturnCalls)
2398  NoReturnChanged.push_back(I);
2399 
2400  for (const Instruction *I : NoReturnChanged) {
2401  size_t Size = ToBeExploredPaths.size();
2402 
2403  const Instruction *NextNoReturnI = findNextNoReturn(A, I);
2404  if (NextNoReturnI != I) {
2405  Status = ChangeStatus::CHANGED;
2406  NoReturnCalls.remove(I);
2407  if (NextNoReturnI)
2408  NoReturnCalls.insert(NextNoReturnI);
2409  }
2410 
2411  // Explore new paths.
2412  while (Size != ToBeExploredPaths.size()) {
2413  Status = ChangeStatus::CHANGED;
2414  if (const Instruction *NextNoReturnI =
2415  findNextNoReturn(A, ToBeExploredPaths[Size++]))
2416  NoReturnCalls.insert(NextNoReturnI);
2417  }
2418  }
2419 
2420  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
2421  << AssumedLiveBlocks.size() << " Total number of blocks: "
2422  << getAssociatedFunction()->size() << "\n");
2423 
2424  // If we know everything is live there is no need to query for liveness.
2425  if (NoReturnCalls.empty() &&
2426  getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
2427  // Indicating a pessimistic fixpoint will cause the state to be "invalid"
2428  // which will cause the Attributor to not return the AAIsDead on request,
2429  // which will prevent us from querying isAssumedDead().
2430  indicatePessimisticFixpoint();
2431  assert(!isValidState() && "Expected an invalid state!");
2432  Status = ChangeStatus::CHANGED;
2433  }
2434 
2435  return Status;
2436 }
2437 
2438 /// Liveness information for a call site.
2439 struct AAIsDeadCallSite final : AAIsDeadImpl {
2440  AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
2441 
2442  /// See AbstractAttribute::initialize(...).
2443  void initialize(Attributor &A) override {
2444  // TODO: Once we have call site specific value information we can provide
2445  // call site specific liveness information and then it makes
2446  // sense to specialize attributes for call sites instead of
2447  // redirecting requests to the callee.
2448  llvm_unreachable("Abstract attributes for liveness are not "
2449  "supported for call sites yet!");
2450  }
2451 
2452  /// See AbstractAttribute::updateImpl(...).
2453  ChangeStatus updateImpl(Attributor &A) override {
2454  return indicatePessimisticFixpoint();
2455  }
2456 
2457  /// See AbstractAttribute::trackStatistics()
2458  void trackStatistics() const override {}
2459 };
2460 
2461 /// -------------------- Dereferenceable Argument Attribute --------------------
2462 
2463 template <>
2464 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2465  const DerefState &R) {
2466  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
2467  S.DerefBytesState, R.DerefBytesState);
2468  ChangeStatus CS1 =
2469  clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
2470  return CS0 | CS1;
2471 }
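// A minimal usage sketch of the specialization above (editorial; the byte
// values are made up):
//
//   DerefState S, R;
//   S.DerefBytesState.takeAssumedMinimum(8); // we assumed 8 bytes so far
//   R.DerefBytesState.takeAssumedMinimum(4); // the peer only justifies 4
//   ChangeStatus CS = clampStateAndIndicateChange<DerefState>(S, R);
//   // CS == ChangeStatus::CHANGED; S now assumes only 4 bytes, and the
//   // GlobalState clamp contributes to CS through the OR above.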
2472 
2473 struct AADereferenceableImpl : AADereferenceable {
2474  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2475  using StateType = DerefState;
2476 
2477  void initialize(Attributor &A) override {
2478  SmallVector<Attribute, 4> Attrs;
2479  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2480  Attrs);
2481  for (const Attribute &Attr : Attrs)
2482  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2483 
2484  NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2485 
2486  const IRPosition &IRP = this->getIRPosition();
2487  bool IsFnInterface = IRP.isFnInterfaceKind();
2488  const Function *FnScope = IRP.getAnchorScope();
2489  if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2490  indicatePessimisticFixpoint();
2491  }
2492 
2493  /// See AbstractAttribute::getState()
2494  /// {
2495  StateType &getState() override { return *this; }
2496  const StateType &getState() const override { return *this; }
2497  /// }
2498 
2499  /// See AAFromMustBeExecutedContext
2500  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2501  bool IsNonNull = false;
2502  bool TrackUse = false;
2503  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2504  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2505  takeKnownDerefBytesMaximum(DerefBytes);
2506  return TrackUse;
2507  }
2508 
2509  void getDeducedAttributes(LLVMContext &Ctx,
2510  SmallVectorImpl<Attribute> &Attrs) const override {
2511  // TODO: Add *_globally support
2512  if (isAssumedNonNull())
2513  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2514  Ctx, getAssumedDereferenceableBytes()));
2515  else
2516  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2517  Ctx, getAssumedDereferenceableBytes()));
2518  }
2519 
2520  /// See AbstractAttribute::getAsStr().
2521  const std::string getAsStr() const override {
2522  if (!getAssumedDereferenceableBytes())
2523  return "unknown-dereferenceable";
2524  return std::string("dereferenceable") +
2525  (isAssumedNonNull() ? "" : "_or_null") +
2526  (isAssumedGlobal() ? "_globally" : "") + "<" +
2527  std::to_string(getKnownDereferenceableBytes()) + "-" +
2528  std::to_string(getAssumedDereferenceableBytes()) + ">";
2529  }
2530 };
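// Editorial example of the string above: a position with 4 known and 8
// assumed dereferenceable bytes that may still be null prints as
// "dereferenceable_or_null<4-8>", i.e., the interval is <known-assumed>.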
2531 
2532 /// Dereferenceable attribute for a floating value.
2533 struct AADereferenceableFloating
2534  : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
2535  using Base =
2536  AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
2537  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
2538 
2539  /// See AbstractAttribute::updateImpl(...).
2540  ChangeStatus updateImpl(Attributor &A) override {
2541  ChangeStatus Change = Base::updateImpl(A);
2542 
2543  const DataLayout &DL = A.getDataLayout();
2544 
2545  auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2546  unsigned IdxWidth =
2547  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
2548  APInt Offset(IdxWidth, 0);
2549  const Value *Base =
2550  V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
2551 
2552  const auto &AA =
2553  A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
2554  int64_t DerefBytes = 0;
2555  if (!Stripped && this == &AA) {
2556  // Use IR information if we did not strip anything.
2557  // TODO: track globally.
2558  bool CanBeNull;
2559  DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
2560  T.GlobalState.indicatePessimisticFixpoint();
2561  } else {
2562  const DerefState &DS = static_cast<const DerefState &>(AA.getState());
2563  DerefBytes = DS.DerefBytesState.getAssumed();
2564  T.GlobalState &= DS.GlobalState;
2565  }
2566 
2567  // For now we do not try to "increase" dereferenceability due to negative
2568  // indices, as we first have to come up with code to deal with loops and
2569  // with overflows of the dereferenceable bytes.
2570  int64_t OffsetSExt = Offset.getSExtValue();
2571  if (OffsetSExt < 0)
2572  OffsetSExt = 0;
2573 
2574  T.takeAssumedDerefBytesMinimum(
2575  std::max(int64_t(0), DerefBytes - OffsetSExt));
2576 
2577  if (this == &AA) {
2578  if (!Stripped) {
2579  // If nothing was stripped IR information is all we got.
2580  T.takeKnownDerefBytesMaximum(
2581  std::max(int64_t(0), DerefBytes - OffsetSExt));
2582  T.indicatePessimisticFixpoint();
2583  } else if (OffsetSExt > 0) {
2584  // If something was stripped but there is circular reasoning we look
2585  // for the offset. If it is positive we basically decrease the
2586  // dereferenceable bytes in a circular loop now, which will simply
2587  // drive them down to the known value in a very slow way which we
2588  // can accelerate.
2589  T.indicatePessimisticFixpoint();
2590  }
2591  }
2592 
2593  return T.isValidState();
2594  };
2595 
2596  DerefState T;
2597  if (!genericValueTraversal<AADereferenceable, DerefState>(
2598  A, getIRPosition(), *this, T, VisitValueCB))
2599  return indicatePessimisticFixpoint();
2600 
2601  return Change | clampStateAndIndicateChange(getState(), T);
2602  }
2603 
2604  /// See AbstractAttribute::trackStatistics()
2605  void trackStatistics() const override {
2606  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
2607  }
2608 };
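// Editorial example for the callback above: if the traversal strips a GEP
// with constant offset 4 from a base assumed dereferenceable(12), this
// position is clamped to max(0, 12 - 4) == 8 assumed bytes; a negative
// offset is treated as 0 instead of increasing dereferenceability.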
2609 
2610 /// Dereferenceable attribute for a return value.
2611 struct AADereferenceableReturned final
2612  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2613  DerefState> {
2614  AADereferenceableReturned(const IRPosition &IRP)
2615  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2616  DerefState>(IRP) {}
2617 
2618  /// See AbstractAttribute::trackStatistics()
2619  void trackStatistics() const override {
2620  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
2621  }
2622 };
2623 
2624 /// Dereferenceable attribute for an argument
2625 struct AADereferenceableArgument final
2626  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
2627  AADereferenceable, AADereferenceableImpl, DerefState> {
2628  using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
2629  AADereferenceable, AADereferenceableImpl, DerefState>;
2630  AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
2631 
2632  /// See AbstractAttribute::trackStatistics()
2633  void trackStatistics() const override {
2634  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
2635  }
2636 };
2637 
2638 /// Dereferenceable attribute for a call site argument.
2639 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
2640  AADereferenceableCallSiteArgument(const IRPosition &IRP)
2641  : AADereferenceableFloating(IRP) {}
2642 
2643  /// See AbstractAttribute::trackStatistics()
2644  void trackStatistics() const override {
2645  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
2646  }
2647 };
2648 
2649 /// Dereferenceable attribute deduction for a call site return value.
2650 struct AADereferenceableCallSiteReturned final
2651  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
2652  AADereferenceable, AADereferenceableImpl> {
2653  using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
2654  AADereferenceable, AADereferenceableImpl>;
2655  AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
2656 
2657  /// See AbstractAttribute::initialize(...).
2658  void initialize(Attributor &A) override {
2659  Base::initialize(A);
2660  Function *F = getAssociatedFunction();
2661  if (!F)
2662  indicatePessimisticFixpoint();
2663  }
2664 
2665  /// See AbstractAttribute::updateImpl(...).
2666  ChangeStatus updateImpl(Attributor &A) override {
2667  // TODO: Once we have call site specific value information we can provide
2668  // call site specific liveness information and then it makes
2669  // sense to specialize attributes for call site arguments instead of
2670  // redirecting requests to the callee argument.
2671 
2672  ChangeStatus Change = Base::updateImpl(A);
2673  Function *F = getAssociatedFunction();
2674  const IRPosition &FnPos = IRPosition::returned(*F);
2675  auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
2676  return Change |
2677  clampStateAndIndicateChange(
2678  getState(), static_cast<const DerefState &>(FnAA.getState()));
2679  }
2680 
2681  /// See AbstractAttribute::trackStatistics()
2682  void trackStatistics() const override {
2683  STATS_DECLTRACK_CS_ATTR(dereferenceable);
2684  }
2685 };
2686 
2687 // ------------------------ Align Argument Attribute ------------------------
2688 
2689 struct AAAlignImpl : AAAlign {
2690  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
2691 
2692  // Max alignment value allowed in IR
2693  static const unsigned MAX_ALIGN = 1U << 29;
2694 
2695  /// See AbstractAttribute::initialize(...).
2696  void initialize(Attributor &A) override {
2697  takeAssumedMinimum(MAX_ALIGN);
2698 
2700  getAttrs({Attribute::Alignment}, Attrs);
2701  for (const Attribute &Attr : Attrs)
2702  takeKnownMaximum(Attr.getValueAsInt());
2703 
2704  if (getIRPosition().isFnInterfaceKind() &&
2705  (!getAssociatedFunction() ||
2706  !getAssociatedFunction()->hasExactDefinition()))
2707  indicatePessimisticFixpoint();
2708  }
2709 
2710  /// See AbstractAttribute::manifest(...).
2711  ChangeStatus manifest(Attributor &A) override {
2712  ChangeStatus Changed = ChangeStatus::UNCHANGED;
2713 
2714  // Check for users that allow alignment annotations.
2715  Value &AnchorVal = getIRPosition().getAnchorValue();
2716  for (const Use &U : AnchorVal.uses()) {
2717  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
2718  if (SI->getPointerOperand() == &AnchorVal)
2719  if (SI->getAlignment() < getAssumedAlign()) {
2720  STATS_DECLTRACK(AAAlign, Store,
2721  "Number of times alignment added to a store");
2722  SI->setAlignment(Align(getAssumedAlign()));
2723  Changed = ChangeStatus::CHANGED;
2724  }
2725  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
2726  if (LI->getPointerOperand() == &AnchorVal)
2727  if (LI->getAlignment() < getAssumedAlign()) {
2728  LI->setAlignment(Align(getAssumedAlign()));
2729  STATS_DECLTRACK(AAAlign, Load,
2730  "Number of times alignment added to a load");
2731  Changed = ChangeStatus::CHANGED;
2732  }
2733  }
2734  }
2735 
2736  return AAAlign::manifest(A) | Changed;
2737  }
2738 
2739  // TODO: Provide a helper to determine the implied ABI alignment, and check
2740  // that value in the existing manifest method and in a new one for
2741  // AAAlignImpl, to avoid making the alignment explicit if it did not improve.
2742 
2743  /// See AbstractAttribute::getDeducedAttributes
2744  virtual void
2745  getDeducedAttributes(LLVMContext &Ctx,
2746  SmallVectorImpl<Attribute> &Attrs) const override {
2747  if (getAssumedAlign() > 1)
2748  Attrs.emplace_back(
2749  Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
2750  }
2751 
2752  /// See AbstractAttribute::getAsStr().
2753  const std::string getAsStr() const override {
2754  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
2755  "-" + std::to_string(getAssumedAlign()) + ">")
2756  : "unknown-align";
2757  }
2758 };
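// Editorial example: with getAssumedAlign() == 16, the manifest above
// rewrites "store i32 %v, i32* %p, align 4" to use "align 16" (and likewise
// for loads), in addition to manifesting the align attribute itself;
// MAX_ALIGN mirrors the IR limit of 2^29 on alignment values.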
2759 
2760 /// Align attribute for a floating value.
2761 struct AAAlignFloating : AAAlignImpl {
2762  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2763 
2764  /// See AbstractAttribute::updateImpl(...).
2765  ChangeStatus updateImpl(Attributor &A) override {
2766  const DataLayout &DL = A.getDataLayout();
2767 
2768  auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
2769  bool Stripped) -> bool {
2770  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
2771  if (!Stripped && this == &AA) {
2772  // Use only IR information if we did not strip anything.
2773  const MaybeAlign PA = V.getPointerAlignment(DL);
2774  T.takeKnownMaximum(PA ? PA->value() : 0);
2775  T.indicatePessimisticFixpoint();
2776  } else {
2777  // Use abstract attribute information.
2778  const AAAlign::StateType &DS =
2779  static_cast<const AAAlign::StateType &>(AA.getState());
2780  T ^= DS;
2781  }
2782  return T.isValidState();
2783  };
2784 
2785  StateType T;
2786  if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
2787  VisitValueCB))
2788  return indicatePessimisticFixpoint();
2789 
2790  // TODO: If we know we visited all incoming values, and thus none are
2791  // assumed dead, we can take the known information from the state T.
2792  return clampStateAndIndicateChange(getState(), T);
2793  }
2794 
2795  /// See AbstractAttribute::trackStatistics()
2796  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
2797 };
2798 
2799 /// Align attribute for function return value.
2800 struct AAAlignReturned final
2801  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
2802  AAAlignReturned(const IRPosition &IRP)
2803  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
2804 
2805  /// See AbstractAttribute::trackStatistics()
2806  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
2807 };
2808 
2809 /// Align attribute for function argument.
2810 struct AAAlignArgument final
2811  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
2812  AAAlignArgument(const IRPosition &IRP)
2813  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
2814 
2815  /// See AbstractAttribute::trackStatistics()
2816  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
2817 };
2818 
2819 struct AAAlignCallSiteArgument final : AAAlignFloating {
2820  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
2821 
2822  /// See AbstractAttribute::manifest(...).
2823  ChangeStatus manifest(Attributor &A) override {
2824  return AAAlignImpl::manifest(A);
2825  }
2826 
2827  /// See AbstractAttribute::trackStatistics()
2828  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
2829 };
2830 
2831 /// Align attribute deduction for a call site return value.
2832 struct AAAlignCallSiteReturned final : AAAlignImpl {
2833  AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2834 
2835  /// See AbstractAttribute::initialize(...).
2836  void initialize(Attributor &A) override {
2837  AAAlignImpl::initialize(A);
2838  Function *F = getAssociatedFunction();
2839  if (!F)
2840  indicatePessimisticFixpoint();
2841  }
2842 
2843  /// See AbstractAttribute::updateImpl(...).
2844  ChangeStatus updateImpl(Attributor &A) override {
2845  // TODO: Once we have call site specific value information we can provide
2846  // call site specific liveness information and then it makes
2847  // sense to specialize attributes for call site arguments instead of
2848  // redirecting requests to the callee argument.
2849  Function *F = getAssociatedFunction();
2850  const IRPosition &FnPos = IRPosition::returned(*F);
2851  auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
2852  return clampStateAndIndicateChange(
2853  getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
2854  }
2855 
2856  /// See AbstractAttribute::trackStatistics()
2857  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
2858 };
2859 
2860 /// ------------------ Function No-Return Attribute ----------------------------
2861 struct AANoReturnImpl : public AANoReturn {
2862  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
2863 
2864  /// See AbstractAttribute::initialize(...).
2865  void initialize(Attributor &A) override {
2866  AANoReturn::initialize(A);
2867  Function *F = getAssociatedFunction();
2868  if (!F || F->hasFnAttribute(Attribute::WillReturn))
2869  indicatePessimisticFixpoint();
2870  }
2871 
2872  /// See AbstractAttribute::getAsStr().
2873  const std::string getAsStr() const override {
2874  return getAssumed() ? "noreturn" : "may-return";
2875  }
2876 
2877  /// See AbstractAttribute::updateImpl(Attributor &A).
2878  virtual ChangeStatus updateImpl(Attributor &A) override {
2879  const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, getIRPosition());
2880  if (WillReturnAA.isKnownWillReturn())
2881  return indicatePessimisticFixpoint();
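    // The always-false callback below makes checkForAllInstructions succeed
    // only if there is no (live) ret instruction, which is exactly the
    // noreturn property.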
2882  auto CheckForNoReturn = [](Instruction &) { return false; };
2883  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
2884  {(unsigned)Instruction::Ret}))
2885  return indicatePessimisticFixpoint();
2886  return ChangeStatus::UNCHANGED;
2887  }
2888 };
2889 
2890 struct AANoReturnFunction final : AANoReturnImpl {
2891  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2892 
2893  /// See AbstractAttribute::trackStatistics()
2894  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
2895 };
2896 
2897 /// NoReturn attribute deduction for a call site.
2898 struct AANoReturnCallSite final : AANoReturnImpl {
2899  AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2900 
2901  /// See AbstractAttribute::updateImpl(...).
2902  ChangeStatus updateImpl(Attributor &A) override {
2903  // TODO: Once we have call site specific value information we can provide
2904  // call site specific liveness information and then it makes
2905  // sense to specialize attributes for call site arguments instead of
2906  // redirecting requests to the callee argument.
2907  Function *F = getAssociatedFunction();
2908  const IRPosition &FnPos = IRPosition::function(*F);
2909  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
2910  return clampStateAndIndicateChange(
2911  getState(),
2912  static_cast<const AANoReturn::StateType &>(FnAA.getState()));
2913  }
2914 
2915  /// See AbstractAttribute::trackStatistics()
2916  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
2917 };
2918 
2919 /// ----------------------- Variable Capturing ---------------------------------
2920 
2921 /// A class to hold the state for no-capture attributes.
2922 struct AANoCaptureImpl : public AANoCapture {
2923  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
2924 
2925  /// See AbstractAttribute::initialize(...).
2926  void initialize(Attributor &A) override {
2927  AANoCapture::initialize(A);
2928 
2929  // You cannot "capture" null in the default address space.
2930  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
2931  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
2932  indicateOptimisticFixpoint();
2933  return;
2934  }
2935 
2936  const IRPosition &IRP = getIRPosition();
2937  const Function *F =
2938  getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
2939 
2940  // Check what state the associated function can actually capture.
2941  if (F)
2942  determineFunctionCaptureCapabilities(IRP, *F, *this);
2943  else
2944  indicatePessimisticFixpoint();
2945  }
2946 
2947  /// See AbstractAttribute::updateImpl(...).
2948  ChangeStatus updateImpl(Attributor &A) override;
2949 
2950  /// See AbstractAttribute::getDeducedAttributes(...).
2951  virtual void
2952  getDeducedAttributes(LLVMContext &Ctx,
2953  SmallVectorImpl<Attribute> &Attrs) const override {
2954  if (!isAssumedNoCaptureMaybeReturned())
2955  return;
2956 
2957  if (getArgNo() >= 0) {
2958  if (isAssumedNoCapture())
2959  Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
2960  else if (ManifestInternal)
2961  Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
2962  }
2963  }
2964 
2965  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
2966  /// depending on the ability of the function associated with \p IRP to capture
2967  /// state in memory and through "returning/throwing", respectively.
2968  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
2969  const Function &F,
2970  IntegerState &State) {
2971  // TODO: Once we have memory behavior attributes we should use them here.
2972 
2973  // If we know we cannot communicate or write to memory, we do not care about
2974  // ptr2int anymore.
2975  if (F.onlyReadsMemory() && F.doesNotThrow() &&
2976  F.getReturnType()->isVoidTy()) {
2977  State.addKnownBits(NO_CAPTURE);
2978  return;
2979  }
2980 
2981  // A function cannot capture state in memory if it only reads memory, it can
2982  // however return/throw state and the state might be influenced by the
2983  // pointer value, e.g., loading from a returned pointer might reveal a bit.
2984  if (F.onlyReadsMemory())
2985  State.addKnownBits(NOT_CAPTURED_IN_MEM);
2986 
2987  // A function cannot communicate state back if it does not throw
2988  // exceptions and does not return values.
2989  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
2990  State.addKnownBits(NOT_CAPTURED_IN_RET);
2991 
2992  // Check existing "returned" attributes.
2993  int ArgNo = IRP.getArgNo();
2994  if (F.doesNotThrow() && ArgNo >= 0) {
2995  for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
2996  if (F.hasParamAttribute(u, Attribute::Returned)) {
2997  if (u == unsigned(ArgNo))
2998  State.removeAssumedBits(NOT_CAPTURED_IN_RET);
2999  else if (F.onlyReadsMemory())
3000  State.addKnownBits(NO_CAPTURE);
3001  else
3002  State.addKnownBits(NOT_CAPTURED_IN_RET);
3003  break;
3004  }
3005  }
3006  }
3007 
3008  /// See AbstractState::getAsStr().
3009  const std::string getAsStr() const override {
3010  if (isKnownNoCapture())
3011  return "known not-captured";
3012  if (isAssumedNoCapture())
3013  return "assumed not-captured";
3014  if (isKnownNoCaptureMaybeReturned())
3015  return "known not-captured-maybe-returned";
3016  if (isAssumedNoCaptureMaybeReturned())
3017  return "assumed not-captured-maybe-returned";
3018  return "assumed-captured";
3019  }
3020 };
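// Editorial summary of the bit lattice used above: NO_CAPTURE is the
// conjunction of NOT_CAPTURED_IN_MEM, NOT_CAPTURED_IN_INT, and
// NOT_CAPTURED_IN_RET, while NO_CAPTURE_MAYBE_RETURNED keeps only the first
// two bits and thus still permits the value to escape through the return
// value.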
3021 
3022 /// Attributor-aware capture tracker.
3023 struct AACaptureUseTracker final : public CaptureTracker {
3024 
3025  /// Create a capture tracker that can lookup in-flight abstract attributes
3026  /// through the Attributor \p A.
3027  ///
3028  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3029  /// search is stopped. If a use leads to a return instruction,
3030  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3031  /// If a use leads to a ptr2int which may capture the value,
3032  /// \p CapturedInInteger is set. If a use is found that is currently assumed
3033  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3034  /// set. All values in \p PotentialCopies are later tracked as well. For every
3035  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3036  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3037  /// conservatively set to true.
3038  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3039  const AAIsDead &IsDeadAA, IntegerState &State,
3040  SmallVectorImpl<const Value *> &PotentialCopies,
3041  unsigned &RemainingUsesToExplore)
3042  : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3043  PotentialCopies(PotentialCopies),
3044  RemainingUsesToExplore(RemainingUsesToExplore) {}
3045 
3046  /// Determine if \p V may be captured. *Also updates the state!*
3047  bool valueMayBeCaptured(const Value *V) {
3048  if (V->getType()->isPointerTy()) {
3049  PointerMayBeCaptured(V, this);
3050  } else {
3051  State.indicatePessimisticFixpoint();
3052  }
3053  return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3054  }
3055 
3056  /// See CaptureTracker::tooManyUses().
3057  void tooManyUses() override {
3058  State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3059  }
3060 
3061  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3062  if (CaptureTracker::isDereferenceableOrNull(O, DL))
3063  return true;
3064  const auto &DerefAA =
3065  A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3066  return DerefAA.getAssumedDereferenceableBytes();
3067  }
3068 
3069  /// See CaptureTracker::captured(...).
3070  bool captured(const Use *U) override {
3071  Instruction *UInst = cast<Instruction>(U->getUser());
3072  LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3073  << "\n");
3074 
3075  // Because we may reuse the tracker multiple times we keep track of the
3076  // number of explored uses ourselves as well.
3077  if (RemainingUsesToExplore-- == 0) {
3078  LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3079  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3080  /* Return */ true);
3081  }
3082 
3083  // Deal with ptr2int by following uses.
3084  if (isa<PtrToIntInst>(UInst)) {
3085  LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3086  return valueMayBeCaptured(UInst);
3087  }
3088 
3089  // Explicitly catch return instructions.
3090  if (isa<ReturnInst>(UInst))
3091  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3092  /* Return */ true);
3093 
3094  // For now we only use special logic for call sites. However, the tracker
3095  // itself knows about a lot of other non-capturing cases already.
3096  CallSite CS(UInst);
3097  if (!CS || !CS.isArgOperand(U))
3098  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3099  /* Return */ true);
3100 
3101  unsigned ArgNo = CS.getArgumentNo(U);
3102  const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3103  // If we have an abstract no-capture attribute for the argument we can use
3104  // it to justify a non-capture attribute here. This allows recursion!
3105  auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3106  if (ArgNoCaptureAA.isAssumedNoCapture())
3107  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3108  /* Return */ false);
3109  if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3110  addPotentialCopy(CS);
3111  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3112  /* Return */ false);
3113  }
3114 
3115  // Lastly, we could not find a reason to assume no-capture, so we do not.
3116  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3117  /* Return */ true);
3118  }
3119 
3120  /// Register \p CS as potential copy of the value we are checking.
3121  void addPotentialCopy(CallSite CS) {
3122  PotentialCopies.push_back(CS.getInstruction());
3123  }
3124 
3125  /// See CaptureTracker::shouldExplore(...).
3126  bool shouldExplore(const Use *U) override {
3127  // Check liveness.
3128  return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3129  }
3130 
3131  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3132  /// \p CapturedInRet, then return the appropriate value for use in the
3133  /// CaptureTracker::captured() interface.
3134  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3135  bool CapturedInRet) {
3136  LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3137  << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3138  if (CapturedInMem)
3139  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3140  if (CapturedInInt)
3141  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3142  if (CapturedInRet)
3143  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3144  return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3145  }
3146 
3147 private:
3148  /// The attributor providing in-flight abstract attributes.
3149  Attributor &A;
3150 
3151  /// The abstract attribute currently updated.
3152  AANoCapture &NoCaptureAA;
3153 
3154  /// The abstract liveness state.
3155  const AAIsDead &IsDeadAA;
3156 
3157  /// The state currently updated.
3158  IntegerState &State;
3159 
3160  /// Set of potential copies of the tracked value.
3161  SmallVectorImpl<const Value *> &PotentialCopies;
3162 
3163  /// Global counter to limit the number of explored uses.
3164  unsigned &RemainingUsesToExplore;
3165 };
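//
// For intuition (editorial sketch; the names below are made up): a value
// passed to a callee argument that is only assumed "no-capture-maybe-returned"
// may flow back out through the return value, so the call result is recorded
// as a potential copy and its uses are tracked as well:
//
//   static int *passthrough(int *p) { return p; } // no-capture-maybe-returned
//   void caller(int *q) {
//     int *r = passthrough(q); // r becomes a potential copy of q; whether q
//                              // is captured now also depends on r's uses.
//   }
//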
3166 
3167 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3168  const IRPosition &IRP = getIRPosition();
3169  const Value *V =
3170  getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3171  if (!V)
3172  return indicatePessimisticFixpoint();
3173 
3174  const Function *F =
3175  getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3176  assert(F && "Expected a function!");
3177  const IRPosition &FnPos = IRPosition::function(*F);
3178  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
3179 
3180  AANoCapture::StateType T;
3181 
3182  // Readonly means we cannot capture through memory.
3183  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3184  if (FnMemAA.isAssumedReadOnly()) {
3185  T.addKnownBits(NOT_CAPTURED_IN_MEM);
3186  if (FnMemAA.isKnownReadOnly())
3187  addKnownBits(NOT_CAPTURED_IN_MEM);
3188  }
3189 
3190  // Make sure all returned values are different from the underlying value.
3191  // TODO: we could do this in a more sophisticated way inside
3192  // AAReturnedValues, e.g., track all values that escape through returns
3193  // directly somehow.
3194  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
3195  bool SeenConstant = false;
3196  for (auto &It : RVAA.returned_values()) {
3197  if (isa<Constant>(It.first)) {
3198  if (SeenConstant)
3199  return false;
3200  SeenConstant = true;
3201  } else if (!isa<Argument>(It.first) ||
3202  It.first == getAssociatedArgument())
3203  return false;
3204  }
3205  return true;
3206  };
3207 
3208  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
3209  if (NoUnwindAA.isAssumedNoUnwind()) {
3210  bool IsVoidTy = F->getReturnType()->isVoidTy();
3211  const AAReturnedValues *RVAA =
3212  IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
3213  if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
3214  T.addKnownBits(NOT_CAPTURED_IN_RET);
3215  if (T.isKnown(NOT_CAPTURED_IN_MEM))
3216  return ChangeStatus::UNCHANGED;
3217  if (NoUnwindAA.isKnownNoUnwind() &&
3218  (IsVoidTy || RVAA->getState().isAtFixpoint())) {
3219  addKnownBits(NOT_CAPTURED_IN_RET);
3220  if (isKnown(NOT_CAPTURED_IN_MEM))
3221  return indicateOptimisticFixpoint();
3222  }
3223  }
3224  }
3225 
3226  // Use the CaptureTracker interface and logic with the specialized tracker,
3227  // defined in AACaptureUseTracker, that can look at in-flight abstract
3228  // attributes and directly update the assumed state.
3229  SmallVector<const Value *, 4> PotentialCopies;
3230  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3231  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3232  RemainingUsesToExplore);
3233 
3234  // Check all potential copies of the associated value until we can assume
3235  // none will be captured or we have to assume at least one might be.
3236  unsigned Idx = 0;
3237  PotentialCopies.push_back(V);
3238  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3239  Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3240 
3241  AANoCapture::StateType &S = getState();
3242  auto Assumed = S.getAssumed();
3243  S.intersectAssumedBits(T.getAssumed());
3244  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3245  : ChangeStatus::CHANGED;
3246 }
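//
// Illustrative example (not taken from the source): for
//
//   static void clear(int *p) { *p = 0; }
//
// the only use of p is the pointer operand of a store, so neither the memory,
// integer, nor return capture bits are removed and p can be marked
// 'nocapture'. Storing p into a global or returning it would instead remove
// the corresponding NOT_CAPTURED_IN_* bits.
//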
3247 
3248 /// NoCapture attribute for function arguments.
3249 struct AANoCaptureArgument final : AANoCaptureImpl {
3250  AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3251 
3252  /// See AbstractAttribute::trackStatistics()
3253  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3254 };
3255 
3256 /// NoCapture attribute for call site arguments.
3257 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3258  AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3259 
3260  /// See AbstractAttribute::updateImpl(...).
3261  ChangeStatus updateImpl(Attributor &A) override {
3262  // TODO: Once we have call site specific value information we can provide
3263  // call site specific liveness information and then it makes
3264  // sense to specialize attributes for call site arguments instead of
3265  // redirecting requests to the callee argument.
3266  Argument *Arg = getAssociatedArgument();
3267  if (!Arg)
3268  return indicatePessimisticFixpoint();
3269  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3270  auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3271  return clampStateAndIndicateChange(
3272  getState(),
3273  static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3274  }
3275 
3276  /// See AbstractAttribute::trackStatistics()
3277  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
3278 };
3279 
3280 /// NoCapture attribute for floating values.
3281 struct AANoCaptureFloating final : AANoCaptureImpl {
3282  AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3283 
3284  /// See AbstractAttribute::trackStatistics()
3285  void trackStatistics() const override {
3287  }
3288 };
3289 
3290 /// NoCapture attribute for function return value.
3291 struct AANoCaptureReturned final : AANoCaptureImpl {
3292  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3293  llvm_unreachable("NoCapture is not applicable to function returns!");
3294  }
3295 
3296  /// See AbstractAttribute::initialize(...).
3297  void initialize(Attributor &A) override {
3298  llvm_unreachable("NoCapture is not applicable to function returns!");
3299  }
3300 
3301  /// See AbstractAttribute::updateImpl(...).
3302  ChangeStatus updateImpl(Attributor &A) override {
3303  llvm_unreachable("NoCapture is not applicable to function returns!");
3304  }
3305 
3306  /// See AbstractAttribute::trackStatistics()
3307  void trackStatistics() const override {}
3308 };
3309 
3310 /// NoCapture attribute deduction for a call site return value.
3311 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3312  AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3313 
3314  /// See AbstractAttribute::trackStatistics()
3315  void trackStatistics() const override {
3316  STATS_DECLTRACK_CSRET_ATTR(nocapture)
3317  }
3318 };
3319 
3320 /// ------------------ Value Simplify Attribute ----------------------------
3321 struct AAValueSimplifyImpl : AAValueSimplify {
3322  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3323 
3324  /// See AbstractAttribute::getAsStr().
3325  const std::string getAsStr() const override {
3326  return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3327  : "not-simple";
3328  }
3329 
3330  /// See AbstractAttribute::trackStatistics()
3331  void trackStatistics() const override {}
3332 
3333  /// See AAValueSimplify::getAssumedSimplifiedValue()
3334  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3335  if (!getAssumed())
3336  return const_cast<Value *>(&getAssociatedValue());
3337  return SimplifiedAssociatedValue;
3338  }
3339  void initialize(Attributor &A) override {}
3340 
3341  /// Helper function for querying AAValueSimplify and updating the candidate.
3342  /// \param QueryingValue Value trying to unify with SimplifiedValue
3343  /// \param AccumulatedSimplifiedValue Current simplification result.
3344  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3345  Value &QueryingValue,
3346  Optional<Value *> &AccumulatedSimplifiedValue) {
3347  // FIXME: Add typecast support.
3348 
3349  auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
3350  QueryingAA, IRPosition::value(QueryingValue));
3351 
3352  Optional<Value *> QueryingValueSimplified =
3353  ValueSimplifyAA.getAssumedSimplifiedValue(A);
3354 
3355  if (!QueryingValueSimplified.hasValue())
3356  return true;
3357 
3358  if (!QueryingValueSimplified.getValue())
3359  return false;
3360 
3361  Value &QueryingValueSimplifiedUnwrapped =
3362  *QueryingValueSimplified.getValue();
3363 
3364  if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3365  return true;
3366 
3367  if (AccumulatedSimplifiedValue.hasValue())
3368  return AccumulatedSimplifiedValue == QueryingValueSimplified;
3369 
3370  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3371  << " is assumed to be "
3372  << QueryingValueSimplifiedUnwrapped << "\n");
3373 
3374  AccumulatedSimplifiedValue = QueryingValueSimplified;
3375  return true;
3376  }
3377 
3378  /// See AbstractAttribute::manifest(...).
3379  ChangeStatus manifest(Attributor &A) override {
3380  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3381 
3382  if (!SimplifiedAssociatedValue.hasValue() ||
3383  !SimplifiedAssociatedValue.getValue())
3384  return Changed;
3385 
3386  if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3387  // We can replace the AssociatedValue with the constant.
3388  Value &V = getAssociatedValue();
3389  if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3390  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3391  << "\n");
3392  V.replaceAllUsesWith(C);
3393  Changed = ChangeStatus::CHANGED;
3394  }
3395  }
3396 
3397  return Changed | AAValueSimplify::manifest(A);
3398  }
3399 
3400 protected:
3401  // An assumed simplified value. Initially, it is set to Optional::None, which
3402  // means that the value is not clear under the current assumption. If in the
3403  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3404  // returns the original associated value.
3405  Optional<Value *> SimplifiedAssociatedValue;
3406 };
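//
// A hedged sketch of the unification performed by checkAndUpdate (the C code
// is illustrative only): if every value reaching a position simplifies to the
// same candidate, that candidate can replace the position, e.g.,
//
//   static int f(int x) { return x + 1; }
//   void g() { f(42); f(42); } // x is 42 at every call site.
//
// Here the argument position of x unifies to the constant 42 and manifest()
// replaces all uses of x accordingly. Two conflicting incoming values, say
// f(42) and f(43), make checkAndUpdate fail and lead to a pessimistic
// fixpoint.
//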
3407 
3408 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3409  AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3410 
3411  /// See AbstractAttribute::updateImpl(...).
3412  ChangeStatus updateImpl(Attributor &A) override {
3413  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3414 
3415  auto PredForCallSite = [&](AbstractCallSite ACS) {
3416  // Check if we have an associated argument or not (which can happen for
3417  // callback calls).
3418  if (Value *ArgOp = ACS.getCallArgOperand(getArgNo()))
3419  return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3420  return false;
3421  };
3422 
3423  if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3424  return indicatePessimisticFixpoint();
3425 
3426  // If a candidate was found in this update, return CHANGED.
3427  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3428  ? ChangeStatus::UNCHANGED
3429  : ChangeStatus::CHANGED;
3430  }
3431 
3432  /// See AbstractAttribute::trackStatistics()
3433  void trackStatistics() const override {
3434  STATS_DECLTRACK_ARG_ATTR(value_simplify)
3435  }
3436 };
3437 
3438 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3439  AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3440 
3441  /// See AbstractAttribute::updateImpl(...).
3442  ChangeStatus updateImpl(Attributor &A) override {
3443  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3444 
3445  auto PredForReturned = [&](Value &V) {
3446  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3447  };
3448 
3449  if (!A.checkForAllReturnedValues(PredForReturned, *this))
3450  return indicatePessimisticFixpoint();
3451 
3452  // If a candidate was found in this update, return CHANGED.
3453  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3454  ? ChangeStatus::UNCHANGED
3455  : ChangeStatus::CHANGED;
3456  }
3457  /// See AbstractAttribute::trackStatistics()
3458  void trackStatistics() const override {
3459  STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3460  }
3461 };
3462 
3463 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3464  AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3465 
3466  /// See AbstractAttribute::initialize(...).
3467  void initialize(Attributor &A) override {
3468  Value &V = getAnchorValue();
3469 
3470  // TODO: Add other cases as well.
3471  if (isa<Constant>(V) || isa<UndefValue>(V))
3472  indicatePessimisticFixpoint();
3473  }
3474 
3475  /// See AbstractAttribute::updateImpl(...).
3476  ChangeStatus updateImpl(Attributor &A) override {
3477  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3478 
3479  auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3480  auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3481  if (!Stripped && this == &AA) {
3482  // TODO: Look at the instruction and check recursively.
3483  LLVM_DEBUG(
3484  dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3485  << V << "\n");
3486  indicatePessimisticFixpoint();
3487  return false;
3488  }
3489  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3490  };
3491 
3492  if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3493  A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3494  VisitValueCB))
3495  return indicatePessimisticFixpoint();
3496 
3497  // If a candidate was found in this update, return CHANGED.
3498 
3499  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3500  ? ChangeStatus::UNCHANGED
3501  : ChangeStatus::CHANGED;
3502  }
3503 
3504  /// See AbstractAttribute::trackStatistics()
3505  void trackStatistics() const override {
3506  STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
3507  }
3508 };
3509 
3510 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
3511  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3512 
3513  /// See AbstractAttribute::initialize(...).
3514  void initialize(Attributor &A) override {
3515  SimplifiedAssociatedValue = &getAnchorValue();
3516  indicateOptimisticFixpoint();
3517  }
3518  /// See AbstractAttribute::updateImpl(...).
3519  ChangeStatus updateImpl(Attributor &A) override {
3520  llvm_unreachable(
3521  "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
3522  }
3523  /// See AbstractAttribute::trackStatistics()
3524  void trackStatistics() const override {
3525  STATS_DECLTRACK_FN_ATTR(value_simplify)
3526  }
3527 };
3528 
3529 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
3530  AAValueSimplifyCallSite(const IRPosition &IRP)
3531  : AAValueSimplifyFunction(IRP) {}
3532  /// See AbstractAttribute::trackStatistics()
3533  void trackStatistics() const override {
3534  STATS_DECLTRACK_CS_ATTR(value_simplify)
3535  }
3536 };
3537 
3538 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
3539  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
3540  : AAValueSimplifyReturned(IRP) {}
3541 
3542  void trackStatistics() const override {
3543  STATS_DECLTRACK_CSRET_ATTR(value_simplify)
3544  }
3545 };
3546 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
3547  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
3548  : AAValueSimplifyFloating(IRP) {}
3549 
3550  void trackStatistics() const override {
3551  STATS_DECLTRACK_CSARG_ATTR(value_simplify)
3552  }
3553 };
3554 
3555 /// ----------------------- Heap-To-Stack Conversion ---------------------------
3556 struct AAHeapToStackImpl : public AAHeapToStack {
3557  AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
3558 
3559  const std::string getAsStr() const override {
3560  return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
3561  }
3562 
3563  ChangeStatus manifest(Attributor &A) override {
3564  assert(getState().isValidState() &&
3565  "Attempted to manifest an invalid state!");
3566 
3567  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3568  Function *F = getAssociatedFunction();
3569  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3570 
3571  for (Instruction *MallocCall : MallocCalls) {
3572  // This malloc cannot be replaced.
3573  if (BadMallocCalls.count(MallocCall))
3574  continue;
3575 
3576  for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
3577  LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
3578  A.deleteAfterManifest(*FreeCall);
3579  HasChanged = ChangeStatus::CHANGED;
3580  }
3581 
3582  LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
3583  << "\n");
3584 
3585  Constant *Size;
3586  if (isCallocLikeFn(MallocCall, TLI)) {
3587  auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
3588  auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
3589  APInt TotalSize = SizeT->getValue() * Num->getValue();
3590  Size =
3591  ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
3592  } else {
3593  Size = cast<ConstantInt>(MallocCall->getOperand(0));
3594  }
3595 
3596  unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
3597  Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
3598  Size, "", MallocCall->getNextNode());
3599 
3600  if (AI->getType() != MallocCall->getType())
3601  AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
3602  AI->getNextNode());
3603 
3604  MallocCall->replaceAllUsesWith(AI);
3605 
3606  if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
3607  auto *NBB = II->getNormalDest();
3608  BranchInst::Create(NBB, MallocCall->getParent());
3609  A.deleteAfterManifest(*MallocCall);
3610  } else {
3611  A.deleteAfterManifest(*MallocCall);
3612  }
3613 
3614  if (isCallocLikeFn(MallocCall, TLI)) {
3615  auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
3616  AI->getNextNode());
3617  Value *Ops[] = {
3618  BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
3619  ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
3620 
3621  Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
3622  Module *M = F->getParent();
3623  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
3624  CallInst::Create(Fn, Ops, "", BI->getNextNode());
3625  }
3626  HasChanged = ChangeStatus::CHANGED;
3627  }
3628 
3629  return HasChanged;
3630  }
3631 
3632  /// Collection of all malloc calls in a function.
3633  SmallSetVector<Instruction *, 4> MallocCalls;
3634 
3635  /// Collection of malloc calls that cannot be converted.
3636  DenseSet<const Instruction *> BadMallocCalls;
3637 
3638  /// A map for each malloc call to the set of associated free calls.
3639  DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
3640 
3641  ChangeStatus updateImpl(Attributor &A) override;
3642 };
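//
// Conceptually, the manifest step above performs the following rewrite (the
// C snippet is an editorial illustration, not part of the pass):
//
//   char *p = malloc(16);   // becomes: char p[16]; (an alloca)
//   use(p);                 // uses are redirected to the alloca
//   free(p);                // the free call is simply deleted
//
// For calloc, a memset intrinsic is emitted right after the alloca to
// zero-initialize the Num * SizeT bytes, mirroring calloc's semantics.
//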
3643 
3644 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
3645  const Function *F = getAssociatedFunction();
3646  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3647 
3648  auto UsesCheck = [&](Instruction &I) {
3649  SmallPtrSet<const Use *, 8> Visited;
3650  SmallVector<const Use *, 8> Worklist;
3651 
3652  for (Use &U : I.uses())
3653  Worklist.push_back(&U);
3654 
3655  while (!Worklist.empty()) {
3656  const Use *U = Worklist.pop_back_val();
3657  if (!Visited.insert(U).second)
3658  continue;
3659 
3660  auto *UserI = U->getUser();
3661 
3662  if (isa<LoadInst>(UserI))
3663  continue;
3664  if (auto *SI = dyn_cast<StoreInst>(UserI)) {
3665  if (SI->getValueOperand() == U->get()) {
3666  LLVM_DEBUG(dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n");
3667  return false;
3668  }
3669  // A store into the malloc'ed memory is fine.
3670  continue;
3671  }
3672 
3673  // NOTE: Right now, if a function that has the malloc pointer as an argument
3674  // frees memory, we assume that the malloc pointer is freed.
3675 
3676  // TODO: Add nofree callsite argument attribute to indicate that pointer
3677  // argument is not freed.
3678  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3679  if (!CB->isArgOperand(U))
3680  continue;
3681 
3682  if (CB->isLifetimeStartOrEnd())
3683  continue;
3684 
3685  // Record the free call associated with this malloc.
3686  if (isFreeCall(UserI, TLI)) {
3687  FreesForMalloc[&I].insert(
3688  cast<Instruction>(const_cast<User *>(UserI)));
3689  continue;
3690  }
3691 
3692  // If a function does not free memory we are fine.
3693  const auto &NoFreeAA =
3694  A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(*CB));
3695 
3696  unsigned ArgNo = U - CB->arg_begin();
3697  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3698  *this, IRPosition::callsite_argument(*CB, ArgNo));
3699 
3700  if (!NoCaptureAA.isAssumedNoCapture() || !NoFreeAA.isAssumedNoFree()) {
3701  LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
3702  return false;
3703  }
3704  continue;
3705  }
3706 
3707  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI)) {
3708  for (Use &U : UserI->uses())
3709  Worklist.push_back(&U);
3710  continue;
3711  }
3712 
3713  // Unknown user.
3714  LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
3715  return false;
3716  }
3717  return true;
3718  };
3719 
3720  auto MallocCallocCheck = [&](Instruction &I) {
3721  if (BadMallocCalls.count(&I))
3722  return true;
3723 
3724  bool IsMalloc = isMallocLikeFn(&I, TLI);
3725  bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
3726  if (!IsMalloc && !IsCalloc) {
3727  BadMallocCalls.insert(&I);
3728  return true;
3729  }
3730 
3731  if (IsMalloc) {
3732  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
3733  if (Size->getValue().sle(MaxHeapToStackSize))
3734  if (UsesCheck(I)) {
3735  MallocCalls.insert(&I);
3736  return true;
3737  }
3738  } else if (IsCalloc) {
3739  bool Overflow = false;
3740  if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
3741  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
3742  if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
3743  .sle(MaxHeapToStackSize))
3744  if (!Overflow && UsesCheck(I)) {
3745  MallocCalls.insert(&I);
3746  return true;
3747  }
3748  }
3749 
3750  BadMallocCalls.insert(&I);
3751  return true;
3752  };
3753 
3754  size_t NumBadMallocs = BadMallocCalls.size();
3755 
3756  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
3757 
3758  if (NumBadMallocs != BadMallocCalls.size())
3759  return ChangeStatus::CHANGED;
3760 
3761  return ChangeStatus::UNCHANGED;
3762 }
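//
// Example of a rejected candidate (editorial illustration): a malloc whose
// pointer escapes cannot be moved to the stack,
//
//   static char *Global;
//   void f() {
//     char *p = malloc(8);
//     Global = p; // escaping store: UsesCheck fails, p stays on the heap.
//   }
//
// whereas loads from p, stores into the allocation, and passing p to call
// sites assumed nocapture and nofree keep the conversion applicable.
//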
3763 
3764 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
3765  AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
3766 
3767  /// See AbstractAttribute::trackStatistics()
3768  void trackStatistics() const override {
3769  STATS_DECL(MallocCalls, Function,
3770  "Number of MallocCalls converted to allocas");
3771  BUILD_STAT_NAME(MallocCalls, Function) += MallocCalls.size();
3772  }
3773 };
3774 
3775 /// -------------------- Memory Behavior Attributes ----------------------------
3776 /// Includes read-none, read-only, and write-only.
3777 /// ----------------------------------------------------------------------------
3778 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
3779  AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
3780 
3781  /// See AbstractAttribute::initialize(...).
3782  void initialize(Attributor &A) override {
3783  intersectAssumedBits(BEST_STATE);
3784  getKnownStateFromValue(getIRPosition(), getState());
3785  IRAttribute::initialize(A);
3786  }
3787 
3788  /// Return the memory behavior information encoded in the IR for \p IRP.
3789  static void getKnownStateFromValue(const IRPosition &IRP,
3790  IntegerState &State) {
3791  SmallVector<Attribute, 2> Attrs;
3792  IRP.getAttrs(AttrKinds, Attrs);
3793  for (const Attribute &Attr : Attrs) {
3794  switch (Attr.getKindAsEnum()) {
3795  case Attribute::ReadNone:
3796  State.addKnownBits(NO_ACCESSES);
3797  break;
3798  case Attribute::ReadOnly:
3799  State.addKnownBits(NO_WRITES);
3800  break;
3801  case Attribute::WriteOnly:
3802  State.addKnownBits(NO_READS);
3803  break;
3804  default:
3805  llvm_unreachable("Unexpected attribute!");
3806  }
3807  }
3808 
3809  if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
3810  if (!I->mayReadFromMemory())
3811  State.addKnownBits(NO_READS);
3812  if (!I->mayWriteToMemory())
3813  State.addKnownBits(NO_WRITES);
3814  }
3815  }
3816 
3817  /// See AbstractAttribute::getDeducedAttributes(...).
3818  void getDeducedAttributes(LLVMContext &Ctx,
3819  SmallVectorImpl<Attribute> &Attrs) const override {
3820  assert(Attrs.size() == 0);
3821  if (isAssumedReadNone())
3822  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
3823  else if (isAssumedReadOnly())
3824  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
3825  else if (isAssumedWriteOnly())
3826  Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
3827  assert(Attrs.size() <= 1);
3828  }
3829 
3830  /// See AbstractAttribute::manifest(...).
3831  ChangeStatus manifest(Attributor &A) override {
3832  IRPosition &IRP = getIRPosition();
3833 
3834  // Check if we would improve the existing attributes first.
3835  SmallVector<Attribute, 4> DeducedAttrs;
3836  getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
3837  if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
3838  return IRP.hasAttr(Attr.getKindAsEnum(),
3839  /* IgnoreSubsumingPositions */ true);
3840  }))
3841  return ChangeStatus::UNCHANGED;
3842 
3843  // Clear existing attributes.
3844  IRP.removeAttrs(AttrKinds);
3845 
3846  // Use the generic manifest method.
3847  return IRAttribute::manifest(A);
3848  }
3849 
3850  /// See AbstractState::getAsStr().
3851  const std::string getAsStr() const override {
3852  if (isAssumedReadNone())
3853  return "readnone";
3854  if (isAssumedReadOnly())
3855  return "readonly";
3856  if (isAssumedWriteOnly())
3857  return "writeonly";
3858  return "may-read/write";
3859  }
3860 
3861  /// The set of IR attributes AAMemoryBehavior deals with.
3862  static const Attribute::AttrKind AttrKinds[3];
3863 };
3864 
3865 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
3866  Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
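//
// As a quick reference (editorial note; the prototypes are illustrative),
// the three attribute kinds correspond to:
//
//   int  f(int x);         // readnone:  accesses no memory at all
//   int  g(const int *p);  // readonly:  may read memory, never writes
//   void h(int *p, int v); // writeonly: may write memory, never reads
//
// The deductions below start from the optimistic state and remove the
// NO_READS/NO_WRITES bits as soon as a use or instruction may violate them.
//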
3867 
3868 /// Memory behavior attribute for a floating value.
3869 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
3870  AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3871 
3872  /// See AbstractAttribute::initialize(...).
3873  void initialize(Attributor &A) override {
3874  AAMemoryBehaviorImpl::initialize(A);
3875  // Initialize the use vector with all direct uses of the associated value.
3876  for (const Use &U : getAssociatedValue().uses())
3877  Uses.insert(&U);
3878  }
3879 
3880  /// See AbstractAttribute::updateImpl(...).
3881  ChangeStatus updateImpl(Attributor &A) override;
3882 
3883  /// See AbstractAttribute::trackStatistics()
3884  void trackStatistics() const override {
3885  if (isAssumedReadNone())
3886  STATS_DECLTRACK_FLOATING_ATTR(readnone)
3887  else if (isAssumedReadOnly())
3888  STATS_DECLTRACK_FLOATING_ATTR(readonly)
3889  else if (isAssumedWriteOnly())
3890  STATS_DECLTRACK_FLOATING_ATTR(writeonly)
3891  }
3892 
3893 private:
3894  /// Return true if users of \p UserI might access the underlying
3895  /// variable/location described by \p U and should therefore be analyzed.
3896  bool followUsersOfUseIn(Attributor &A, const Use *U,
3897  const Instruction *UserI);
3898 
3899  /// Update the state according to the effect of use \p U in \p UserI.
3900  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
3901 
3902 protected:
3903  /// Container for (transitive) uses of the associated argument.
3904  SetVector<const Use *> Uses;
3905 };
3906 
3907 /// Memory behavior attribute for function argument.
3908 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
3909  AAMemoryBehaviorArgument(const IRPosition &IRP)
3910  : AAMemoryBehaviorFloating(IRP) {}
3911 
3912  /// See AbstractAttribute::initialize(...).
3913  void initialize(Attributor &A) override {
3914  AAMemoryBehaviorFloating::initialize(A);
3915 
3916  // Give up if we do not have an associated argument in a function with an
3917  // exact definition.
3917  Argument *Arg = getAssociatedArgument();
3918  if (!Arg || !Arg->getParent()->hasExactDefinition())
3919  indicatePessimisticFixpoint();
3920  }
3921 
3922  ChangeStatus manifest(Attributor &A) override {
3923  // TODO: From readattrs.ll: "inalloca parameters are always
3924  // considered written"
3925  if (hasAttr({Attribute::InAlloca})) {
3926  removeKnownBits(NO_WRITES);
3927  removeAssumedBits(NO_WRITES);
3928  }
3929  return AAMemoryBehaviorFloating::manifest(A);
3930  }
3931 
3932 
3933  /// See AbstractAttribute::trackStatistics()
3934  void trackStatistics() const override {
3935  if (isAssumedReadNone())
3936  STATS_DECLTRACK_ARG_ATTR(readnone)
3937  else if (isAssumedReadOnly())
3938  STATS_DECLTRACK_ARG_ATTR(readonly)
3939  else if (isAssumedWriteOnly())
3940  STATS_DECLTRACK_ARG_ATTR(writeonly)
3941  }
3942 };
3943 
3944 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
3945  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
3946  : AAMemoryBehaviorArgument(IRP) {}
3947 
3948  /// See AbstractAttribute::updateImpl(...).
3949  ChangeStatus updateImpl(Attributor &A) override {
3950  // TODO: Once we have call site specific value information we can provide
3951  // call site specific liveness information and then it makes
3952  // sense to specialize attributes for call site arguments instead of
3953  // redirecting requests to the callee argument.
3954  Argument *Arg = getAssociatedArgument();
3955  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3956  auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
3957  return clampStateAndIndicateChange(
3958  getState(),
3959  static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
3960  }
3961 
3962  /// See AbstractAttribute::trackStatistics()
3963  void trackStatistics() const override {
3964  if (isAssumedReadNone())
3965  STATS_DECLTRACK_CSARG_ATTR(readnone)
3966  else if (isAssumedReadOnly())
3967  STATS_DECLTRACK_CSARG_ATTR(readonly)
3968  else if (isAssumedWriteOnly())
3969  STATS_DECLTRACK_CSARG_ATTR(writeonly)
3970  }
3971 };
3972 
3973 /// Memory behavior attribute for a call site return position.
3974 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
3975  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
3976  : AAMemoryBehaviorFloating(IRP) {}
3977 
3978  /// See AbstractAttribute::manifest(...).
3979  ChangeStatus manifest(Attributor &A) override {
3980  // We do not annotate returned values.
3981  return ChangeStatus::UNCHANGED;
3982  }
3983 
3984  /// See AbstractAttribute::trackStatistics()
3985  void trackStatistics() const override {}
3986 };
3987 
3988 /// An AA to represent the memory behavior function attributes.
3989 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
3990  AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3991 
3992  /// See AbstractAttribute::updateImpl(Attributor &A).
3993  virtual ChangeStatus updateImpl(Attributor &A) override;
3994 
3995  /// See AbstractAttribute::manifest(...).
3996  ChangeStatus manifest(Attributor &A) override {
3997  Function &F = cast<Function>(getAnchorValue());
3998  if (isAssumedReadNone()) {
3999  F.removeFnAttr(Attribute::ArgMemOnly);
4000  F.removeFnAttr(Attribute::InaccessibleMemOnly);
4001  F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4002  }
4003  return AAMemoryBehaviorImpl::manifest(A);
4004  }
4005 
4006  /// See AbstractAttribute::trackStatistics()
4007  void trackStatistics() const override {
4008  if (isAssumedReadNone())
4009  STATS_DECLTRACK_FN_ATTR(readnone)
4010  else if (isAssumedReadOnly())
4011  STATS_DECLTRACK_FN_ATTR(readonly)
4012  else if (isAssumedWriteOnly())
4013  STATS_DECLTRACK_FN_ATTR(writeonly)
4014  }
4015 };
4016 
4017 /// AAMemoryBehavior attribute for call sites.
4018 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4019  AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4020 
4021  /// See AbstractAttribute::initialize(...).
4022  void initialize(Attributor &A) override {
4023  AAMemoryBehaviorImpl::initialize(A);
4024  Function *F = getAssociatedFunction();
4025  if (!F || !F->hasExactDefinition())
4026  indicatePessimisticFixpoint();
4027  }
4028 
4029  /// See AbstractAttribute::updateImpl(...).
4030  ChangeStatus updateImpl(Attributor &A) override {
4031  // TODO: Once we have call site specific value information we can provide
4032  // call site specific liveness information and then it makes
4033  // sense to specialize attributes for call site arguments instead of
4034  // redirecting requests to the callee argument.
4035  Function *F = getAssociatedFunction();
4036  const IRPosition &FnPos = IRPosition::function(*F);
4037  auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4038  return clampStateAndIndicateChange(
4039  getState(), static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4040  }
4041 
4042  /// See AbstractAttribute::trackStatistics()
4043  void trackStatistics() const override {
4044  if (isAssumedReadNone())
4045  STATS_DECLTRACK_CS_ATTR(readnone)
4046  else if (isAssumedReadOnly())
4047  STATS_DECLTRACK_CS_ATTR(readonly)
4048  else if (isAssumedWriteOnly())
4049  STATS_DECLTRACK_CS_ATTR(writeonly)
4050  }
4051 };
4052 } // namespace
4053 
4054 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4055 
4056  // The current assumed state used to determine a change.
4057  auto AssumedState = getAssumed();
4058 
4059  auto CheckRWInst = [&](Instruction &I) {
4060  // If the instruction has an own memory behavior state, use it to restrict
4061  // the local state. No further analysis is required as the other memory
4062  // state is as optimistic as it gets.
4063  if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4064  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4065  *this, IRPosition::callsite_function(ICS));
4066  intersectAssumedBits(MemBehaviorAA.getAssumed());
4067  return !isAtFixpoint();
4068  }
4069 
4070  // Remove access kind modifiers if necessary.
4071  if (I.mayReadFromMemory())
4072  removeAssumedBits(NO_READS);
4073  if (I.mayWriteToMemory())
4074  removeAssumedBits(NO_WRITES);
4075  return !isAtFixpoint();
4076  };
4077 
4078  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4079  return indicatePessimisticFixpoint();
4080 
4081  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4082  : ChangeStatus::UNCHANGED;
4083 }
4084 
4085 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4086 
4087  const IRPosition &IRP = getIRPosition();
4088  const IRPosition &FnPos = IRPosition::function_scope(IRP);
4089  IntegerState &S = getState();
4090 
4091  // First, check the function scope. We take the known information and we avoid
4092  // work if the assumed information implies the current assumed information for
4093  // this attribute.
4094  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4095  S.addKnownBits(FnMemAA.getKnown());
4096  if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4097  return ChangeStatus::UNCHANGED;
4098 
4099  // Make sure the value is not captured (except through "return"), if
4100  // it is, any information derived would be irrelevant anyway as we cannot
4101  // check the potential aliases introduced by the capture. However, no need
4102  // to fall back to anything less optimistic than the function state.
4103  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4104  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4105  S.intersectAssumedBits(FnMemAA.getAssumed());
4106  return ChangeStatus::CHANGED;
4107  }
4108 
4109  // The current assumed state used to determine a change.
4110  auto AssumedState = S.getAssumed();
4111 
4112  // Liveness information to exclude dead users.
4113  // TODO: Take the FnPos once we have call site specific liveness information.
4114  const auto &LivenessAA = A.getAAFor<AAIsDead>(
4115  *this, IRPosition::function(*IRP.getAssociatedFunction()));
4116 
4117  // Visit and expand uses until all are analyzed or a fixpoint is reached.
4118  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4119  const Use *U = Uses[i];
4120  Instruction *UserI = cast<Instruction>(U->getUser());
4121  LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4122  << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4123  << "]\n");
4124  if (LivenessAA.isAssumedDead(UserI))
4125  continue;
4126 
4127  // Check if the users of UserI should also be visited.
4128  if (followUsersOfUseIn(A, U, UserI))
4129  for (const Use &UserIUse : UserI->uses())
4130  Uses.insert(&UserIUse);
4131 
4132  // If UserI might touch memory we analyze the use in detail.
4133  if (UserI->mayReadOrWriteMemory())
4134  analyzeUseIn(A, U, UserI);
4135  }
4136 
4137  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4138  : ChangeStatus::UNCHANGED;
4139 }
4140 
4141 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4142  const Instruction *UserI) {
4143  // The loaded value is unrelated to the pointer argument, no need to
4144  // follow the users of the load.
4145  if (isa<LoadInst>(UserI))
4146  return false;
4147 
4148  // By default we follow all uses assuming UserI might leak information on U,
4149  // but we have special handling for call site operands.
4150  ImmutableCallSite ICS(UserI);
4151  if (!ICS || !ICS.isArgOperand(U))
4152  return true;
4153 
4154  // If the use is a call argument known not to be captured, the users of
4155  // the call do not need to be visited because they have to be unrelated to
4156  // the input. Note that this check is not trivial even though we disallow
4157  // general capturing of the underlying argument. The reason is that the
4158  // call might return the argument ("through return"), which we allow and for which we
4159  // need to check call users.
4160  unsigned ArgNo = ICS.getArgumentNo(U);
4161  const auto &ArgNoCaptureAA =
4162  A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4163  return !ArgNoCaptureAA.isAssumedNoCapture();
4164 }
4165 
4166 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4167  const Instruction *UserI) {
4168  assert(UserI->mayReadOrWriteMemory());
4169 
4170  switch (UserI->getOpcode()) {
4171  default:
4172  // TODO: Handle all atomics and other side-effect operations we know of.
4173  break;
4174  case Instruction::Load:
4175  // Loads cause the NO_READS property to disappear.
4176  removeAssumedBits(NO_READS);
4177  return;
4178 
4179  case Instruction::Store:
4180  // Stores cause the NO_WRITES property to disappear if the use is the
4181  // pointer operand. Note that we do assume that capturing was taken care of
4182  // somewhere else.
4183  if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4184  removeAssumedBits(NO_WRITES);
4185  return;
4186 
4187  case Instruction::Call:
4188  case Instruction::CallBr:
4189  case Instruction::Invoke: {
4190  // For call sites we look at the argument memory behavior attribute (this
4191  // could be recursive!) in order to restrict our own state.
4192  ImmutableCallSite ICS(UserI);
4193 
4194  // Give up on operand bundles.
4195  if (ICS.isBundleOperand(U)) {
4196  indicatePessimisticFixpoint();
4197  return;
4198  }
4199 
4200  // Calling a function does read the function pointer, and may even write it
4201  // if the function is self-modifying.
4202  if (ICS.isCallee(U)) {
4203  removeAssumedBits(NO_READS);
4204  break;
4205  }
4206 
4207  // Adjust the possible access behavior based on the information on the
4208  // argument.
4209  unsigned ArgNo = ICS.getArgumentNo(U);
4210  const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4211  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4212  // "assumed" has at most the same bits as the MemBehaviorAA assumed
4213  // and at least "known".
4214  intersectAssumedBits(MemBehaviorAA.getAssumed());
4215  return;
4216  }
4217  };
4218 
4219  // Generally, look at the "may-properties" and adjust the assumed state if we
4220  // did not trigger special handling before.
4221  if (UserI->mayReadFromMemory())
4222  removeAssumedBits(NO_READS);
4223  if (UserI->mayWriteToMemory())
4224  removeAssumedBits(NO_WRITES);
4225 }
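//
// A small illustration of the store case above (hypothetical C):
//
//   *p = 0;  // p is the pointer operand of the store -> NO_WRITES is removed
//   *q = p;  // p is the stored *value*; this is a capture, accounted for by
//            // the nocapture logic rather than by this memory-behavior update
//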
4226 
4227 /// ----------------------------------------------------------------------------
4228 /// Attributor
4229 /// ----------------------------------------------------------------------------
4230 
4231 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4232  const AAIsDead *LivenessAA) {
4233  const Instruction *CtxI = AA.getIRPosition().getCtxI();
4234  if (!CtxI)
4235  return false;
4236 
4237  if (!LivenessAA)
4238  LivenessAA =
4239  &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4240  /* TrackDependence */ false);
4241 
4242  // Don't check liveness for AAIsDead.
4243  if (&AA == LivenessAA)
4244  return false;
4245 
4246  if (!LivenessAA->isAssumedDead(CtxI))
4247  return false;
4248 
4249  // We actually used liveness information so we have to record a dependence.
4250  recordDependence(*LivenessAA, AA);
4251 
4252  return true;
4253 }
4254 
4255 bool Attributor::checkForAllCallSites(
4256  const function_ref<bool(AbstractCallSite)> &Pred,
4257  const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4258  // We can try to determine information from
4259  // the call sites. However, this is only possible if all call sites are
4260  // known, which requires the function to have internal linkage.
4261  const IRPosition &IRP = QueryingAA.getIRPosition();
4262  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4263  if (!AssociatedFunction) {
4264  LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4265  << "\n");
4266  return false;
4267  }
4268 
4269  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4270  &QueryingAA);
4271 }
4272 
4273 bool Attributor::checkForAllCallSites(
4274  const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4275  bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4276  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4277  LLVM_DEBUG(
4278  dbgs()
4279  << "[Attributor] Function " << Fn.getName()
4280  << " has no internal linkage, hence not all call sites are known\n");
4281  return false;
4282  }
4283 
4284  for (const Use &U : Fn.uses()) {
4285  AbstractCallSite ACS(&U);
4286  if (!ACS) {
4287  LLVM_DEBUG(dbgs() << "[Attributor] Function "
4288  << Fn.getName()
4289  << " has non call site use " << *U.get() << " in "
4290  << *U.getUser() << "\n");
4291  return false;
4292  }
4293 
4294  Instruction *I = ACS.getInstruction();
4295  Function *Caller = I->getFunction();
4296 
4297  const auto *LivenessAA =
4298  lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4299  /* TrackDependence */ false);
4300 
4301  // Skip dead calls.
4302  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4303  // We actually used liveness information so we have to record a
4304  // dependence.
4305  if (QueryingAA)
4306  recordDependence(*LivenessAA, *QueryingAA);
4307  continue;
4308  }
4309 
4310  const Use *EffectiveUse =
4311  ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4312  if (!ACS.isCallee(EffectiveUse)) {
4313  if (!RequireAllCallSites)
4314  continue;
4315  LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4316  << " is an invalid use of "
4317  << Fn.getName() << "\n");
4318  return false;
4319  }
4320 
4321  if (Pred(ACS))
4322  continue;
4323 
4324  LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4325  << *ACS.getInstruction() << "\n");
4326  return false;
4327  }
4328 
4329  return true;
4330 }
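//
// Usage intuition (editorial sketch): with RequireAllCallSites, deduction is
// only sound when every call site is visible, e.g.,
//
//   static int helper(int *p) { return *p; } // internal linkage: all callers
//                                            // are known, so argument
//                                            // attributes can be derived
//                                            // from the call sites.
//   int api(int *p);                         // external linkage: unknown
//                                            // callers, the walk gives up.
//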
4331 
4332 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4333  const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4334  &Pred,
4335  const AbstractAttribute &QueryingAA) {
4336 
4337  const IRPosition &IRP = QueryingAA.getIRPosition();
4338  // Since we need to provide return instructions we have to have an exact
4339  // definition.
4340  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4341  if (!AssociatedFunction)
4342  return false;
4343 
4344  // If this is a call site query we use the call site specific return values
4345  // and liveness information.
4346  // TODO: use the function scope once we have call site AAReturnedValues.
4347  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4348  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4349  if (!AARetVal.getState().isValidState())
4350  return false;
4351 
4352  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4353 }
4354 
4355 bool Attributor::checkForAllReturnedValues(
4356  const function_ref<bool(Value &)> &Pred,
4357  const AbstractAttribute &QueryingAA) {
4358 
4359  const IRPosition &IRP = QueryingAA.getIRPosition();
4360  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4361  if (!AssociatedFunction)
4362  return false;
4363 
4364  // TODO: use the function scope once we have call site AAReturnedValues.
4365  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4366  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4367  if (!AARetVal.getState().isValidState())
4368  return false;
4369 
4370  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4371  [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4372  return Pred(RV);
4373  });
4374 }
4375 
4376 static bool
4377 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4378  const function_ref<bool(Instruction &)> &Pred,
4379  const AAIsDead *LivenessAA, bool &AnyDead,
4380  const ArrayRef<unsigned> &Opcodes) {
4381  for (unsigned Opcode : Opcodes) {
4382  for (Instruction *I : OpcodeInstMap[Opcode]) {
4383  // Skip dead instructions.
4384  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4385  AnyDead = true;
4386  continue;
4387  }
4388 
4389  if (!Pred(*I))
4390  return false;
4391  }
4392  }
4393  return true;
4394 }
4395 
4396 bool Attributor::checkForAllInstructions(
4397  const llvm::function_ref<bool(Instruction &)> &Pred,
4398  const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4399 
4400  const IRPosition &IRP = QueryingAA.getIRPosition();
4401  // Since we need to provide instructions we have to have an exact definition.
4402  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4403  if (!AssociatedFunction)
4404  return false;
4405 
4406  // TODO: use the function scope once we have call site AAReturnedValues.
4407  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4408  const auto &LivenessAA =
4409  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4410  bool AnyDead = false;
4411 
4412  auto &OpcodeInstMap =
4413  InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4414  if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4415  Opcodes))
4416  return false;
4417 
4418  // If we actually used liveness information, we have to record a dependence.
4419  if (AnyDead)
4420  recordDependence(LivenessAA, QueryingAA);
4421 
4422  return true;
4423 }
4424 
4425 bool Attributor::checkForAllReadWriteInstructions(
4426  const llvm::function_ref<bool(Instruction &)> &Pred,
4427  AbstractAttribute &QueryingAA) {
4428 
4429  const Function *AssociatedFunction =
4430  QueryingAA.getIRPosition().getAssociatedFunction();
4431  if (!AssociatedFunction)
4432  return false;
4433 
4434  // TODO: use the function scope once we have call site AAReturnedValues.
4435  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4436  const auto &LivenessAA =
4437  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4438  bool AnyDead = false;
4439 
4440  for (Instruction *I :
4441  InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
4442  // Skip dead instructions.
4443  if (LivenessAA.isAssumedDead(I)) {
4444  AnyDead = true;
4445  continue;
4446  }
4447 
4448  if (!Pred(*I))
4449  return false;
4450  }
4451 
4452  // If we actually used liveness information, we have to record a dependence.
4453  if (AnyDead)
4454  recordDependence(LivenessAA, QueryingAA);
4455 
4456  return true;
4457 }
4458 
4459 ChangeStatus Attributor::run(Module &M) {
4460  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
4461  << AllAbstractAttributes.size()
4462  << " abstract attributes.\n");
4463 
4464  // Now that all abstract attributes are collected and initialized we start
4465  // the abstract analysis.
4466 
4467  unsigned IterationCounter = 1;
4468 
4469  SmallVector<AbstractAttribute *, 64> ChangedAAs;
4470  SetVector<AbstractAttribute *> Worklist;
4471  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
4472 
4473  bool RecomputeDependences = false;
4474 
4475  do {
4476  // Remember the size to determine new attributes.
4477  size_t NumAAs = AllAbstractAttributes.size();
4478  LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
4479  << ", Worklist size: " << Worklist.size() << "\n");
4480 
4481  // If dependences (=QueryMap) are recomputed we have to look at all abstract
4482  // attributes again, regardless of what changed in the last iteration.
4483  if (RecomputeDependences) {
4484  LLVM_DEBUG(
4485  dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
4486  QueryMap.clear();
4487  ChangedAAs.clear();
4488  Worklist.insert(AllAbstractAttributes.begin(),
4489  AllAbstractAttributes.end());
4490  }
4491 
4492  // Add all abstract attributes that are potentially dependent on one that
4493  // changed to the work list.
4494  for (AbstractAttribute *ChangedAA : ChangedAAs) {
4495  auto &QuerriedAAs = QueryMap[ChangedAA];
4496  Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
4497  }
4498 
4499  LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
4500  << ", Worklist+Dependent size: " << Worklist.size()
4501  << "\n");
4502 
4503  // Reset the changed set.
4504  ChangedAAs.clear();
4505 
4506  // Update all abstract attributes in the work list and record the ones that
4507  // changed.
4508  for (AbstractAttribute *AA : Worklist)
4509  if (!isAssumedDead(*AA, nullptr))
4510  if (AA->update(*this) == ChangeStatus::CHANGED)
4511  ChangedAAs.push_back(AA);
4512 
4513  // Check if we recompute the dependences in the next iteration.
4514  RecomputeDependences = (DepRecomputeInterval > 0 &&
4515  IterationCounter % DepRecomputeInterval == 0);
4516 
4517  // Add attributes to the changed set if they have been created in the last
4518  // iteration.
4519  ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
4520  AllAbstractAttributes.end());
4521 
4522  // Reset the work list and repopulate with the changed abstract attributes.
4523  // Note that dependent ones are added above.
4524  Worklist.clear();
4525  Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
4526 
4527  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
4528  VerifyMaxFixpointIterations));
4529 
4530  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
4531  << IterationCounter << "/" << MaxFixpointIterations
4532  << " iterations\n");
4533 
4534  size_t NumFinalAAs = AllAbstractAttributes.size();
4535 
4536  // Reset abstract attributes not settled in a sound fixpoint by now. This
4537  // happens when we stopped the fixpoint iteration early. Note that only the
4538  // ones marked as "changed" *and* the ones transitively depending on them
4539  // need to be reverted to a pessimistic state. Others might not be in a
4540  // fixpoint state but we can use the optimistic results for them anyway.
4541  SmallPtrSet<AbstractAttribute *, 32> Visited;
4542  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
4543  AbstractAttribute *ChangedAA = ChangedAAs[u];
4544  if (!Visited.insert(ChangedAA).second)
4545  continue;
4546 
4547  AbstractState &State = ChangedAA->getState();
4548  if (!State.isAtFixpoint()) {
4549  State.indicatePessimisticFixpoint();
4550 
4551  NumAttributesTimedOut++;
4552  }
4553 
4554  auto &QuerriedAAs = QueryMap[ChangedAA];
4555  ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
4556  }
4557 
4558  LLVM_DEBUG({
4559  if (!Visited.empty())
4560  dbgs() << "\n[Attributor] Finalized " << Visited.size()
4561  << " abstract attributes.\n";
4562  });
4563 
4564  unsigned NumManifested = 0;
4565  unsigned NumAtFixpoint = 0;
4566  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
4567  for (AbstractAttribute *AA : AllAbstractAttributes) {
4568  AbstractState &State = AA->getState();
4569 
4570  // If there is not already a fixpoint reached, we can now take the
4571  // optimistic state. This is correct because we enforced a pessimistic one
4572  // on abstract attributes that were transitively dependent on a changed one
4573  // already above.
4574  if (!State.isAtFixpoint())
4575  State.indicateOptimisticFixpoint();
4576 
4577  // If the state is invalid, we do not try to manifest it.
4578  if (!State.isValidState())
4579  continue;
4580 
4581  // Skip dead code.
4582  if (isAssumedDead(*AA, nullptr))
4583  continue;
4584  // Manifest the state and record if we changed the IR.
4585  ChangeStatus LocalChange = AA->manifest(*this);
4586  if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
4587  AA->trackStatistics();
4588 
4589  ManifestChange = ManifestChange | LocalChange;
4590 
4591  NumAtFixpoint++;
4592  NumManifested += (LocalChange == ChangeStatus::CHANGED);
4593  }
4594 
4595  (void)NumManifested;
4596  (void)NumAtFixpoint;
4597  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
4598  << " arguments while " << NumAtFixpoint
4599  << " were in a valid fixpoint state\n");
4600 
4601  NumAttributesManifested += NumManifested;
4602  NumAttributesValidFixpoint += NumAtFixpoint;
4603 
4604  (void)NumFinalAAs;
4605  assert(
4606  NumFinalAAs == AllAbstractAttributes.size() &&
4607  "Expected the final number of abstract attributes to remain unchanged!");
4608 
4609  // Delete things at the end to avoid invalid references and to get a nice order.
4610  {
4611  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
4612  << ToBeDeletedFunctions.size() << " functions and "
4613  << ToBeDeletedBlocks.size() << " blocks and "
4614  << ToBeDeletedInsts.size() << " instructions\n");
4615  for (Instruction *I : ToBeDeletedInsts) {
4616  if (!I->use_empty())
4617  I->replaceAllUsesWith(UndefValue::get(I->getType()));
4618  I->eraseFromParent();
4619  }
4620 
4621  if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
4622  SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
4623  ToBeDeletedBBs.reserve(NumDeadBlocks);
4624  ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
4625  DeleteDeadBlocks(ToBeDeletedBBs);
4626  STATS_DECLTRACK(AAIsDead, BasicBlock,
4627  "Number of dead basic blocks deleted.");
4628  }
4629 
4630  STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
4631  for (Function *Fn : ToBeDeletedFunctions) {
4632  Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
4633  Fn->eraseFromParent();
4634  STATS_TRACK(AAIsDead, Function)
4635  }
4636 
4637  // Identify dead internal functions and delete them. This happens outside
4638  // the other fixpoint analysis as we might treat potentially dead functions
4639  // as live to lower the number of iterations. If they happen to be dead, the
4640  // below fixpoint loop will identify and eliminate them.
4641  SmallVector<Function *, 8> InternalFns;
4642  for (Function &F : M)
4643  if (F.hasLocalLinkage())
4644  InternalFns.push_back(&F);
4645 
4646  bool FoundDeadFn = true;
4647  while (FoundDeadFn) {
4648  FoundDeadFn = false;
4649  for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
4650  Function *F = InternalFns[u];
4651  if (!F)
4652  continue;
4653 
4654  const auto *LivenessAA =
4655  lookupAAFor<AAIsDead>(IRPosition::function(*F));
4656  if (LivenessAA &&
4657  !checkForAllCallSites([](AbstractCallSite ACS) { return false; },
4658  *LivenessAA, true))
4659  continue;
4660 
4661  STATS_TRACK(AAIsDead, Function)
4662  F->replaceAllUsesWith(UndefValue::get(F->getType()));
4663  F->eraseFromParent();
4664  InternalFns[u] = nullptr;
4665  FoundDeadFn = true;
4666  }
4667  }
4668  }
4669 
4670  if (VerifyMaxFixpointIterations &&
4671  IterationCounter != MaxFixpointIterations) {
4672  errs() << "\n[Attributor] Fixpoint iteration done after: "
4673  << IterationCounter << "/" << MaxFixpointIterations
4674  << " iterations\n";
4675  llvm_unreachable("The fixpoint was not reached with exactly the number of "
4676  "specified iterations!");
4677  }
4678 
4679  return ManifestChange;
4680 }
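// A minimal sketch (illustration only, not part of this file) of the
// known/assumed state the loop above drives to a fixpoint. `ToyState` is a
// hypothetical stand-in for the real AbstractState/IntegerState interface:
// assumed information starts optimistic and can only shrink towards the
// known, pessimistic side; the indicate*Fixpoint() calls force one side.
struct ToyState {
  unsigned Known = 0;     // Sound facts proven so far.
  unsigned Assumed = ~0u; // Optimistic superset, refined by updates.
  bool isAtFixpoint() const { return Assumed == Known; }
  void indicateOptimisticFixpoint() { Known = Assumed; }  // Lock in optimism.
  void indicatePessimisticFixpoint() { Assumed = Known; } // Give up soundly.
};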
4681 
4682 void Attributor::initializeInformationCache(Function &F) {
4683 
4684  // Walk all instructions to find interesting instructions that might be
4685  // queried by abstract attributes during their initialization or update.
4686  // This has to happen before we create attributes.
4687  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
4688  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
4689 
4690  for (Instruction &I : instructions(&F)) {
4691  bool IsInterestingOpcode = false;
4692 
4693  // To allow easy access to all instructions in a function with a given
4694  // opcode we store them in the InfoCache. As not all opcodes are interesting
4695  // to concrete attributes we only cache the ones that are as identified in
4696  // the following switch.
4697  // Note: There are no concrete attributes now so this is initially empty.
4698  switch (I.getOpcode()) {
4699  default:
4700  assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
4701    "New call site/base instruction type needs to be known in the "
4702  "Attributor.");
4703  break;
4704  case Instruction::Load:
4705  // The alignment of a pointer is interesting for loads.
4706  case Instruction::Store:
4707  // The alignment of a pointer is interesting for stores.
4708  case Instruction::Call:
4709  case Instruction::CallBr:
4710  case Instruction::Invoke:
4711  case Instruction::CleanupRet:
4712  case Instruction::CatchSwitch:
4713  case Instruction::Resume:
4714  case Instruction::Ret:
4715  IsInterestingOpcode = true;
4716  }
4717  if (IsInterestingOpcode)
4718  InstOpcodeMap[I.getOpcode()].push_back(&I);
4719  if (I.mayReadOrWriteMemory())
4720  ReadOrWriteInsts.push_back(&I);
4721  }
4722 }
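// A hedged usage sketch: once the cache above is filled, a consumer can visit
// all instructions of F with a cached opcode without rescanning the function.
// `visitCachedCalls` is a hypothetical helper, not an Attributor API.
static void visitCachedCalls(InformationCache &InfoCache, Function &F) {
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  for (Instruction *I : OpcodeInstMap[Instruction::Call])
    LLVM_DEBUG(dbgs() << "[Attributor] cached call: " << *I << "\n");
}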
4723 
4724 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
4725   if (!VisitedFunctions.insert(&F).second)
4726  return;
4727 
4728  IRPosition FPos = IRPosition::function(F);
4729 
4730  // Check for dead BasicBlocks in every function.
4731  // We need dead instruction detection because we do not want to deal with
4732  // broken IR in which SSA rules do not apply.
4733  getOrCreateAAFor<AAIsDead>(FPos);
4734 
4735  // Every function might be "will-return".
4736  getOrCreateAAFor<AAWillReturn>(FPos);
4737 
4738  // Every function can be nounwind.
4739  getOrCreateAAFor<AANoUnwind>(FPos);
4740 
4741  // Every function might be marked "nosync"
4742  getOrCreateAAFor<AANoSync>(FPos);
4743 
4744  // Every function might be "no-free".
4745  getOrCreateAAFor<AANoFree>(FPos);
4746 
4747  // Every function might be "no-return".
4748  getOrCreateAAFor<AANoReturn>(FPos);
4749 
4750  // Every function might be "no-recurse".
4751  getOrCreateAAFor<AANoRecurse>(FPos);
4752 
4753  // Every function might be "readnone/readonly/writeonly/...".
4754  getOrCreateAAFor<AAMemoryBehavior>(FPos);
4755 
4756  // Every function might be applicable for Heap-To-Stack conversion.
4757  if (EnableHeapToStack)
4758  getOrCreateAAFor<AAHeapToStack>(FPos);
4759 
4760  // Return attributes are only appropriate if the return type is non-void.
4761  Type *ReturnType = F.getReturnType();
4762  if (!ReturnType->isVoidTy()) {
4763  // Argument attribute "returned" --- Create only one per function even
4764  // though it is an argument attribute.
4765  getOrCreateAAFor<AAReturnedValues>(FPos);
4766 
4767  IRPosition RetPos = IRPosition::returned(F);
4768 
4769  // Every function might be simplified.
4770  getOrCreateAAFor<AAValueSimplify>(RetPos);
4771 
4772  if (ReturnType->isPointerTy()) {
4773 
4774  // Every function with pointer return type might be marked align.
4775  getOrCreateAAFor<AAAlign>(RetPos);
4776 
4777  // Every function with pointer return type might be marked nonnull.
4778  getOrCreateAAFor<AANonNull>(RetPos);
4779 
4780  // Every function with pointer return type might be marked noalias.
4781  getOrCreateAAFor<AANoAlias>(RetPos);
4782 
4783  // Every function with pointer return type might be marked
4784  // dereferenceable.
4785  getOrCreateAAFor<AADereferenceable>(RetPos);
4786  }
4787  }
4788 
4789  for (Argument &Arg : F.args()) {
4790   IRPosition ArgPos = IRPosition::argument(Arg);
4791 
4792  // Every argument might be simplified.
4793  getOrCreateAAFor<AAValueSimplify>(ArgPos);
4794 
4795  if (Arg.getType()->isPointerTy()) {
4796  // Every argument with pointer type might be marked nonnull.
4797  getOrCreateAAFor<AANonNull>(ArgPos);
4798 
4799  // Every argument with pointer type might be marked noalias.
4800  getOrCreateAAFor<AANoAlias>(ArgPos);
4801 
4802  // Every argument with pointer type might be marked dereferenceable.
4803  getOrCreateAAFor<AADereferenceable>(ArgPos);
4804 
4805  // Every argument with pointer type might be marked align.
4806  getOrCreateAAFor<AAAlign>(ArgPos);
4807 
4808  // Every argument with pointer type might be marked nocapture.
4809  getOrCreateAAFor<AANoCapture>(ArgPos);
4810 
4811  // Every argument with pointer type might be marked
4812  // "readnone/readonly/writeonly/..."
4813  getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
4814  }
4815  }
4816 
4817  auto CallSitePred = [&](Instruction &I) -> bool {
4818  CallSite CS(&I);
4819  if (CS.getCalledFunction()) {
4820  for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
4821 
4822  IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
4823 
4824  // Call site argument might be simplified.
4825  getOrCreateAAFor<AAValueSimplify>(CSArgPos);
4826 
4827  if (!CS.getArgument(i)->getType()->isPointerTy())
4828  continue;
4829 
4830  // Call site argument attribute "non-null".
4831  getOrCreateAAFor<AANonNull>(CSArgPos);
4832 
4833  // Call site argument attribute "no-alias".
4834  getOrCreateAAFor<AANoAlias>(CSArgPos);
4835 
4836  // Call site argument attribute "dereferenceable".
4837  getOrCreateAAFor<AADereferenceable>(CSArgPos);
4838 
4839  // Call site argument attribute "align".
4840  getOrCreateAAFor<AAAlign>(CSArgPos);
4841  }
4842  }
4843  return true;
4844  };
4845 
4846  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
4847  bool Success, AnyDead = false;
4848  Success = checkForAllInstructionsImpl(
4849  OpcodeInstMap, CallSitePred, nullptr, AnyDead,
4850  {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
4851       (unsigned)Instruction::Call});
4852  (void)Success;
4853  assert(Success && !AnyDead && "Expected the check call to be successful!");
4854 
4855  auto LoadStorePred = [&](Instruction &I) -> bool {
4856  if (isa<LoadInst>(I))
4857  getOrCreateAAFor<AAAlign>(
4858  IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
4859  else
4860  getOrCreateAAFor<AAAlign>(
4861  IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
4862  return true;
4863  };
4864  Success = checkForAllInstructionsImpl(
4865  OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
4866      {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
4867  (void)Success;
4868  assert(Success && !AnyDead && "Expected the check call to be successful!");
4869 }
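// A hedged sketch of the same registration pattern for client code: before
// Attributor::run, additional attributes can be seeded at any IR position.
// `seedFirstArgNoCapture` is a hypothetical helper for illustration only.
static void seedFirstArgNoCapture(Attributor &A, Function &F) {
  if (F.arg_empty())
    return;
  Argument &Arg = *F.arg_begin();
  if (Arg.getType()->isPointerTy())
    A.getOrCreateAAFor<AANoCapture>(IRPosition::argument(Arg));
}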
4870 
4871 /// Helpers to ease debugging through output streams and print calls.
4872 ///
4873 ///{
4874 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
4875   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
4876 }
4877 
4878 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
4879   switch (AP) {
4880   case IRPosition::IRP_INVALID:
4881     return OS << "inv";
4882   case IRPosition::IRP_FLOAT:
4883     return OS << "flt";
4884   case IRPosition::IRP_RETURNED:
4885     return OS << "fn_ret";
4886   case IRPosition::IRP_CALL_SITE_RETURNED:
4887     return OS << "cs_ret";
4888   case IRPosition::IRP_FUNCTION:
4889     return OS << "fn";
4890   case IRPosition::IRP_CALL_SITE:
4891     return OS << "cs";
4892   case IRPosition::IRP_ARGUMENT:
4893     return OS << "arg";
4894   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4895     return OS << "cs_arg";
4896   }
4897  llvm_unreachable("Unknown attribute position!");
4898 }
4899 
4900 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
4901   const Value &AV = Pos.getAssociatedValue();
4902  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
4903  << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
4904 }
4905 
4906 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerState &S) {
4907   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
4908  << static_cast<const AbstractState &>(S);
4909 }
4910 
4911 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
4912   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
4913 }
4914 
4915 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
4916   AA.print(OS);
4917  return OS;
4918 }
4919 
4920 void AbstractAttribute::print(raw_ostream &OS) const {
4921   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
4922  << "]";
4923 }
4924 ///}
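// For orientation, the helpers above compose as follows when an abstract
// attribute is streamed (the summary text varies per attribute; this sample
// output is illustrative only):
//   LLVM_DEBUG(dbgs() << SomeAA << "\n");
//   // -> [P: {arg:ptr [foo@0]}][nocapture][S: fix]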
4925 
4926 /// ----------------------------------------------------------------------------
4927 /// Pass (Manager) Boilerplate
4928 /// ----------------------------------------------------------------------------
4929 
4930 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
4931   if (DisableAttributor)
4932  return false;
4933 
4934  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
4935  << " functions.\n");
4936 
4937  // Create an Attributor and initially empty information cache that is filled
4938  // while we identify default attribute opportunities.
4939  InformationCache InfoCache(M, AG);
4940  Attributor A(InfoCache, DepRecInterval);
4941 
4942  for (Function &F : M)
4943     A.initializeInformationCache(F);
4944 
4945  for (Function &F : M) {
4946  if (F.hasExactDefinition())
4947  NumFnWithExactDefinition++;
4948  else
4949  NumFnWithoutExactDefinition++;
4950 
4951  // We look at internal functions only on-demand but if any use is not a
4952  // direct call, we have to do it eagerly.
4953  if (F.hasLocalLinkage()) {
4954  if (llvm::all_of(F.uses(), [](const Use &U) {
4955  return ImmutableCallSite(U.getUser()) &&
4956  ImmutableCallSite(U.getUser()).isCallee(&U);
4957  }))
4958  continue;
4959  }
4960 
4961  // Populate the Attributor with abstract attribute opportunities in the
4962  // function and the information cache with IR information.
4963     A.identifyDefaultAbstractAttributes(F);
4964   }
4965 
4966  return A.run(M) == ChangeStatus::CHANGED;
4967 }
4968 
4969 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4970   AnalysisGetter AG(AM);
4971  if (runAttributorOnModule(M, AG)) {
4972  // FIXME: Think about passes we will preserve and add them here.
4973  return PreservedAnalyses::none();
4974  }
4975  return PreservedAnalyses::all();
4976 }
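// A hedged usage sketch for the new pass manager (standard boilerplate, shown
// for completeness; `MPM` is a hypothetical ModulePassManager):
//   ModulePassManager MPM;
//   MPM.addPass(AttributorPass());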
4977 
4978 namespace {
4979 
4980 struct AttributorLegacyPass : public ModulePass {
4981  static char ID;
4982 
4983  AttributorLegacyPass() : ModulePass(ID) {
4984     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
4985   }
4986 
4987  bool runOnModule(Module &M) override {
4988  if (skipModule(M))
4989  return false;
4990 
4991  AnalysisGetter AG;
4992  return runAttributorOnModule(M, AG);
4993  }
4994 
4995  void getAnalysisUsage(AnalysisUsage &AU) const override {
4996  // FIXME: Think about passes we will preserve and add them here.
4998  }
4999 };
5000 
5001 } // end anonymous namespace
5002 
5003 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
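// A hedged usage sketch for the legacy pass manager (standard boilerplate;
// `PM` is a hypothetical legacy::PassManager):
//   legacy::PassManager PM;
//   PM.add(llvm::createAttributorLegacyPass());
//   PM.run(M);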
5004 
5005 char AttributorLegacyPass::ID = 0;
5006 
5007 const char AAReturnedValues::ID = 0;
5008 const char AANoUnwind::ID = 0;
5009 const char AANoSync::ID = 0;
5010 const char AANoFree::ID = 0;
5011 const char AANonNull::ID = 0;
5012 const char AANoRecurse::ID = 0;
5013 const char AAWillReturn::ID = 0;
5014 const char AANoAlias::ID = 0;
5015 const char AANoReturn::ID = 0;
5016 const char AAIsDead::ID = 0;
5017 const char AADereferenceable::ID = 0;
5018 const char AAAlign::ID = 0;
5019 const char AANoCapture::ID = 0;
5020 const char AAValueSimplify::ID = 0;
5021 const char AAHeapToStack::ID = 0;
5022 const char AAMemoryBehavior::ID = 0;
5023 
5024 // Macro magic to create the static generator function for attributes that
5025 // follow the naming scheme.
5026 
5027 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
5028  case IRPosition::PK: \
5029  llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
5030 
5031 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
5032  case IRPosition::PK: \
5033  AA = new CLASS##SUFFIX(IRP); \
5034  break;
5035 
5036 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5037  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5038  CLASS *AA = nullptr; \
5039  switch (IRP.getPositionKind()) { \
5040  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5041  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
5042  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
5043  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5044  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
5045  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
5046  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5047  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5048  } \
5049  return *AA; \
5050  }
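// For a concrete class such as AANoUnwind, the macro above expands (roughly,
// whitespace adjusted, invalid-position cases elided) to a factory that only
// accepts function and call site positions:
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new AANoUnwindFunction(IRP);
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new AANoUnwindCallSite(IRP);
//       break;
//     /* all other kinds: llvm_unreachable(...) */
//     }
//     return *AA;
//   }
// The SUFFIX argument selects the derived implementation class, which is why
// each abstract attribute provides per-position subclasses.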
5051 
5052 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5053  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5054  CLASS *AA = nullptr; \
5055  switch (IRP.getPositionKind()) { \
5056  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5057  SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
5058  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
5059  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5060  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5061  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
5062  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5063  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5064  } \
5065  return *AA; \
5066  }
5067 
5068 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5069  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5070  CLASS *AA = nullptr; \
5071  switch (IRP.getPositionKind()) { \
5072  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5073  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5074  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5075  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5076  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5077  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
5078  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5079  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5080  } \
5081  return *AA; \
5082  }
5083 
5084 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5085  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5086  CLASS *AA = nullptr; \
5087  switch (IRP.getPositionKind()) { \
5088  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5089  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
5090  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
5091  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5092  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
5093  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
5094  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
5095  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5096  } \
5097  return *AA; \
5098  }
5099 
5100 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5101  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5102  CLASS *AA = nullptr; \
5103  switch (IRP.getPositionKind()) { \
5104  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5105  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5106  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5107  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5108  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5109  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5110  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5111  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5112  } \
5113  return *AA; \
5114  }
5115 
5116 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
5117 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
5118 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
5119 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
5120 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
5121 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
5122 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
5123 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
5124 
5125 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
5126 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
5127 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
5128 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
5129 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
5130 
5131 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
5132 
5133 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
5134 
5135 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
5136 
5137 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5138 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5139 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5140 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5141 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5142 #undef SWITCH_PK_CREATE
5143 #undef SWITCH_PK_INV
5144 
5145 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5146  "Deduce and propagate attributes", false, false)
5148 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5149  "Deduce and propagate attributes", false, false)