LLVM  10.0.0svn
Attributor.cpp
Go to the documentation of this file.
1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an interprocedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
17 
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/Loads.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/Support/Debug.h"
39 
40 #include <cassert>
41 
42 using namespace llvm;
43 
// Debug type used for LLVM_DEBUG output / -debug-only filtering.
44 #define DEBUG_TYPE "attributor"
45 
// Pass-wide statistics, printed when -stats is given.
46 STATISTIC(NumFnWithExactDefinition,
47  "Number of function with exact definitions");
48 STATISTIC(NumFnWithoutExactDefinition,
49  "Number of function without exact definitions");
50 STATISTIC(NumAttributesTimedOut,
51  "Number of abstract attributes timed out before fixpoint");
52 STATISTIC(NumAttributesValidFixpoint,
53  "Number of abstract attributes in a valid fixpoint state");
54 STATISTIC(NumAttributesManifested,
55  "Number of abstract attributes manifested in IR");
56 
57 // Some helper macros to deal with statistics tracking.
58 //
59 // Usage:
60 // For simple IR attribute tracking overload trackStatistics in the abstract
61 // attribute and choose the right STATS_DECLTRACK_********* macro,
62 // e.g.,:
63 // void trackStatistics() const override {
64 // STATS_DECLTRACK_ARG_ATTR(returned)
65 // }
66 // If there is a single "increment" side one can use the macro
67 // STATS_DECLTRACK with a custom message. If there are multiple increment
68 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
69 //
// Builds the human-readable message shown next to a statistic counter.
70 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
71  ("Number of " #TYPE " marked '" #NAME "'")
// Statistic variables are named NumIR<PositionKind>_<AttributeName>.
72 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
73 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
74 #define STATS_DECL(NAME, TYPE, MSG) \
75  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
76 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
77 #define STATS_DECLTRACK(NAME, TYPE, MSG) \
78  { \
79  STATS_DECL(NAME, TYPE, MSG) \
80  STATS_TRACK(NAME, TYPE) \
81  }
// Position-specific convenience wrappers used from trackStatistics()
// overrides; one per IR position kind.
82 #define STATS_DECLTRACK_ARG_ATTR(NAME) \
83  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
84 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \
85  STATS_DECLTRACK(NAME, CSArguments, \
86  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
87 #define STATS_DECLTRACK_FN_ATTR(NAME) \
88  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
89 #define STATS_DECLTRACK_CS_ATTR(NAME) \
90  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
91 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \
92  STATS_DECLTRACK(NAME, FunctionReturn, \
93  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
94 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \
95  STATS_DECLTRACK(NAME, CSReturn, \
96  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
97 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
98  STATS_DECLTRACK(NAME, Floating, \
99  ("Number of floating values known to be '" #NAME "'"))
100 
101 // TODO: Determine a good default value.
102 //
103 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
104 // (when run with the first 5 abstract attributes). The results also indicate
105 // that we never reach 32 iterations but always find a fixpoint sooner.
106 //
107 // This will become more evolved once we perform two interleaved fixpoint
108 // iterations: bottom-up and top-down.
109 static cl::opt<unsigned>
110  MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
111  cl::desc("Maximal number of fixpoint iterations."),
112  cl::init(32));
114  "attributor-max-iterations-verify", cl::Hidden,
115  cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
116  cl::init(false));
117 
119  "attributor-disable", cl::Hidden,
120  cl::desc("Disable the attributor inter-procedural deduction pass."),
121  cl::init(true));
122 
124  "attributor-manifest-internal", cl::Hidden,
125  cl::desc("Manifest Attributor internal string attributes."),
126  cl::init(false));
127 
129  "attributor-dependence-recompute-interval", cl::Hidden,
130  cl::desc("Number of iterations until dependences are recomputed."),
131  cl::init(4));
132 
133 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
134  cl::init(true), cl::Hidden);
135 
136 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
137  cl::Hidden);
138 
139 /// Logic operators for the change status enum class.
140 ///
141 ///{
143  return l == ChangeStatus::CHANGED ? l : r;
144 }
146  return l == ChangeStatus::UNCHANGED ? l : r;
147 }
148 ///}
149 
150 /// Recursively visit all values that might become \p IRP at some point. This
151 /// will be done by looking through cast instructions, selects, phis, and calls
152 /// with the "returned" attribute. Once we cannot look through the value any
153 /// further, the callback \p VisitValueCB is invoked and passed the current
154 /// value, the \p State, and a flag to indicate if we stripped anything. To
155 /// limit how much effort is invested, we will never visit more values than
156 /// specified by \p MaxValues.
157 template <typename AAType, typename StateTy>
159  Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
160  const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
161  int MaxValues = 8) {
162 
163  const AAIsDead *LivenessAA = nullptr;
164  if (IRP.getAnchorScope())
165  LivenessAA = &A.getAAFor<AAIsDead>(
166  QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
167  /* TrackDependence */ false);
168  bool AnyDead = false;
169 
170  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
171  SmallPtrSet<Value *, 16> Visited;
172  SmallVector<Value *, 16> Worklist;
173  Worklist.push_back(&IRP.getAssociatedValue());
174 
175  int Iteration = 0;
176  do {
177  Value *V = Worklist.pop_back_val();
178 
179  // Check if we should process the current value. To prevent endless
180  // recursion keep a record of the values we followed!
181  if (!Visited.insert(V).second)
182  continue;
183 
184  // Make sure we limit the compile time for complex expressions.
185  if (Iteration++ >= MaxValues)
186  return false;
187 
188  // Explicitly look through calls with a "returned" attribute if we do
189  // not have a pointer as stripPointerCasts only works on them.
190  Value *NewV = nullptr;
191  if (V->getType()->isPointerTy()) {
192  NewV = V->stripPointerCasts();
193  } else {
194  CallSite CS(V);
195  if (CS && CS.getCalledFunction()) {
196  for (Argument &Arg : CS.getCalledFunction()->args())
197  if (Arg.hasReturnedAttr()) {
198  NewV = CS.getArgOperand(Arg.getArgNo());
199  break;
200  }
201  }
202  }
203  if (NewV && NewV != V) {
204  Worklist.push_back(NewV);
205  continue;
206  }
207 
208  // Look through select instructions, visit both potential values.
209  if (auto *SI = dyn_cast<SelectInst>(V)) {
210  Worklist.push_back(SI->getTrueValue());
211  Worklist.push_back(SI->getFalseValue());
212  continue;
213  }
214 
215  // Look through phi nodes, visit all live operands.
216  if (auto *PHI = dyn_cast<PHINode>(V)) {
217  assert(LivenessAA &&
218  "Expected liveness in the presence of instructions!");
219  for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
220  const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
221  if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
222  AnyDead = true;
223  continue;
224  }
225  Worklist.push_back(PHI->getIncomingValue(u));
226  }
227  continue;
228  }
229 
230  // Once a leaf is reached we inform the user through the callback.
231  if (!VisitValueCB(*V, State, Iteration > 1))
232  return false;
233  } while (!Worklist.empty());
234 
235  // If we actually used liveness information so we have to record a dependence.
236  if (AnyDead)
237  A.recordDependence(*LivenessAA, QueryingAA);
238 
239  // All values have been visited.
240  return true;
241 }
242 
243 /// Return true if \p New is equal or worse than \p Old.
244 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
245  if (!Old.isIntAttribute())
246  return true;
247 
248  return Old.getValueAsInt() >= New.getValueAsInt();
249 }
250 
251 /// Return true if the information provided by \p Attr was added to the
252 /// attribute list \p Attrs. This is only the case if it was not already present
253 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
254 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
255  AttributeList &Attrs, int AttrIdx) {
256 
257  if (Attr.isEnumAttribute()) {
259  if (Attrs.hasAttribute(AttrIdx, Kind))
260  if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
261  return false;
262  Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
263  return true;
264  }
265  if (Attr.isStringAttribute()) {
266  StringRef Kind = Attr.getKindAsString();
267  if (Attrs.hasAttribute(AttrIdx, Kind))
268  if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
269  return false;
270  Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
271  return true;
272  }
273  if (Attr.isIntAttribute()) {
275  if (Attrs.hasAttribute(AttrIdx, Kind))
276  if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
277  return false;
278  Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
279  Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
280  return true;
281  }
282 
283  llvm_unreachable("Expected enum or string attribute!");
284 }
285 static const Value *getPointerOperand(const Instruction *I) {
286  if (auto *LI = dyn_cast<LoadInst>(I))
287  if (!LI->isVolatile())
288  return LI->getPointerOperand();
289 
290  if (auto *SI = dyn_cast<StoreInst>(I))
291  if (!SI->isVolatile())
292  return SI->getPointerOperand();
293 
294  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
295  if (!CXI->isVolatile())
296  return CXI->getPointerOperand();
297 
298  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
299  if (!RMWI->isVolatile())
300  return RMWI->getPointerOperand();
301 
302  return nullptr;
303 }
305  int64_t &BytesOffset,
306  const DataLayout &DL) {
307  const Value *Ptr = getPointerOperand(I);
308  if (!Ptr)
309  return nullptr;
310 
311  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
312  /*AllowNonInbounds*/ false);
313 }
314 
317  if (getState().isAtFixpoint())
318  return HasChanged;
319 
320  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
321 
322  HasChanged = updateImpl(A);
323 
324  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
325  << "\n");
326 
327  return HasChanged;
328 }
329 
332  const ArrayRef<Attribute> &DeducedAttrs) {
333  Function *ScopeFn = IRP.getAssociatedFunction();
335 
336  // In the following some generic code that will manifest attributes in
337  // DeducedAttrs if they improve the current IR. Due to the different
338  // annotation positions we use the underlying AttributeList interface.
339 
341  switch (PK) {
348  Attrs = ScopeFn->getAttributes();
349  break;
354  break;
355  }
356 
358  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
359  for (const Attribute &Attr : DeducedAttrs) {
360  if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
361  continue;
362 
363  HasChanged = ChangeStatus::CHANGED;
364  }
365 
366  if (HasChanged == ChangeStatus::UNCHANGED)
367  return HasChanged;
368 
369  switch (PK) {
373  ScopeFn->setAttributes(Attrs);
374  break;
378  CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
379  break;
382  break;
383  }
384 
385  return HasChanged;
386 }
387 
390 
392  IRPositions.emplace_back(IRP);
393 
394  ImmutableCallSite ICS(&IRP.getAnchorValue());
395  switch (IRP.getPositionKind()) {
399  return;
402  IRPositions.emplace_back(
404  return;
406  assert(ICS && "Expected call site!");
407  // TODO: We need to look at the operand bundles similar to the redirection
408  // in CallBase.
409  if (!ICS.hasOperandBundles())
410  if (const Function *Callee = ICS.getCalledFunction())
411  IRPositions.emplace_back(IRPosition::function(*Callee));
412  return;
414  assert(ICS && "Expected call site!");
415  // TODO: We need to look at the operand bundles similar to the redirection
416  // in CallBase.
417  if (!ICS.hasOperandBundles()) {
418  if (const Function *Callee = ICS.getCalledFunction()) {
419  IRPositions.emplace_back(IRPosition::returned(*Callee));
420  IRPositions.emplace_back(IRPosition::function(*Callee));
421  }
422  }
423  IRPositions.emplace_back(
424  IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
425  return;
427  int ArgNo = IRP.getArgNo();
428  assert(ICS && ArgNo >= 0 && "Expected call site!");
429  // TODO: We need to look at the operand bundles similar to the redirection
430  // in CallBase.
431  if (!ICS.hasOperandBundles()) {
432  const Function *Callee = ICS.getCalledFunction();
433  if (Callee && Callee->arg_size() > unsigned(ArgNo))
434  IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
435  if (Callee)
436  IRPositions.emplace_back(IRPosition::function(*Callee));
437  }
438  IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
439  return;
440  }
441  }
442 }
443 
445  bool IgnoreSubsumingPositions) const {
446  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
447  for (Attribute::AttrKind AK : AKs)
448  if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
449  return true;
450  // The first position returned by the SubsumingPositionIterator is
451  // always the position itself. If we ignore subsuming positions we
452  // are done after the first iteration.
453  if (IgnoreSubsumingPositions)
454  break;
455  }
456  return false;
457 }
458 
461  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
462  for (Attribute::AttrKind AK : AKs) {
463  const Attribute &Attr = EquivIRP.getAttr(AK);
464  if (Attr.getKindAsEnum() == AK)
465  Attrs.push_back(Attr);
466  }
467 }
468 
469 void IRPosition::verify() {
470  switch (KindOrArgNo) {
471  default:
472  assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
473  assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
474  "Expected call base or argument for positive attribute index!");
475  if (isa<Argument>(AnchorVal)) {
476  assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
477  "Argument number mismatch!");
478  assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
479  "Associated value mismatch!");
480  } else {
481  assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
482  "Call site argument number mismatch!");
483  assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
484  &getAssociatedValue() &&
485  "Associated value mismatch!");
486  }
487  break;
488  case IRP_INVALID:
489  assert(!AnchorVal && "Expected no value for an invalid position!");
490  break;
491  case IRP_FLOAT:
492  assert((!isa<CallBase>(&getAssociatedValue()) &&
493  !isa<Argument>(&getAssociatedValue())) &&
494  "Expected specialized kind for call base and argument values!");
495  break;
496  case IRP_RETURNED:
497  assert(isa<Function>(AnchorVal) &&
498  "Expected function for a 'returned' position!");
499  assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
500  break;
501  case IRP_CALL_SITE_RETURNED:
502  assert((isa<CallBase>(AnchorVal)) &&
503  "Expected call base for 'call site returned' position!");
504  assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
505  break;
506  case IRP_CALL_SITE:
507  assert((isa<CallBase>(AnchorVal)) &&
508  "Expected call base for 'call site function' position!");
509  assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
510  break;
511  case IRP_FUNCTION:
512  assert(isa<Function>(AnchorVal) &&
513  "Expected function for a 'function' position!");
514  assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
515  break;
516  }
517 }
518 
519 namespace {
520 /// Helper functions to clamp a state \p S of type \p StateType with the
521 /// information in \p R and indicate/return if \p S did change (as-in update is
522 /// required to be run again).
523 ///
524 ///{
525 template <typename StateType>
526 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);
527 
528 template <>
529 ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
530  const IntegerState &R) {
531  auto Assumed = S.getAssumed();
532  S ^= R;
533  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
535 }
536 
/// BooleanState is an IntegerState under the hood, so delegate the clamping.
537 template <>
538 ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
539  const BooleanState &R) {
540  return clampStateAndIndicateChange<IntegerState>(S, R);
541 }
542 ///}
543 
544 /// Clamp the information known for all returned values of a function
545 /// (identified by \p QueryingAA) into \p S.
546 template <typename AAType, typename StateType = typename AAType::StateType>
547 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
548  StateType &S) {
549  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
550  << static_cast<const AbstractAttribute &>(QueryingAA)
551  << " into " << S << "\n");
552 
553  assert((QueryingAA.getIRPosition().getPositionKind() ==
555  QueryingAA.getIRPosition().getPositionKind() ==
557  "Can only clamp returned value states for a function returned or call "
558  "site returned position!");
559 
560  // Use an optional state as there might not be any return values and we want
561  // to join (IntegerState::operator&) the state of all there are.
563 
564  // Callback for each possibly returned value.
565  auto CheckReturnValue = [&](Value &RV) -> bool {
566  const IRPosition &RVPos = IRPosition::value(RV);
567  const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
568  LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
569  << " @ " << RVPos << "\n");
570  const StateType &AAS = static_cast<const StateType &>(AA.getState());
571  if (T.hasValue())
572  *T &= AAS;
573  else
574  T = AAS;
575  LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
576  << "\n");
577  return T->isValidState();
578  };
579 
580  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
581  S.indicatePessimisticFixpoint();
582  else if (T.hasValue())
583  S ^= *T;
584 }
585 
586 /// Helper class to compose two generic deduction
587 template <typename AAType, typename Base, typename StateType,
588  template <typename...> class F, template <typename...> class G>
589 struct AAComposeTwoGenericDeduction
590  : public F<AAType, G<AAType, Base, StateType>, StateType> {
591  AAComposeTwoGenericDeduction(const IRPosition &IRP)
592  : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
593 
594  /// See AbstractAttribute::updateImpl(...).
595  ChangeStatus updateImpl(Attributor &A) override {
596  return F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A) |
597  G<AAType, Base, StateType>::updateImpl(A);
598  }
599 };
600 
601 /// Helper class for generic deduction: return value -> returned position.
602 template <typename AAType, typename Base,
603  typename StateType = typename AAType::StateType>
604 struct AAReturnedFromReturnedValues : public Base {
605  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
606 
607  /// See AbstractAttribute::updateImpl(...).
608  ChangeStatus updateImpl(Attributor &A) override {
609  StateType S;
610  clampReturnedValueStates<AAType, StateType>(A, *this, S);
611  // TODO: If we know we visited all returned values, thus no are assumed
612  // dead, we can take the known information from the state T.
613  return clampStateAndIndicateChange<StateType>(this->getState(), S);
614  }
615 };
616 
617 /// Clamp the information known at all call sites for a given argument
618 /// (identified by \p QueryingAA) into \p S.
619 template <typename AAType, typename StateType = typename AAType::StateType>
620 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
621  StateType &S) {
622  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
623  << static_cast<const AbstractAttribute &>(QueryingAA)
624  << " into " << S << "\n");
625 
626  assert(QueryingAA.getIRPosition().getPositionKind() ==
628  "Can only clamp call site argument states for an argument position!");
629 
630  // Use an optional state as there might not be any return values and we want
631  // to join (IntegerState::operator&) the state of all there are.
633 
634  // The argument number which is also the call site argument number.
635  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
636 
637  auto CallSiteCheck = [&](AbstractCallSite ACS) {
638  const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
639  // Check if a coresponding argument was found or if it is on not associated
640  // (which can happen for callback calls).
641  if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
642  return false;
643 
644  const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
645  LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
646  << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
647  const StateType &AAS = static_cast<const StateType &>(AA.getState());
648  if (T.hasValue())
649  *T &= AAS;
650  else
651  T = AAS;
652  LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
653  << "\n");
654  return T->isValidState();
655  };
656 
657  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
658  S.indicatePessimisticFixpoint();
659  else if (T.hasValue())
660  S ^= *T;
661 }
662 
663 /// Helper class for generic deduction: call site argument -> argument position.
664 template <typename AAType, typename Base,
665  typename StateType = typename AAType::StateType>
666 struct AAArgumentFromCallSiteArguments : public Base {
667  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
668 
669  /// See AbstractAttribute::updateImpl(...).
670  ChangeStatus updateImpl(Attributor &A) override {
671  StateType S;
672  clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
673  // TODO: If we know we visited all incoming values, thus no are assumed
674  // dead, we can take the known information from the state T.
675  return clampStateAndIndicateChange<StateType>(this->getState(), S);
676  }
677 };
678 
679 /// Helper class for generic replication: function returned -> cs returned.
680 template <typename AAType, typename Base,
681  typename StateType = typename AAType::StateType>
682 struct AACallSiteReturnedFromReturned : public Base {
683  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
684 
685  /// See AbstractAttribute::updateImpl(...).
686  ChangeStatus updateImpl(Attributor &A) override {
687  assert(this->getIRPosition().getPositionKind() ==
689  "Can only wrap function returned positions for call site returned "
690  "positions!");
691  auto &S = this->getState();
692 
693  const Function *AssociatedFunction =
695  if (!AssociatedFunction)
696  return S.indicatePessimisticFixpoint();
697 
698  IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
699  const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
700  return clampStateAndIndicateChange(
701  S, static_cast<const typename AAType::StateType &>(AA.getState()));
702  }
703 };
704 
705 /// Helper class for generic deduction using must-be-executed-context
706 /// Base class is required to have `followUse` method.
707 
708 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
709 /// U - Underlying use.
710 /// I - The user of the \p U.
711 /// `followUse` returns true if the value should be tracked transitively.
712 
713 template <typename AAType, typename Base,
714  typename StateType = typename AAType::StateType>
715 struct AAFromMustBeExecutedContext : public Base {
716  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
717 
718  void initialize(Attributor &A) override {
719  Base::initialize(A);
720  IRPosition &IRP = this->getIRPosition();
721  Instruction *CtxI = IRP.getCtxI();
722 
723  if (!CtxI)
724  return;
725 
726  for (const Use &U : IRP.getAssociatedValue().uses())
727  Uses.insert(&U);
728  }
729 
730  /// See AbstractAttribute::updateImpl(...).
731  ChangeStatus updateImpl(Attributor &A) override {
732  auto BeforeState = this->getState();
733  auto &S = this->getState();
734  Instruction *CtxI = this->getIRPosition().getCtxI();
735  if (!CtxI)
737 
740 
741  SetVector<const Use *> NextUses;
742 
743  for (const Use *U : Uses) {
744  if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
745  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
746  bool Found = EIt.count(UserI);
747  while (!Found && ++EIt != EEnd)
748  Found = EIt.getCurrentInst() == UserI;
749  if (Found && Base::followUse(A, U, UserI))
750  for (const Use &Us : UserI->uses())
751  NextUses.insert(&Us);
752  }
753  }
754  for (const Use *U : NextUses)
755  Uses.insert(U);
756 
757  return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
758  }
759 
760 private:
761  /// Container for (transitive) uses of the associated value.
763 };
764 
/// Compose must-be-executed-context deduction with call site argument
/// clamping.
765 template <typename AAType, typename Base,
766  typename StateType = typename AAType::StateType>
767 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
768  AAComposeTwoGenericDeduction<AAType, Base, StateType,
769  AAFromMustBeExecutedContext,
770  AAArgumentFromCallSiteArguments>;
771 
/// Compose must-be-executed-context deduction with function-returned ->
/// call-site-returned replication.
772 template <typename AAType, typename Base,
773  typename StateType = typename AAType::StateType>
774 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
775  AAComposeTwoGenericDeduction<AAType, Base, StateType,
776  AAFromMustBeExecutedContext,
777  AACallSiteReturnedFromReturned>;
778 
779 /// -----------------------NoUnwind Function Attribute--------------------------
780 
781 struct AANoUnwindImpl : AANoUnwind {
782  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
783 
784  const std::string getAsStr() const override {
785  return getAssumed() ? "nounwind" : "may-unwind";
786  }
787 
788  /// See AbstractAttribute::updateImpl(...).
789  ChangeStatus updateImpl(Attributor &A) override {
790  auto Opcodes = {
791  (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
792  (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
793  (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
794 
795  auto CheckForNoUnwind = [&](Instruction &I) {
796  if (!I.mayThrow())
797  return true;
798 
799  if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
800  const auto &NoUnwindAA =
802  return NoUnwindAA.isAssumedNoUnwind();
803  }
804  return false;
805  };
806 
807  if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
808  return indicatePessimisticFixpoint();
809 
811  }
812 };
813 
814 struct AANoUnwindFunction final : public AANoUnwindImpl {
815  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
816 
817  /// See AbstractAttribute::trackStatistics()
818  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
819 };
820 
821 /// NoUnwind attribute deduction for a call sites.
822 struct AANoUnwindCallSite final : AANoUnwindImpl {
823  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
824 
825  /// See AbstractAttribute::initialize(...).
826  void initialize(Attributor &A) override {
828  Function *F = getAssociatedFunction();
829  if (!F)
830  indicatePessimisticFixpoint();
831  }
832 
833  /// See AbstractAttribute::updateImpl(...).
834  ChangeStatus updateImpl(Attributor &A) override {
835  // TODO: Once we have call site specific value information we can provide
836  // call site specific liveness information and then it makes
837  // sense to specialize attributes for call sites arguments instead of
838  // redirecting requests to the callee argument.
839  Function *F = getAssociatedFunction();
840  const IRPosition &FnPos = IRPosition::function(*F);
841  auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
842  return clampStateAndIndicateChange(
843  getState(),
844  static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
845  }
846 
847  /// See AbstractAttribute::trackStatistics()
848  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
849 };
850 
851 /// --------------------- Function Return Values -------------------------------
852 
853 /// "Attribute" that collects all potential returned values and the return
854 /// instructions that they arise from.
855 ///
856 /// If there is a unique returned value R, the manifest method will:
857 /// - mark R with the "returned" attribute, if R is an argument.
858 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
859 
860  /// Mapping of values potentially returned by the associated function to the
861  /// return instructions that might return them.
863 
864  /// Mapping to remember the number of returned values for a call site such
865  /// that we can avoid updates if nothing changed.
866  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
867 
868  /// Set of unresolved calls returned by the associated function.
869  SmallSetVector<CallBase *, 4> UnresolvedCalls;
870 
871  /// State flags
872  ///
873  ///{
874  bool IsFixed = false;
875  bool IsValidState = true;
876  ///}
877 
878 public:
879  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
880 
881  /// See AbstractAttribute::initialize(...).
882  void initialize(Attributor &A) override {
883  // Reset the state.
884  IsFixed = false;
885  IsValidState = true;
886  ReturnedValues.clear();
887 
888  Function *F = getAssociatedFunction();
889  if (!F) {
890  indicatePessimisticFixpoint();
891  return;
892  }
893 
894  // The map from instruction opcodes to those instructions in the function.
895  auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
896 
897  // Look through all arguments, if one is marked as returned we are done.
898  for (Argument &Arg : F->args()) {
899  if (Arg.hasReturnedAttr()) {
900  auto &ReturnInstSet = ReturnedValues[&Arg];
901  for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
902  ReturnInstSet.insert(cast<ReturnInst>(RI));
903 
904  indicateOptimisticFixpoint();
905  return;
906  }
907  }
908 
909  if (!F->hasExactDefinition())
910  indicatePessimisticFixpoint();
911  }
912 
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Iterate over the collected returned value -> return instruction mapping.
  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Const version of the iteration above.
  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the set of returned calls that could not be resolved (yet).
  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  // NOTE(review): the return type is size_t, so the -1 in the invalid-state
  // case wraps to SIZE_MAX rather than being a literal -1.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }
960 
961  /// See AbstractState::indicateOptimisticFixpoint(...).
962  ChangeStatus indicateOptimisticFixpoint() override {
963  IsFixed = true;
965  }
966 
967  ChangeStatus indicatePessimisticFixpoint() override {
968  IsFixed = true;
969  IsValidState = false;
970  return ChangeStatus::CHANGED;
971  }
972 };
973 
974 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
976 
977  // Bookkeeping.
978  assert(isValidState());
979  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
980  "Number of function with known return values");
981 
982  // Check if we have an assumed unique return value that we could manifest.
983  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
984 
985  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
986  return Changed;
987 
988  // Bookkeeping.
989  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
990  "Number of function with unique return");
991 
992  // Callback to replace the uses of CB with the constant C.
993  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
994  if (CB.getNumUses() == 0 || CB.isMustTailCall())
996  CB.replaceAllUsesWith(&C);
997  return ChangeStatus::CHANGED;
998  };
999 
1000  // If the assumed unique return value is an argument, annotate it.
1001  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1002  getIRPosition() = IRPosition::argument(*UniqueRVArg);
1003  Changed = IRAttribute::manifest(A);
1004  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1005  // We can replace the returned value with the unique returned constant.
1006  Value &AnchorValue = getAnchorValue();
1007  if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1008  for (const Use &U : F->uses())
1009  if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1010  if (CB->isCallee(&U)) {
1011  Constant *RVCCast =
1013  Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1014  }
1015  } else {
1016  assert(isa<CallBase>(AnchorValue) &&
1017  "Expcected a function or call base anchor!");
1018  Constant *RVCCast =
1019  ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1020  Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1021  }
1022  if (Changed == ChangeStatus::CHANGED)
1023  STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1024  "Number of function returns replaced by constant return");
1025  }
1026 
1027  return Changed;
1028 }
1029 
1030 const std::string AAReturnedValuesImpl::getAsStr() const {
1031  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1032  (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1033  ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1034 }
1035 
1037 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1038  // If checkForAllReturnedValues provides a unique value, ignoring potential
1039  // undef values that can also be present, it is assumed to be the actual
1040  // return value and forwarded to the caller of this method. If there are
1041  // multiple, a nullptr is returned indicating there cannot be a unique
1042  // returned value.
1043  Optional<Value *> UniqueRV;
1044 
1045  auto Pred = [&](Value &RV) -> bool {
1046  // If we found a second returned value and neither the current nor the saved
1047  // one is an undef, there is no unique returned value. Undefs are special
1048  // since we can pretend they have any value.
1049  if (UniqueRV.hasValue() && UniqueRV != &RV &&
1050  !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1051  UniqueRV = nullptr;
1052  return false;
1053  }
1054 
1055  // Do not overwrite a value with an undef.
1056  if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1057  UniqueRV = &RV;
1058 
1059  return true;
1060  };
1061 
1062  if (!A.checkForAllReturnedValues(Pred, *this))
1063  UniqueRV = nullptr;
1064 
1065  return UniqueRV;
1066 }
1067 
1068 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1069  const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1070  &Pred) const {
1071  if (!isValidState())
1072  return false;
1073 
1074  // Check all returned values but ignore call sites as long as we have not
1075  // encountered an overdefined one during an update.
1076  for (auto &It : ReturnedValues) {
1077  Value *RV = It.first;
1078 
1079  CallBase *CB = dyn_cast<CallBase>(RV);
1080  if (CB && !UnresolvedCalls.count(CB))
1081  continue;
1082 
1083  if (!Pred(*RV, It.second))
1084  return false;
1085  }
1086 
1087  return true;
1088 }
1089 
1090 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1091  size_t NumUnresolvedCalls = UnresolvedCalls.size();
1092  bool Changed = false;
1093 
1094  // State used in the value traversals starting in returned values.
1095  struct RVState {
1096  // The map in which we collect return values -> return instrs.
1097  decltype(ReturnedValues) &RetValsMap;
1098  // The flag to indicate a change.
1099  bool &Changed;
1100  // The return instrs we come from.
1102  };
1103 
1104  // Callback for a leaf value returned by the associated function.
1105  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1106  auto Size = RVS.RetValsMap[&Val].size();
1107  RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1108  bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1109  RVS.Changed |= Inserted;
1110  LLVM_DEBUG({
1111  if (Inserted)
1112  dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1113  << " => " << RVS.RetInsts.size() << "\n";
1114  });
1115  return true;
1116  };
1117 
1118  // Helper method to invoke the generic value traversal.
1119  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1120  IRPosition RetValPos = IRPosition::value(RV);
1121  return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1122  RVS, VisitValueCB);
1123  };
1124 
1125  // Callback for all "return intructions" live in the associated function.
1126  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1127  ReturnInst &Ret = cast<ReturnInst>(I);
1128  RVState RVS({ReturnedValues, Changed, {}});
1129  RVS.RetInsts.insert(&Ret);
1130  return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1131  };
1132 
1133  // Start by discovering returned values from all live returned instructions in
1134  // the associated function.
1135  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1136  return indicatePessimisticFixpoint();
1137 
1138  // Once returned values "directly" present in the code are handled we try to
1139  // resolve returned calls.
1140  decltype(ReturnedValues) NewRVsMap;
1141  for (auto &It : ReturnedValues) {
1142  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1143  << " by #" << It.second.size() << " RIs\n");
1144  CallBase *CB = dyn_cast<CallBase>(It.first);
1145  if (!CB || UnresolvedCalls.count(CB))
1146  continue;
1147 
1148  if (!CB->getCalledFunction()) {
1149  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1150  << "\n");
1151  UnresolvedCalls.insert(CB);
1152  continue;
1153  }
1154 
1155  // TODO: use the function scope once we have call site AAReturnedValues.
1156  const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1157  *this, IRPosition::function(*CB->getCalledFunction()));
1158  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1159  << static_cast<const AbstractAttribute &>(RetValAA)
1160  << "\n");
1161 
1162  // Skip dead ends, thus if we do not know anything about the returned
1163  // call we mark it as unresolved and it will stay that way.
1164  if (!RetValAA.getState().isValidState()) {
1165  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1166  << "\n");
1167  UnresolvedCalls.insert(CB);
1168  continue;
1169  }
1170 
1171  // Do not try to learn partial information. If the callee has unresolved
1172  // return values we will treat the call as unresolved/opaque.
1173  auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1174  if (!RetValAAUnresolvedCalls.empty()) {
1175  UnresolvedCalls.insert(CB);
1176  continue;
1177  }
1178 
1179  // Now check if we can track transitively returned values. If possible, thus
1180  // if all return value can be represented in the current scope, do so.
1181  bool Unresolved = false;
1182  for (auto &RetValAAIt : RetValAA.returned_values()) {
1183  Value *RetVal = RetValAAIt.first;
1184  if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1185  isa<Constant>(RetVal))
1186  continue;
1187  // Anything that did not fit in the above categories cannot be resolved,
1188  // mark the call as unresolved.
1189  LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1190  "cannot be translated: "
1191  << *RetVal << "\n");
1192  UnresolvedCalls.insert(CB);
1193  Unresolved = true;
1194  break;
1195  }
1196 
1197  if (Unresolved)
1198  continue;
1199 
1200  // Now track transitively returned values.
1201  unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1202  if (NumRetAA == RetValAA.getNumReturnValues()) {
1203  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1204  "changed since it was seen last\n");
1205  continue;
1206  }
1207  NumRetAA = RetValAA.getNumReturnValues();
1208 
1209  for (auto &RetValAAIt : RetValAA.returned_values()) {
1210  Value *RetVal = RetValAAIt.first;
1211  if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1212  // Arguments are mapped to call site operands and we begin the traversal
1213  // again.
1214  bool Unused = false;
1215  RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1216  VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1217  continue;
1218  } else if (isa<CallBase>(RetVal)) {
1219  // Call sites are resolved by the callee attribute over time, no need to
1220  // do anything for us.
1221  continue;
1222  } else if (isa<Constant>(RetVal)) {
1223  // Constants are valid everywhere, we can simply take them.
1224  NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1225  continue;
1226  }
1227  }
1228  }
1229 
1230  // To avoid modifications to the ReturnedValues map while we iterate over it
1231  // we kept record of potential new entries in a copy map, NewRVsMap.
1232  for (auto &It : NewRVsMap) {
1233  assert(!It.second.empty() && "Entry does not add anything.");
1234  auto &ReturnInsts = ReturnedValues[It.first];
1235  for (ReturnInst *RI : It.second)
1236  if (ReturnInsts.insert(RI)) {
1237  LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1238  << *It.first << " => " << *RI << "\n");
1239  Changed = true;
1240  }
1241  }
1242 
1243  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1245 }
1246 
/// Returned values deduction for a function position.
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  // NOTE(review): tracked via the *argument* statistics macro because
  // `returned` is an argument attribute in the IR — confirm intentional.
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1253 
/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  /// Call site positions are not supported; constructing this attribute for
  /// one is a programming error and aborts.
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1276 
1277 /// ------------------------ NoSync Function Attribute -------------------------
1278 
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync (currently
  /// only the mem* family: memcpy, memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
1301 
1302 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1303  if (!I->isAtomic())
1304  return false;
1305 
1306  AtomicOrdering Ordering;
1307  switch (I->getOpcode()) {
1308  case Instruction::AtomicRMW:
1309  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1310  break;
1311  case Instruction::Store:
1312  Ordering = cast<StoreInst>(I)->getOrdering();
1313  break;
1314  case Instruction::Load:
1315  Ordering = cast<LoadInst>(I)->getOrdering();
1316  break;
1317  case Instruction::Fence: {
1318  auto *FI = cast<FenceInst>(I);
1319  if (FI->getSyncScopeID() == SyncScope::SingleThread)
1320  return false;
1321  Ordering = FI->getOrdering();
1322  break;
1323  }
1324  case Instruction::AtomicCmpXchg: {
1325  AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1326  AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1327  // Only if both are relaxed, than it can be treated as relaxed.
1328  // Otherwise it is non-relaxed.
1329  if (Success != AtomicOrdering::Unordered &&
1330  Success != AtomicOrdering::Monotonic)
1331  return true;
1332  if (Failure != AtomicOrdering::Unordered &&
1333  Failure != AtomicOrdering::Monotonic)
1334  return true;
1335  return false;
1336  }
1337  default:
1339  "New atomic operations need to be known in the attributor.");
1340  }
1341 
1342  // Relaxed.
1343  if (Ordering == AtomicOrdering::Unordered ||
1344  Ordering == AtomicOrdering::Monotonic)
1345  return false;
1346  return true;
1347 }
1348 
1349 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1350 /// FIXME: We should ipmrove the handling of intrinsics.
1351 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1352  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1353  switch (II->getIntrinsicID()) {
1354  /// Element wise atomic memory intrinsics are can only be unordered,
1355  /// therefore nosync.
1356  case Intrinsic::memset_element_unordered_atomic:
1357  case Intrinsic::memmove_element_unordered_atomic:
1358  case Intrinsic::memcpy_element_unordered_atomic:
1359  return true;
1360  case Intrinsic::memset:
1361  case Intrinsic::memmove:
1362  case Intrinsic::memcpy:
1363  if (!cast<MemIntrinsic>(II)->isVolatile())
1364  return true;
1365  return false;
1366  default:
1367  return false;
1368  }
1369  }
1370  return false;
1371 }
1372 
1374  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1375  "Calls should not be checked here");
1376 
1377  switch (I->getOpcode()) {
1378  case Instruction::AtomicRMW:
1379  return cast<AtomicRMWInst>(I)->isVolatile();
1380  case Instruction::Store:
1381  return cast<StoreInst>(I)->isVolatile();
1382  case Instruction::Load:
1383  return cast<LoadInst>(I)->isVolatile();
1384  case Instruction::AtomicCmpXchg:
1385  return cast<AtomicCmpXchgInst>(I)->isVolatile();
1386  default:
1387  return false;
1388  }
1389 }
1390 
1391 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1392 
1393  auto CheckRWInstForNoSync = [&](Instruction &I) {
1394  /// We are looking for volatile instructions or Non-Relaxed atomics.
1395  /// FIXME: We should ipmrove the handling of intrinsics.
1396 
1397  if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1398  return true;
1399 
1400  if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1401  if (ICS.hasFnAttr(Attribute::NoSync))
1402  return true;
1403 
1404  const auto &NoSyncAA =
1406  if (NoSyncAA.isAssumedNoSync())
1407  return true;
1408  return false;
1409  }
1410 
1411  if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1412  return true;
1413 
1414  return false;
1415  };
1416 
1417  auto CheckForNoSync = [&](Instruction &I) {
1418  // At this point we handled all read/write effects and they are all
1419  // nosync, so they can be skipped.
1420  if (I.mayReadOrWriteMemory())
1421  return true;
1422 
1423  // non-convergent and readnone imply nosync.
1424  return !ImmutableCallSite(&I).isConvergent();
1425  };
1426 
1427  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1428  !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1429  return indicatePessimisticFixpoint();
1430 
1431  return ChangeStatus::UNCHANGED;
1432 }
1433 
/// NoSync deduction for a function position.
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
1440 
1441 /// NoSync attribute deduction for a call sites.
1442 struct AANoSyncCallSite final : AANoSyncImpl {
1443  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1444 
1445  /// See AbstractAttribute::initialize(...).
1446  void initialize(Attributor &A) override {
1448  Function *F = getAssociatedFunction();
1449  if (!F)
1450  indicatePessimisticFixpoint();
1451  }
1452 
1453  /// See AbstractAttribute::updateImpl(...).
1454  ChangeStatus updateImpl(Attributor &A) override {
1455  // TODO: Once we have call site specific value information we can provide
1456  // call site specific liveness information and then it makes
1457  // sense to specialize attributes for call sites arguments instead of
1458  // redirecting requests to the callee argument.
1459  Function *F = getAssociatedFunction();
1460  const IRPosition &FnPos = IRPosition::function(*F);
1461  auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1462  return clampStateAndIndicateChange(
1463  getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1464  }
1465 
1466  /// See AbstractAttribute::trackStatistics()
1467  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1468 };
1469 
1470 /// ------------------------ No-Free Attributes ----------------------------
1471 
1472 struct AANoFreeImpl : public AANoFree {
1473  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1474 
1475  /// See AbstractAttribute::updateImpl(...).
1476  ChangeStatus updateImpl(Attributor &A) override {
1477  auto CheckForNoFree = [&](Instruction &I) {
1478  ImmutableCallSite ICS(&I);
1479  if (ICS.hasFnAttr(Attribute::NoFree))
1480  return true;
1481 
1482  const auto &NoFreeAA =
1484  return NoFreeAA.isAssumedNoFree();
1485  };
1486 
1487  if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1488  return indicatePessimisticFixpoint();
1489  return ChangeStatus::UNCHANGED;
1490  }
1491 
1492  /// See AbstractAttribute::getAsStr().
1493  const std::string getAsStr() const override {
1494  return getAssumed() ? "nofree" : "may-free";
1495  }
1496 };
1497 
/// NoFree deduction for a function position.
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
1504 
1505 /// NoFree attribute deduction for a call sites.
1506 struct AANoFreeCallSite final : AANoFreeImpl {
1507  AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1508 
1509  /// See AbstractAttribute::initialize(...).
1510  void initialize(Attributor &A) override {
1512  Function *F = getAssociatedFunction();
1513  if (!F)
1514  indicatePessimisticFixpoint();
1515  }
1516 
1517  /// See AbstractAttribute::updateImpl(...).
1518  ChangeStatus updateImpl(Attributor &A) override {
1519  // TODO: Once we have call site specific value information we can provide
1520  // call site specific liveness information and then it makes
1521  // sense to specialize attributes for call sites arguments instead of
1522  // redirecting requests to the callee argument.
1523  Function *F = getAssociatedFunction();
1524  const IRPosition &FnPos = IRPosition::function(*F);
1525  auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1526  return clampStateAndIndicateChange(
1527  getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1528  }
1529 
1530  /// See AbstractAttribute::trackStatistics()
1531  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1532 };
1533 
1534 /// ------------------------ NonNull Argument Attribute ------------------------
/// Determine what the use \p U of \p AssociatedValue in instruction \p I
/// implies about nonnull-ness and dereferenceability. Returns the number of
/// bytes known dereferenceable at this use; \p IsNonNull and \p TrackUse are
/// out-parameters.
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  // TODO: Add GEP support
  TrackUse = false;

  const Function *F = I->getFunction();
  // If null is a defined (usable) pointer in this function, a use does not
  // imply nonnull.
  bool NullPointerIsDefined = F ? F->nullPointerIsDefined() : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Operand bundle uses tell us nothing about the pointer.
    if (ICS.isBundleOperand(U))
      return 0;

    // Being the called operand implies nonnull (unless null is defined here),
    // but no dereferenceable bytes are known.
    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    // For call site arguments, defer to the dereferenceable attribute of the
    // corresponding call site argument position.
    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  int64_t Offset;
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // The access proves offset + store-size bytes reachable from the base
      // pointer; presumably \p I is a load/store — derived from the helper
      // name, confirm against its definition.
      int64_t DerefBytes =
          Offset +
          (int64_t)DL.getTypeStoreSize(
              getPointerOperand(I)->getType()->getPointerElementType());

      IsNonNull |= !NullPointerIsDefined;
      return DerefBytes;
    }
  }

  return 0;
}
1575 struct AANonNullImpl : AANonNull {
1576  AANonNullImpl(const IRPosition &IRP) : AANonNull(IRP) {}
1577 
1578  /// See AbstractAttribute::initialize(...).
1579  void initialize(Attributor &A) override {
1580  if (hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1581  indicateOptimisticFixpoint();
1582  else
1584  }
1585 
1586  /// See AAFromMustBeExecutedContext
1587  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1588  bool IsNonNull = false;
1589  bool TrackUse = false;
1590  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1591  IsNonNull, TrackUse);
1592  takeKnownMaximum(IsNonNull);
1593  return TrackUse;
1594  }
1595 
1596  /// See AbstractAttribute::getAsStr().
1597  const std::string getAsStr() const override {
1598  return getAssumed() ? "nonnull" : "may-null";
1599  }
1600 };
1601 
1602 /// NonNull attribute for a floating value.
1603 struct AANonNullFloating
1604  : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1605  using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1606  AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1607 
1608  /// See AbstractAttribute::initialize(...).
1609  void initialize(Attributor &A) override {
1610  Base::initialize(A);
1611 
1612  if (isAtFixpoint())
1613  return;
1614 
1615  const IRPosition &IRP = getIRPosition();
1616  const Value &V = IRP.getAssociatedValue();
1617  const DataLayout &DL = A.getDataLayout();
1618 
1619  // TODO: This context sensitive query should be removed once we can do
1620  // context sensitive queries in the genericValueTraversal below.
1621  if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
1622  /* TODO: DT */ nullptr))
1623  indicateOptimisticFixpoint();
1624  }
1625 
1626  /// See AbstractAttribute::updateImpl(...).
1627  ChangeStatus updateImpl(Attributor &A) override {
1628  ChangeStatus Change = Base::updateImpl(A);
1629  if (isKnownNonNull())
1630  return Change;
1631 
1632  const DataLayout &DL = A.getDataLayout();
1633 
1634  auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
1635  bool Stripped) -> bool {
1636  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1637  if (!Stripped && this == &AA) {
1638  if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
1639  /* TODO: CtxI */ nullptr,
1640  /* TODO: DT */ nullptr))
1642  } else {
1643  // Use abstract attribute information.
1644  const AANonNull::StateType &NS =
1645  static_cast<const AANonNull::StateType &>(AA.getState());
1646  T ^= NS;
1647  }
1648  return T.isValidState();
1649  };
1650 
1651  StateType T;
1652  if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1653  T, VisitValueCB))
1654  return indicatePessimisticFixpoint();
1655 
1656  return clampStateAndIndicateChange(getState(), T);
1657  }
1658 
1659  /// See AbstractAttribute::trackStatistics()
1660  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1661 };
1662 
/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
1672 
/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                              AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                                AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
1685 
/// NonNull attribute for a call site argument; reuses the floating-value
/// deduction.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
1692 
/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                             AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                               AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
1705 
1706 /// ------------------------ No-Recurse Attributes ----------------------------
1707 
/// Shared base for norecurse deduction; only provides the textual state.
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
1716 
1717 struct AANoRecurseFunction final : AANoRecurseImpl {
1718  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1719 
1720  /// See AbstractAttribute::initialize(...).
1721  void initialize(Attributor &A) override {
1723  if (const Function *F = getAnchorScope())
1724  if (A.getInfoCache().getSccSize(*F) == 1)
1725  return;
1726  indicatePessimisticFixpoint();
1727  }
1728 
1729  /// See AbstractAttribute::updateImpl(...).
1730  ChangeStatus updateImpl(Attributor &A) override {
1731 
1732  auto CheckForNoRecurse = [&](Instruction &I) {
1733  ImmutableCallSite ICS(&I);
1734  if (ICS.hasFnAttr(Attribute::NoRecurse))
1735  return true;
1736 
1737  const auto &NoRecurseAA =
1739  if (!NoRecurseAA.isAssumedNoRecurse())
1740  return false;
1741 
1742  // Recursion to the same function
1743  if (ICS.getCalledFunction() == getAnchorScope())
1744  return false;
1745 
1746  return true;
1747  };
1748 
1749  if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1750  return indicatePessimisticFixpoint();
1751  return ChangeStatus::UNCHANGED;
1752  }
1753 
1754  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1755 };
1756 
1757 /// NoRecurse attribute deduction for a call sites.
1758 struct AANoRecurseCallSite final : AANoRecurseImpl {
1759  AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1760 
1761  /// See AbstractAttribute::initialize(...).
1762  void initialize(Attributor &A) override {
1764  Function *F = getAssociatedFunction();
1765  if (!F)
1766  indicatePessimisticFixpoint();
1767  }
1768 
1769  /// See AbstractAttribute::updateImpl(...).
1770  ChangeStatus updateImpl(Attributor &A) override {
1771  // TODO: Once we have call site specific value information we can provide
1772  // call site specific liveness information and then it makes
1773  // sense to specialize attributes for call sites arguments instead of
1774  // redirecting requests to the callee argument.
1775  Function *F = getAssociatedFunction();
1776  const IRPosition &FnPos = IRPosition::function(*F);
1777  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1778  return clampStateAndIndicateChange(
1779  getState(),
1780  static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1781  }
1782 
1783  /// See AbstractAttribute::trackStatistics()
1784  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1785 };
1786 
1787 /// ------------------------ Will-Return Attributes ----------------------------
1788 
1789 // Helper function that checks whether a function has any cycle.
1790 // TODO: Replace with more efficent code
1791 static bool containsCycle(Function &F) {
1793 
1794  // Traverse BB by dfs and check whether successor is already visited.
1795  for (BasicBlock *BB : depth_first(&F)) {
1796  Visited.insert(BB);
1797  for (auto *SuccBB : successors(BB)) {
1798  if (Visited.count(SuccBB))
1799  return true;
1800  }
1801  }
1802  return false;
1803 }
1804 
1805 // Helper function that checks the function have a loop which might become an
1806 // endless loop
1807 // FIXME: Any cycle is regarded as endless loop for now.
1808 // We have to allow some patterns.
1809 static bool containsPossiblyEndlessLoop(Function *F) {
1810  return !F || !F->hasExactDefinition() || containsCycle(*F);
1811 }
1812 
/// Common implementation of the willreturn deduction.
struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): a line appears to be elided here in this copy (likely the
    // base-class initialize call) -- verify against upstream.

    Function *F = getAssociatedFunction();
    // A function that may loop forever can never be willreturn.
    if (containsPossiblyEndlessLoop(F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // The function is willreturn if every call-like instruction in it is
    // known/assumed to return; for merely-assumed callees we additionally
    // require norecurse so the callee cannot re-enter us indefinitely.
    auto CheckForWillReturn = [&](Instruction &I) {
      // NOTE(review): the declaration of IPos (the call-site IR position for
      // I) appears to be elided in this copy -- verify against upstream.
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
1849 
/// WillReturn attribute deduction for a function; the actual deduction lives
/// in AAWillReturnImpl, this type only contributes statistics tracking.
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
1856 
/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): a line appears to be elided here in this copy (likely the
    // base-class initialize call) -- verify against upstream.
    Function *F = getAssociatedFunction();
    // Without a known callee there is nothing to redirect the query to.
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
    // Clamp our state against the callee's willreturn state.
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};
1886 
1887 /// ------------------------ NoAlias Argument Attribute ------------------------
1888 
1889 struct AANoAliasImpl : AANoAlias {
1890  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
1891 
1892  const std::string getAsStr() const override {
1893  return getAssumed() ? "noalias" : "may-alias";
1894  }
1895 };
1896 
/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): a line appears to be elided here in this copy (likely the
    // base-class initialize call) -- verify against upstream.
    Value &Val = getAssociatedValue();
    // A stack allocation is a distinct object: noalias by definition.
    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    // The default-address-space null pointer cannot alias anything.
    if (isa<ConstantPointerNull>(Val) &&
        Val.getType()->getPointerAddressSpace() == 0)
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // NOTE(review): the statistics-tracking macro invocation appears to be
    // elided in this copy -- verify against upstream.
  }
};
1923 
/// NoAlias attribute for an argument; the actual deduction is provided by the
/// AAArgumentFromCallSiteArguments base, this type only adds statistics.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  AANoAliasArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};
1933 
1934 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
1935  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1936 
1937  /// See AbstractAttribute::initialize(...).
1938  void initialize(Attributor &A) override {
1939  // See callsite argument attribute and callee argument attribute.
1940  ImmutableCallSite ICS(&getAnchorValue());
1941  if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
1942  indicateOptimisticFixpoint();
1943  }
1944 
1945  /// See AbstractAttribute::updateImpl(...).
1946  ChangeStatus updateImpl(Attributor &A) override {
1947  // We can deduce "noalias" if the following conditions hold.
1948  // (i) Associated value is assumed to be noalias in the definition.
1949  // (ii) Associated value is assumed to be no-capture in all the uses
1950  // possibly executed before this callsite.
1951  // (iii) There is no other pointer argument which could alias with the
1952  // value.
1953 
1954  const Value &V = getAssociatedValue();
1955  const IRPosition IRP = IRPosition::value(V);
1956 
1957  // (i) Check whether noalias holds in the definition.
1958 
1959  auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
1960 
1961  if (!NoAliasAA.isAssumedNoAlias())
1962  return indicatePessimisticFixpoint();
1963 
1964  LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
1965  << " is assumed NoAlias in the definition\n");
1966 
1967  // (ii) Check whether the value is captured in the scope using AANoCapture.
1968  // FIXME: This is conservative though, it is better to look at CFG and
1969  // check only uses possibly executed before this callsite.
1970 
1971  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
1972  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
1973  LLVM_DEBUG(
1974  dbgs() << "[Attributor][AANoAliasCSArg] " << V
1975  << " cannot be noalias as it is potentially captured\n");
1976  return indicatePessimisticFixpoint();
1977  }
1978 
1979  // (iii) Check there is no other pointer argument which could alias with the
1980  // value.
1981  ImmutableCallSite ICS(&getAnchorValue());
1982  for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
1983  if (getArgNo() == (int)i)
1984  continue;
1985  const Value *ArgOp = ICS.getArgOperand(i);
1986  if (!ArgOp->getType()->isPointerTy())
1987  continue;
1988 
1989  if (const Function *F = getAnchorScope()) {
1990  if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
1991  bool IsAliasing = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
1992  LLVM_DEBUG(dbgs()
1993  << "[Attributor][NoAliasCSArg] Check alias between "
1994  "callsite arguments "
1995  << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
1996  << getAssociatedValue() << " " << *ArgOp << " => "
1997  << (IsAliasing ? "" : "no-") << "alias \n");
1998 
1999  if (IsAliasing)
2000  continue;
2001  }
2002  }
2003  return indicatePessimisticFixpoint();
2004  }
2005 
2006  return ChangeStatus::UNCHANGED;
2007  }
2008 
2009  /// See AbstractAttribute::trackStatistics()
2010  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2011 };
2012 
2013 /// NoAlias attribute for function return value.
2014 struct AANoAliasReturned final : AANoAliasImpl {
2015  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2016 
2017  /// See AbstractAttribute::updateImpl(...).
2018  virtual ChangeStatus updateImpl(Attributor &A) override {
2019 
2020  auto CheckReturnValue = [&](Value &RV) -> bool {
2021  if (Constant *C = dyn_cast<Constant>(&RV))
2022  if (C->isNullValue() || isa<UndefValue>(C))
2023  return true;
2024 
2025  /// For now, we can only deduce noalias if we have call sites.
2026  /// FIXME: add more support.
2027  ImmutableCallSite ICS(&RV);
2028  if (!ICS)
2029  return false;
2030 
2031  const IRPosition &RVPos = IRPosition::value(RV);
2032  const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2033  if (!NoAliasAA.isAssumedNoAlias())
2034  return false;
2035 
2036  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2037  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2038  };
2039 
2040  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2041  return indicatePessimisticFixpoint();
2042 
2043  return ChangeStatus::UNCHANGED;
2044  }
2045 
2046  /// See AbstractAttribute::trackStatistics()
2047  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2048 };
2049 
/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): a line appears to be elided here in this copy (likely the
    // base-class initialize call) -- verify against upstream.
    Function *F = getAssociatedFunction();
    // Without a known callee there is nothing to redirect the query to.
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
    // Clamp our state against the callee's returned-value noalias state.
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};
2078 
2079 /// -------------------AAIsDead Function Attribute-----------------------
2080 
/// Implementation of the liveness abstract attribute: tracks which basic
/// blocks of a function are assumed live and which instructions are dead
/// because they follow an assumed-noreturn call.
struct AAIsDeadImpl : public AAIsDead {
  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAssociatedFunction();
    // Only functions with a body can be explored.
    if (F && !F->isDeclaration())
      exploreFromEntry(A, F);
  }

  /// Explore the function starting at the entry instruction, recording every
  /// discovered assumed-noreturn call in NoReturnCalls.
  void exploreFromEntry(Attributor &A, const Function *F) {
    ToBeExploredPaths.insert(&(F->getEntryBlock().front()));

    // findNextNoReturn can append new entries to ToBeExploredPaths, so
    // iterate by index rather than by iterator.
    for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[i]))
        NoReturnCalls.insert(NextNoReturnI);

    // Mark the block live after we looked for no-return instructions.
    assumeLive(A, F->getEntryBlock());
  }

  /// Find the next assumed noreturn instruction in the block of \p I starting
  /// from, thus including, \p I.
  ///
  /// The caller is responsible to monitor the ToBeExploredPaths set as new
  /// instructions discovered in other basic block will be placed in there.
  ///
  /// \returns The next assumed noreturn instructions in the block of \p I
  ///          starting from, thus including, \p I.
  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
           std::to_string(NoReturnCalls.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    // NOTE(review): the declaration of HasChanged (a ChangeStatus, used and
    // returned below, presumably initialized to UNCHANGED) appears to be
    // elided in this copy -- verify against upstream.
    Function &F = *getAssociatedFunction();

    // No live block at all means the whole function is dead.
    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    // Cut off everything after each known noreturn call with an unreachable.
    for (const Instruction *NRC : NoReturnCalls) {
      Instruction *I = const_cast<Instruction *>(NRC);
      BasicBlock *BB = I->getParent();
      Instruction *SplitPos = I->getNextNode();
      // TODO: mark stuff before unreachable instructions as dead.

      if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we keep the invoke the split position is at the beginning of the
        // normal destination block (it invokes a noreturn function after all).
        BasicBlock *NormalDestBB = II->getNormalDest();
        SplitPos = &NormalDestBB->front();

        /// Invoke is replaced with a call and unreachable is placed after it if
        /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
        /// and only place an unreachable in the normal successor.
        if (Invoke2CallAllowed) {
          if (II->getCalledFunction()) {
            const IRPosition &IPos = IRPosition::callsite_function(*II);
            const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
            if (AANoUnw.isAssumedNoUnwind()) {
              LLVM_DEBUG(dbgs()
                         << "[AAIsDead] Replace invoke with call inst\n");
              // We do not need an invoke (II) but instead want a call followed
              // by an unreachable. However, we do not remove II as other
              // abstract attributes might have it cached as part of their
              // results. Given that we modify the CFG anyway, we simply keep II
              // around but in a new dead block. To avoid II being live through
              // a different edge we have to ensure the block we place it in is
              // only reached from the current block of II and then not reached
              // at all when we insert the unreachable.
              SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
              // NOTE(review): the creation of the replacement CallInst `CI`
              // appears to be elided in this copy -- verify against upstream.
              CI->insertBefore(II);
              CI->takeName(II);
              II->replaceAllUsesWith(CI);
              SplitPos = CI->getNextNode();
            }
          }
        }

        if (SplitPos == &NormalDestBB->front()) {
          // If this is an invoke of a noreturn function the edge to the normal
          // destination block is dead but not necessarily the block itself.
          // TODO: We need to move to an edge based system during deduction and
          //       also manifest.
          assert(!NormalDestBB->isLandingPad() &&
                 "Expected the normal destination not to be a landingpad!");
          if (NormalDestBB->getUniquePredecessor() == BB) {
            assumeLive(A, *NormalDestBB);
          } else {
            BasicBlock *SplitBB =
                SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
            // The split block is live even if it contains only an unreachable
            // instruction at the end.
            assumeLive(A, *SplitBB);
            SplitPos = SplitBB->getTerminator();
            HasChanged = ChangeStatus::CHANGED;
          }
        }
      }

      // Nothing to do if an unreachable is already in place.
      if (isa_and_nonnull<UnreachableInst>(SplitPos))
        continue;

      BB = SplitPos->getParent();
      SplitBlock(BB, SplitPos);
      changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
      HasChanged = ChangeStatus::CHANGED;
    }

    // Erase every block that was never assumed live.
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAssociatedFunction() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumed(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAssociatedFunction() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it for sure dead.
    // Otherwise, it can still be after noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a noreturn call, than it is live.
    return isAfterNoReturn(I);
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Check if instruction is after noreturn call, in other words, assumed dead.
  bool isAfterNoReturn(const Instruction *I) const;

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal function called from \p BB should now be looked at.
  void assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return;

    // We assume that all of BB is (probably) live now and if there are calls to
    // internal functions we will assume that those are now live as well. This
    // is a performance optimization for blocks with calls to a lot of internal
    // functions. It can however cause dead functions to be treated as live.
    for (const Instruction &I : BB)
      if (ImmutableCallSite ICS = ImmutableCallSite(&I))
        if (const Function *F = ICS.getCalledFunction())
          if (F->hasLocalLinkage())
            // NOTE(review): the statement executed for local-linkage callees
            // (presumably marking the internal function live in the
            // Attributor) appears to be elided in this copy -- verify
            // against upstream.
  }

  /// Collection of to be explored paths.
  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;

  /// Collection of calls with noreturn attribute, assumed or known.
  // NOTE(review): the declaration of the NoReturnCalls member (used as a set
  // throughout this struct) appears to be elided in this copy -- verify
  // against upstream.
};
2288 
/// Liveness deduction for a function; the logic lives in AAIsDeadImpl, this
/// type only contributes statistics tracking.
struct AAIsDeadFunction final : public AAIsDeadImpl {
  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(PartiallyDeadBlocks, Function,
               "Number of basic blocks classified as partially dead");
    BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
  }
};
2299 
2300 bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
2301  const Instruction *PrevI = I->getPrevNode();
2302  while (PrevI) {
2303  if (NoReturnCalls.count(PrevI))
2304  return true;
2305  PrevI = PrevI->getPrevNode();
2306  }
2307  return false;
2308 }
2309 
/// Scan forward from \p I for the next assumed-noreturn call; see the
/// declaration comment in AAIsDeadImpl for the full contract.
const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
                                                  const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  const Function &F = *BB->getParent();

  // Flag to determine if we can change an invoke to a call assuming the callee
  // is nounwind. This is not possible if the personality of the function allows
  // to catch asynchronous exceptions.
  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

  // TODO: We should have a function that determines if an "edge" is dead.
  //       Edges could be from an instruction to the next or from a terminator
  //       to the successor. For now, we need to special case the unwind block
  //       of InvokeInst below.

  while (I) {
    ImmutableCallSite ICS(I);

    if (ICS) {
      const IRPosition &IPos = IRPosition::callsite_function(ICS);
      // Regardless of the no-return property of an invoke instruction we only
      // learn that the regular successor is not reachable through this
      // instruction but the unwind block might still be.
      if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
        // Use nounwind to justify the unwind block is dead as well.
        const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
        if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
          // The invoke may unwind, so its unwind destination stays live and
          // must be explored too.
          assumeLive(A, *Invoke->getUnwindDest());
          ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
        }
      }

      // A noreturn call terminates the search; everything after it is dead.
      const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
      if (NoReturnAA.isAssumedNoReturn())
        return I;
    }

    I = I->getNextNode();
  }

  // get new paths (reachable blocks).
  for (const BasicBlock *SuccBB : successors(BB)) {
    assumeLive(A, *SuccBB);
    ToBeExploredPaths.insert(&SuccBB->front());
  }

  // No noreturn instruction found.
  return nullptr;
}
2359 
/// Re-check each recorded noreturn call and explore newly reachable paths.
ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
  // NOTE(review): the declaration of the local `Status` (a ChangeStatus,
  // presumably initialized to UNCHANGED) appears to be elided in this copy --
  // verify against upstream.

  // Temporary collection to iterate over existing noreturn instructions. This
  // will allow easier modification of NoReturnCalls collection
  SmallVector<const Instruction *, 8> NoReturnChanged;

  for (const Instruction *I : NoReturnCalls)
    NoReturnChanged.push_back(I);

  for (const Instruction *I : NoReturnChanged) {
    size_t Size = ToBeExploredPaths.size();

    // Re-run the scan starting at I: the assumed-noreturn answer may have
    // changed since the last iteration.
    const Instruction *NextNoReturnI = findNextNoReturn(A, I);
    if (NextNoReturnI != I) {
      Status = ChangeStatus::CHANGED;
      NoReturnCalls.remove(I);
      if (NextNoReturnI)
        NoReturnCalls.insert(NextNoReturnI);
    }

    // Explore new paths.
    while (Size != ToBeExploredPaths.size()) {
      Status = ChangeStatus::CHANGED;
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[Size++]))
        NoReturnCalls.insert(NextNoReturnI);
    }
  }

  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
                    << AssumedLiveBlocks.size() << " Total number of blocks: "
                    << getAssociatedFunction()->size() << "\n");

  // If we know everything is live there is no need to query for liveness.
  if (NoReturnCalls.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
    // Indicating a pessimistic fixpoint will cause the state to be "invalid"
    // which will cause the Attributor to not return the AAIsDead on request,
    // which will prevent us from querying isAssumedDead().
    indicatePessimisticFixpoint();
    assert(!isValidState() && "Expected an invalid state!");
    Status = ChangeStatus::CHANGED;
  }

  return Status;
}
2407 
/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadImpl {
  AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  /// Nothing to track; this position is never reached (see initialize).
  void trackStatistics() const override {}
};
2430 
2431 /// -------------------- Dereferenceable Argument Attribute --------------------
2432 
2433 template <>
2434 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2435  const DerefState &R) {
2436  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
2437  S.DerefBytesState, R.DerefBytesState);
2438  ChangeStatus CS1 =
2439  clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
2440  return CS0 | CS1;
2441 }
2442 
/// Common base for the dereferenceable deduction; the state is a DerefState.
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): the declaration of the local attribute vector `Attrs`
    // appears to be elided in this copy -- verify against upstream.
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    // Seed the known dereferenceable bytes from existing IR attributes.
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    // Cache the nonnull AA; it selects between dereferenceable and
    // dereferenceable_or_null in getDeducedAttributes below.
    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());

    const IRPosition &IRP = this->getIRPosition();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    const Function *FnScope = IRP.getAnchorScope();
    // Function-interface positions are only sound to annotate when the
    // definition is exact.
    if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// See AAFromMustBeExecutedContext
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
    takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// Emit dereferenceable or dereferenceable_or_null depending on nonnull.
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      // NOTE(review): the call creating the `dereferenceable` attribute
      // appears to be elided in this copy -- verify against upstream.
          Ctx, getAssumedDereferenceableBytes()));
    else
      // NOTE(review): the call creating the `dereferenceable_or_null`
      // attribute appears to be elided in this copy -- verify against
      // upstream.
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
2501 
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating
    : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // First let the must-be-executed-context machinery harvest information
    // from uses, then combine with a traversal of the underlying values.
    ChangeStatus Change = Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
      // NOTE(review): the initializer of IdxWidth (presumably the pointer
      // index width for V's address space from the DataLayout) appears to be
      // elided in this copy -- verify against upstream.
      APInt Offset(IdxWidth, 0);
      const Value *Base =
      // NOTE(review): the initializer of Base (presumably stripping constant
      // in-bounds offsets from V while accumulating them into Offset) appears
      // to be elided in this copy -- verify against upstream.

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        Offset = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return Change | clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
2579 
/// Dereferenceable attribute for a return value; deduction is handled by the
/// AAReturnedFromReturnedValues base, this type only adds statistics.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                   DerefState> {
  AADereferenceableReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                     DerefState>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};
2593 
/// Dereferenceable attribute for an argument; deduction is handled by the
/// base class, this type only adds statistics.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
          AADereferenceable, AADereferenceableImpl, DerefState> {
  using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
      AADereferenceable, AADereferenceableImpl, DerefState>;
  AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};
2607 
/// Dereferenceable attribute for a call site argument; reuses the
/// floating-value deduction for the argument operand.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP)
      : AADereferenceableFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};
2618 
/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final
    : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
          AADereferenceable, AADereferenceableImpl> {
  using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
      AADereferenceable, AADereferenceableImpl>;
  AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // Without a known callee there is nothing to propagate from below; give
    // up right away.
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.

    // First run the generic must-be-executed-context propagation of the base
    // class, then additionally clamp our state against what is known/assumed
    // for the callee's returned position.
    ChangeStatus Change = Base::updateImpl(A);
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
    return Change |
           clampStateAndIndicateChange(
               getState(), static_cast<const DerefState &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};
2656 
2657 // ------------------------ Align Argument Attribute ------------------------
2658 
2659 struct AAAlignImpl : AAAlign {
2660  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
2661 
2662  // Max alignemnt value allowed in IR
2663  static const unsigned MAX_ALIGN = 1U << 29;
2664 
2665  /// See AbstractAttribute::initialize(...).
2666  void initialize(Attributor &A) override {
2667  takeAssumedMinimum(MAX_ALIGN);
2668 
2670  getAttrs({Attribute::Alignment}, Attrs);
2671  for (const Attribute &Attr : Attrs)
2672  takeKnownMaximum(Attr.getValueAsInt());
2673 
2674  if (getIRPosition().isFnInterfaceKind() &&
2675  (!getAssociatedFunction() ||
2676  !getAssociatedFunction()->hasExactDefinition()))
2677  indicatePessimisticFixpoint();
2678  }
2679 
2680  /// See AbstractAttribute::manifest(...).
2681  ChangeStatus manifest(Attributor &A) override {
2683 
2684  // Check for users that allow alignment annotations.
2685  Value &AnchorVal = getIRPosition().getAnchorValue();
2686  for (const Use &U : AnchorVal.uses()) {
2687  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
2688  if (SI->getPointerOperand() == &AnchorVal)
2689  if (SI->getAlignment() < getAssumedAlign()) {
2691  "Number of times alignemnt added to a store");
2692  SI->setAlignment(Align(getAssumedAlign()));
2693  Changed = ChangeStatus::CHANGED;
2694  }
2695  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
2696  if (LI->getPointerOperand() == &AnchorVal)
2697  if (LI->getAlignment() < getAssumedAlign()) {
2698  LI->setAlignment(Align(getAssumedAlign()));
2700  "Number of times alignemnt added to a load");
2701  Changed = ChangeStatus::CHANGED;
2702  }
2703  }
2704  }
2705 
2706  return AAAlign::manifest(A) | Changed;
2707  }
2708 
2709  // TODO: Provide a helper to determine the implied ABI alignment and check in
2710  // the existing manifest method and a new one for AAAlignImpl that value
2711  // to avoid making the alignment explicit if it did not improve.
2712 
2713  /// See AbstractAttribute::getDeducedAttributes
2714  virtual void
2715  getDeducedAttributes(LLVMContext &Ctx,
2716  SmallVectorImpl<Attribute> &Attrs) const override {
2717  if (getAssumedAlign() > 1)
2718  Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
2719  }
2720 
2721  /// See AbstractAttribute::getAsStr().
2722  const std::string getAsStr() const override {
2723  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
2724  "-" + std::to_string(getAssumedAlign()) + ">")
2725  : "unknown-align";
2726  }
2727 };
2728 
2729 /// Align attribute for a floating value.
2730 struct AAAlignFloating : AAAlignImpl {
2731  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2732 
2733  /// See AbstractAttribute::updateImpl(...).
2734  ChangeStatus updateImpl(Attributor &A) override {
2735  const DataLayout &DL = A.getDataLayout();
2736 
2737  auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
2738  bool Stripped) -> bool {
2739  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
2740  if (!Stripped && this == &AA) {
2741  // Use only IR information if we did not strip anything.
2744  } else {
2745  // Use abstract attribute information.
2746  const AAAlign::StateType &DS =
2747  static_cast<const AAAlign::StateType &>(AA.getState());
2748  T ^= DS;
2749  }
2750  return T.isValidState();
2751  };
2752 
2753  StateType T;
2754  if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
2755  VisitValueCB))
2756  return indicatePessimisticFixpoint();
2757 
2758  // TODO: If we know we visited all incoming values, thus no are assumed
2759  // dead, we can take the known information from the state T.
2760  return clampStateAndIndicateChange(getState(), T);
2761  }
2762 
2763  /// See AbstractAttribute::trackStatistics()
2764  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
2765 };
2766 
2767 /// Align attribute for function return value.
2768 struct AAAlignReturned final
2769  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
2770  AAAlignReturned(const IRPosition &IRP)
2771  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
2772 
2773  /// See AbstractAttribute::trackStatistics()
2774  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
2775 };
2776 
2777 /// Align attribute for function argument.
2778 struct AAAlignArgument final
2779  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
2780  AAAlignArgument(const IRPosition &IRP)
2781  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
2782 
2783  /// See AbstractAttribute::trackStatistics()
2784  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
2785 };
2786 
/// Align attribute for a call site argument: deduced like a floating value,
/// but manifested as an IR attribute.
struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Explicitly defer to the attribute-emitting manifest in AAAlignImpl.
    return AAAlignImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};
2798 
2799 /// Align attribute deduction for a call site return value.
2800 struct AAAlignCallSiteReturned final : AAAlignImpl {
2801  AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2802 
2803  /// See AbstractAttribute::initialize(...).
2804  void initialize(Attributor &A) override {
2806  Function *F = getAssociatedFunction();
2807  if (!F)
2808  indicatePessimisticFixpoint();
2809  }
2810 
2811  /// See AbstractAttribute::updateImpl(...).
2812  ChangeStatus updateImpl(Attributor &A) override {
2813  // TODO: Once we have call site specific value information we can provide
2814  // call site specific liveness information and then it makes
2815  // sense to specialize attributes for call sites arguments instead of
2816  // redirecting requests to the callee argument.
2817  Function *F = getAssociatedFunction();
2818  const IRPosition &FnPos = IRPosition::returned(*F);
2819  auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
2820  return clampStateAndIndicateChange(
2821  getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
2822  }
2823 
2824  /// See AbstractAttribute::trackStatistics()
2825  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
2826 };
2827 
2828 /// ------------------ Function No-Return Attribute ----------------------------
2829 struct AANoReturnImpl : public AANoReturn {
2830  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
2831 
2832  /// See AbstractAttribute::getAsStr().
2833  const std::string getAsStr() const override {
2834  return getAssumed() ? "noreturn" : "may-return";
2835  }
2836 
2837  /// See AbstractAttribute::updateImpl(Attributor &A).
2838  virtual ChangeStatus updateImpl(Attributor &A) override {
2839  auto CheckForNoReturn = [](Instruction &) { return false; };
2840  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
2841  {(unsigned)Instruction::Ret}))
2842  return indicatePessimisticFixpoint();
2843  return ChangeStatus::UNCHANGED;
2844  }
2845 };
2846 
/// NoReturn attribute deduction for a function.
///
/// All deduction happens in AANoReturnImpl::updateImpl; this class only adds
/// statistics tracking.
struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};
2853 
2854 /// NoReturn attribute deduction for a call sites.
2855 struct AANoReturnCallSite final : AANoReturnImpl {
2856  AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2857 
2858  /// See AbstractAttribute::initialize(...).
2859  void initialize(Attributor &A) override {
2861  Function *F = getAssociatedFunction();
2862  if (!F)
2863  indicatePessimisticFixpoint();
2864  }
2865 
2866  /// See AbstractAttribute::updateImpl(...).
2867  ChangeStatus updateImpl(Attributor &A) override {
2868  // TODO: Once we have call site specific value information we can provide
2869  // call site specific liveness information and then it makes
2870  // sense to specialize attributes for call sites arguments instead of
2871  // redirecting requests to the callee argument.
2872  Function *F = getAssociatedFunction();
2873  const IRPosition &FnPos = IRPosition::function(*F);
2874  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
2875  return clampStateAndIndicateChange(
2876  getState(),
2877  static_cast<const AANoReturn::StateType &>(FnAA.getState()));
2878  }
2879 
2880  /// See AbstractAttribute::trackStatistics()
2881  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
2882 };
2883 
2884 /// ----------------------- Variable Capturing ---------------------------------
2885 
/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): a statement (likely a base-class initialize call) appears
    // to have been elided at the top of this function in this copy — confirm
    // against upstream.

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const IRPosition &IRP = getIRPosition();
    // For arguments we look at the callee, for every other position at the
    // enclosing scope.
    const Function *F =
        getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(*F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    // Only argument positions can carry (no-)capture attributes in IR.
    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const Function &F,
                                                   IntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
2970 
/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, IntegerState &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V maybe captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      // NOTE(review): the non-pointer handling and the return statement of
      // this function appear to have been elided in this copy — restore from
      // upstream before building.
    }
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    // NOTE(review): the conservative state update performed here appears to
    // have been elided in this copy — restore from upstream before building.
  }

  /// See CaptureTracker::isDereferenceableOrNull(...).
  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    // NOTE(review): the condition guarding this early return appears to have
    // been elided in this copy — restore from upstream before building.
      return true;
    const auto &DerefAA =
        A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have a abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness.
    return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    // NOTE(review): the assumed-bit removals controlled by the conditions
    // below and the final return appear to have been elided in this copy —
    // restore from upstream before building.
    if (CapturedInMem)
    if (CapturedInInt)
    if (CapturedInRet)
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  IntegerState &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};
3115 
ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  // For arguments we track the callee-side argument; for all other positions
  // the associated value directly.
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));

  // NOTE(review): the declaration of the temporary state `T` used below
  // appears to have been elided in this copy — restore from upstream.
  // TODO: Once we have memory behavior attributes we should use them here
  // similar to the reasoning in
  // AANoCaptureImpl::determineFunctionCaptureCapabilities(...).

  // TODO: Use the AAReturnedValues to learn if the argument can return or
  // not.

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  // NOTE(review): the declaration of `S` (a reference to this attribute's
  // state) and the CHANGED arm of the ternary below appear to have been
  // elided in this copy — restore from upstream.
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
}
3157 
/// NoCapture attribute for function arguments.
///
/// Deduction is done by the shared AANoCaptureImpl::updateImpl; this class
/// only contributes statistics tracking.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};
3165 
3166 /// NoCapture attribute for call site arguments.
3167 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3168  AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3169 
3170  /// See AbstractAttribute::updateImpl(...).
3171  ChangeStatus updateImpl(Attributor &A) override {
3172  // TODO: Once we have call site specific value information we can provide
3173  // call site specific liveness information and then it makes
3174  // sense to specialize attributes for call sites arguments instead of
3175  // redirecting requests to the callee argument.
3176  Argument *Arg = getAssociatedArgument();
3177  if (!Arg)
3178  return indicatePessimisticFixpoint();
3179  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3180  auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3181  return clampStateAndIndicateChange(
3182  getState(),
3183  static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3184  }
3185 
3186  /// See AbstractAttribute::trackStatistics()
3187  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
3188 };
3189 
3190 /// NoCapture attribute for floating values.
3191 struct AANoCaptureFloating final : AANoCaptureImpl {
3192  AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3193 
3194  /// See AbstractAttribute::trackStatistics()
3195  void trackStatistics() const override {
3197  }
3198 };
3199 
/// NoCapture attribute for function return value.
///
/// The no-capture attribute is not applicable to a function's return
/// position, so every entry point asserts unconditionally.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
3219 
/// NoCapture attribute deduction for a call site return value.
///
/// Deduction is inherited from AANoCaptureImpl; only statistics differ.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};
3229 
3230 /// ------------------ Value Simplify Attribute ----------------------------
3231 struct AAValueSimplifyImpl : AAValueSimplify {
3232  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3233 
3234  /// See AbstractAttribute::getAsStr().
3235  const std::string getAsStr() const override {
3236  return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3237  : "not-simple";
3238  }
3239 
3240  /// See AbstractAttribute::trackStatistics()
3241  void trackStatistics() const override {}
3242 
3243  /// See AAValueSimplify::getAssumedSimplifiedValue()
3244  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3245  if (!getAssumed())
3246  return const_cast<Value *>(&getAssociatedValue());
3247  return SimplifiedAssociatedValue;
3248  }
3249  void initialize(Attributor &A) override {}
3250 
3251  /// Helper function for querying AAValueSimplify and updating candicate.
3252  /// \param QueryingValue Value trying to unify with SimplifiedValue
3253  /// \param AccumulatedSimplifiedValue Current simplification result.
3254  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3255  Value &QueryingValue,
3256  Optional<Value *> &AccumulatedSimplifiedValue) {
3257  // FIXME: Add a typecast support.
3258 
3259  auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3260  QueryingAA, IRPosition::value(QueryingValue));
3261 
3262  Optional<Value *> QueryingValueSimplified =
3263  ValueSimpifyAA.getAssumedSimplifiedValue(A);
3264 
3265  if (!QueryingValueSimplified.hasValue())
3266  return true;
3267 
3268  if (!QueryingValueSimplified.getValue())
3269  return false;
3270 
3271  Value &QueryingValueSimplifiedUnwrapped =
3272  *QueryingValueSimplified.getValue();
3273 
3274  if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3275  return true;
3276 
3277  if (AccumulatedSimplifiedValue.hasValue())
3278  return AccumulatedSimplifiedValue == QueryingValueSimplified;
3279 
3280  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3281  << " is assumed to be "
3282  << QueryingValueSimplifiedUnwrapped << "\n");
3283 
3284  AccumulatedSimplifiedValue = QueryingValueSimplified;
3285  return true;
3286  }
3287 
3288  /// See AbstractAttribute::manifest(...).
3289  ChangeStatus manifest(Attributor &A) override {
3291 
3292  if (!SimplifiedAssociatedValue.hasValue() ||
3293  !SimplifiedAssociatedValue.getValue())
3294  return Changed;
3295 
3296  if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3297  // We can replace the AssociatedValue with the constant.
3298  Value &V = getAssociatedValue();
3299  if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3300  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3301  << "\n");
3302  V.replaceAllUsesWith(C);
3303  Changed = ChangeStatus::CHANGED;
3304  }
3305  }
3306 
3307  return Changed | AAValueSimplify::manifest(A);
3308  }
3309 
3310 protected:
3311  // An assumed simplified value. Initially, it is set to Optional::None, which
3312  // means that the value is not clear under current assumption. If in the
3313  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3314  // returns orignal associated value.
3315  Optional<Value *> SimplifiedAssociatedValue;
3316 };
3317 
3318 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3319  AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3320 
3321  /// See AbstractAttribute::updateImpl(...).
3322  ChangeStatus updateImpl(Attributor &A) override {
3323  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3324 
3325  auto PredForCallSite = [&](AbstractCallSite ACS) {
3326  // Check if we have an associated argument or not (which can happen for
3327  // callback calls).
3328  if (Value *ArgOp = ACS.getCallArgOperand(getArgNo()))
3329  return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3330  return false;
3331  };
3332 
3333  if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3334  return indicatePessimisticFixpoint();
3335 
3336  // If a candicate was found in this update, return CHANGED.
3337  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3340  }
3341 
3342  /// See AbstractAttribute::trackStatistics()
3343  void trackStatistics() const override {
3344  STATS_DECLTRACK_ARG_ATTR(value_simplify)
3345  }
3346 };
3347 
3348 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3349  AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3350 
3351  /// See AbstractAttribute::updateImpl(...).
3352  ChangeStatus updateImpl(Attributor &A) override {
3353  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3354 
3355  auto PredForReturned = [&](Value &V) {
3356  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3357  };
3358 
3359  if (!A.checkForAllReturnedValues(PredForReturned, *this))
3360  return indicatePessimisticFixpoint();
3361 
3362  // If a candicate was found in this update, return CHANGED.
3363  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3366  }
3367  /// See AbstractAttribute::trackStatistics()
3368  void trackStatistics() const override {
3369  STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3370  }
3371 };
3372 
3373 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3374  AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3375 
3376  /// See AbstractAttribute::initialize(...).
3377  void initialize(Attributor &A) override {
3378  Value &V = getAnchorValue();
3379 
3380  // TODO: add other stuffs
3381  if (isa<Constant>(V) || isa<UndefValue>(V))
3382  indicatePessimisticFixpoint();
3383  }
3384 
3385  /// See AbstractAttribute::updateImpl(...).
3386  ChangeStatus updateImpl(Attributor &A) override {
3387  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3388 
3389  auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3390  auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3391  if (!Stripped && this == &AA) {
3392  // TODO: Look the instruction and check recursively.
3393  LLVM_DEBUG(
3394  dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3395  << V << "\n");
3396  indicatePessimisticFixpoint();
3397  return false;
3398  }
3399  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3400  };
3401 
3402  if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3403  A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3404  VisitValueCB))
3405  return indicatePessimisticFixpoint();
3406 
3407  // If a candicate was found in this update, return CHANGED.
3408 
3409  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3412  }
3413 
3414  /// See AbstractAttribute::trackStatistics()
3415  void trackStatistics() const override {
3416  STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
3417  }
3418 };
3419 
3420 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
3421  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3422 
3423  /// See AbstractAttribute::initialize(...).
3424  void initialize(Attributor &A) override {
3425  SimplifiedAssociatedValue = &getAnchorValue();
3426  indicateOptimisticFixpoint();
3427  }
3428  /// See AbstractAttribute::initialize(...).
3429  ChangeStatus updateImpl(Attributor &A) override {
3431  "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
3432  }
3433  /// See AbstractAttribute::trackStatistics()
3434  void trackStatistics() const override {
3435  STATS_DECLTRACK_FN_ATTR(value_simplify)
3436  }
3437 };
3438 
/// Value simplification for a call site (function) position; trivially fixed
/// by the inherited AAValueSimplifyFunction::initialize, only statistics
/// differ.
struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP)
      : AAValueSimplifyFunction(IRP) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};
3447 
/// Value simplification for a call site return value; reuses the function
/// return logic of AAValueSimplifyReturned, only statistics differ.
struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
      : AAValueSimplifyReturned(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};
/// Value simplification for a call site argument; reuses the floating-value
/// logic of AAValueSimplifyFloating, only statistics differ.
struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
      : AAValueSimplifyFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};
3464 
3465 /// ----------------------- Heap-To-Stack Conversion ---------------------------
3466 struct AAHeapToStackImpl : public AAHeapToStack {
3467  AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
3468 
3469  const std::string getAsStr() const override {
3470  return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
3471  }
3472 
3473  ChangeStatus manifest(Attributor &A) override {
3474  assert(getState().isValidState() &&
3475  "Attempted to manifest an invalid state!");
3476 
3478  Function *F = getAssociatedFunction();
3479  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3480 
3481  for (Instruction *MallocCall : MallocCalls) {
3482  // This malloc cannot be replaced.
3483  if (BadMallocCalls.count(MallocCall))
3484  continue;
3485 
3486  for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
3487  LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
3488  A.deleteAfterManifest(*FreeCall);
3489  HasChanged = ChangeStatus::CHANGED;
3490  }
3491 
3492  LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
3493  << "\n");
3494 
3495  Constant *Size;
3496  if (isCallocLikeFn(MallocCall, TLI)) {
3497  auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
3498  auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
3499  APInt TotalSize = SizeT->getValue() * Num->getValue();
3500  Size =
3501  ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
3502  } else {
3503  Size = cast<ConstantInt>(MallocCall->getOperand(0));
3504  }
3505 
3506  unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
3507  Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
3508  Size, "", MallocCall->getNextNode());
3509 
3510  if (AI->getType() != MallocCall->getType())
3511  AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
3512  AI->getNextNode());
3513 
3514  MallocCall->replaceAllUsesWith(AI);
3515 
3516  if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
3517  auto *NBB = II->getNormalDest();
3518  BranchInst::Create(NBB, MallocCall->getParent());
3519  A.deleteAfterManifest(*MallocCall);
3520  } else {
3521  A.deleteAfterManifest(*MallocCall);
3522  }
3523 
3524  if (isCallocLikeFn(MallocCall, TLI)) {
3525  auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
3526  AI->getNextNode());
3527  Value *Ops[] = {
3528  BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
3530 
3531  Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
3532  Module *M = F->getParent();
3533  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
3534  CallInst::Create(Fn, Ops, "", BI->getNextNode());
3535  }
3536  HasChanged = ChangeStatus::CHANGED;
3537  }
3538 
3539  return HasChanged;
3540  }
3541 
3542  /// Collection of all malloc calls in a function.
3544 
3545  /// Collection of malloc calls that cannot be converted.
3546  DenseSet<const Instruction *> BadMallocCalls;
3547 
3548  /// A map for each malloc call to the set of associated free calls.
3550 
3551  ChangeStatus updateImpl(Attributor &A) override;
3552 };
3553 
3554 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
3555  const Function *F = getAssociatedFunction();
3556  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3557 
3558  auto UsesCheck = [&](Instruction &I) {
3560  SmallVector<const Use *, 8> Worklist;
3561 
3562  for (Use &U : I.uses())
3563  Worklist.push_back(&U);
3564 
3565  while (!Worklist.empty()) {
3566  const Use *U = Worklist.pop_back_val();
3567  if (!Visited.insert(U).second)
3568  continue;
3569 
3570  auto *UserI = U->getUser();
3571 
3572  if (isa<LoadInst>(UserI))
3573  continue;
3574  if (auto *SI = dyn_cast<StoreInst>(UserI)) {
3575  if (SI->getValueOperand() == U->get()) {
3576  LLVM_DEBUG(dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n");
3577  return false;
3578  }
3579  // A store into the malloc'ed memory is fine.
3580  continue;
3581  }
3582 
3583  // NOTE: Right now, if a function that has malloc pointer as an argument
3584  // frees memory, we assume that the malloc pointer is freed.
3585 
3586  // TODO: Add nofree callsite argument attribute to indicate that pointer
3587  // argument is not freed.
3588  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3589  if (!CB->isArgOperand(U))
3590  continue;
3591 
3592  if (CB->isLifetimeStartOrEnd())
3593  continue;
3594 
3595  // Record malloc.
3596  if (isFreeCall(UserI, TLI)) {
3597  FreesForMalloc[&I].insert(
3598  cast<Instruction>(const_cast<User *>(UserI)));
3599  continue;
3600  }
3601 
3602  // If a function does not free memory we are fine
3603  const auto &NoFreeAA =
3605 
3606  unsigned ArgNo = U - CB->arg_begin();
3607  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3608  *this, IRPosition::callsite_argument(*CB, ArgNo));
3609 
3610  if (!NoCaptureAA.isAssumedNoCapture() || !NoFreeAA.isAssumedNoFree()) {
3611  LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
3612  return false;
3613  }
3614  continue;
3615  }
3616 
3617  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI)) {
3618  for (Use &U : UserI->uses())
3619  Worklist.push_back(&U);
3620  continue;
3621  }
3622 
3623  // Unknown user.
3624  LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
3625  return false;
3626  }
3627  return true;
3628  };
3629 
3630  auto MallocCallocCheck = [&](Instruction &I) {
3631  if (BadMallocCalls.count(&I))
3632  return true;
3633 
3634  bool IsMalloc = isMallocLikeFn(&I, TLI);
3635  bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
3636  if (!IsMalloc && !IsCalloc) {
3637  BadMallocCalls.insert(&I);
3638  return true;
3639  }
3640 
3641  if (IsMalloc) {
3642  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
3643  if (Size->getValue().sle(MaxHeapToStackSize))
3644  if (UsesCheck(I)) {
3645  MallocCalls.insert(&I);
3646  return true;
3647  }
3648  } else if (IsCalloc) {
3649  bool Overflow = false;
3650  if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
3651  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
3652  if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
3653  .sle(MaxHeapToStackSize))
3654  if (!Overflow && UsesCheck(I)) {
3655  MallocCalls.insert(&I);
3656  return true;
3657  }
3658  }
3659 
3660  BadMallocCalls.insert(&I);
3661  return true;
3662  };
3663 
3664  size_t NumBadMallocs = BadMallocCalls.size();
3665 
3666  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
3667 
3668  if (NumBadMallocs != BadMallocCalls.size())
3669  return ChangeStatus::CHANGED;
3670 
3671  return ChangeStatus::UNCHANGED;
3672 }
3673 
3674 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
3675  AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
3676 
3677  /// See AbstractAttribute::trackStatistics()
3678  void trackStatistics() const override {
3679  STATS_DECL(MallocCalls, Function,
3680  "Number of MallocCalls converted to allocas");
3681  BUILD_STAT_NAME(MallocCalls, Function) += MallocCalls.size();
3682  }
3683 };
3684 
3685 /// -------------------- Memory Behavior Attributes ----------------------------
3686 /// Includes read-none, read-only, and write-only.
3687 /// ----------------------------------------------------------------------------
3688 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
3689  AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
3690 
3691  /// See AbstractAttribute::initialize(...).
3692  void initialize(Attributor &A) override {
3693  intersectAssumedBits(BEST_STATE);
3694  getKnownStateFromValue(getIRPosition(), getState());
3696  }
3697 
3698  /// Return the memory behavior information encoded in the IR for \p IRP.
3699  static void getKnownStateFromValue(const IRPosition &IRP,
3700  IntegerState &State) {
3702  IRP.getAttrs(AttrKinds, Attrs);
3703  for (const Attribute &Attr : Attrs) {
3704  switch (Attr.getKindAsEnum()) {
3705  case Attribute::ReadNone:
3706  State.addKnownBits(NO_ACCESSES);
3707  break;
3708  case Attribute::ReadOnly:
3709  State.addKnownBits(NO_WRITES);
3710  break;
3711  case Attribute::WriteOnly:
3712  State.addKnownBits(NO_READS);
3713  break;
3714  default:
3715  llvm_unreachable("Unexpcted attribute!");
3716  }
3717  }
3718 
3719  if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
3720  if (!I->mayReadFromMemory())
3721  State.addKnownBits(NO_READS);
3722  if (!I->mayWriteToMemory())
3723  State.addKnownBits(NO_WRITES);
3724  }
3725  }
3726 
3727  /// See AbstractAttribute::getDeducedAttributes(...).
3728  void getDeducedAttributes(LLVMContext &Ctx,
3729  SmallVectorImpl<Attribute> &Attrs) const override {
3730  assert(Attrs.size() == 0);
3731  if (isAssumedReadNone())
3732  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
3733  else if (isAssumedReadOnly())
3734  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
3735  else if (isAssumedWriteOnly())
3736  Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
3737  assert(Attrs.size() <= 1);
3738  }
3739 
3740  /// See AbstractAttribute::manifest(...).
3741  ChangeStatus manifest(Attributor &A) override {
3742  IRPosition &IRP = getIRPosition();
3743 
3744  // Check if we would improve the existing attributes first.
3745  SmallVector<Attribute, 4> DeducedAttrs;
3746  getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
3747  if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
3748  return IRP.hasAttr(Attr.getKindAsEnum(),
3749  /* IgnoreSubsumingPositions */ true);
3750  }))
3751  return ChangeStatus::UNCHANGED;
3752 
3753  // Clear existing attributes.
3754  IRP.removeAttrs(AttrKinds);
3755 
3756  // Use the generic manifest method.
3757  return IRAttribute::manifest(A);
3758  }
3759 
3760  /// See AbstractState::getAsStr().
3761  const std::string getAsStr() const override {
3762  if (isAssumedReadNone())
3763  return "readnone";
3764  if (isAssumedReadOnly())
3765  return "readonly";
3766  if (isAssumedWriteOnly())
3767  return "writeonly";
3768  return "may-read/write";
3769  }
3770 
3771  /// The set of IR attributes AAMemoryBehavior deals with.
3772  static const Attribute::AttrKind AttrKinds[3];
3773 };
3774 
3775 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
3776  Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
3777 
3778 /// Memory behavior attribute for a floating value.
3779 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
3780  AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3781 
3782  /// See AbstractAttribute::initialize(...).
3783  void initialize(Attributor &A) override {
3785  // Initialize the use vector with all direct uses of the associated value.
3786  for (const Use &U : getAssociatedValue().uses())
3787  Uses.insert(&U);
3788  }
3789 
3790  /// See AbstractAttribute::updateImpl(...).
3791  ChangeStatus updateImpl(Attributor &A) override;
3792 
3793  /// See AbstractAttribute::trackStatistics()
3794  void trackStatistics() const override {
3795  if (isAssumedReadNone())
3797  else if (isAssumedReadOnly())
3799  else if (isAssumedWriteOnly())
3801  }
3802 
3803 private:
3804  /// Return true if users of \p UserI might access the underlying
3805  /// variable/location described by \p U and should therefore be analyzed.
3806  bool followUsersOfUseIn(Attributor &A, const Use *U,
3807  const Instruction *UserI);
3808 
3809  /// Update the state according to the effect of use \p U in \p UserI.
3810  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
3811 
3812 protected:
3813  /// Container for (transitive) uses of the associated argument.
3815 };
3816 
3817 /// Memory behavior attribute for function argument.
3818 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
3819  AAMemoryBehaviorArgument(const IRPosition &IRP)
3820  : AAMemoryBehaviorFloating(IRP) {}
3821 
3822  /// See AbstractAttribute::initialize(...).
3823  void initialize(Attributor &A) override {
3825 
3826  // TODO: From readattrs.ll: "inalloca parameters are always
3827  // considered written"
3828  if (hasAttr({Attribute::InAlloca}))
3829  removeAssumedBits(NO_WRITES);
3830 
3831  // Initialize the use vector with all direct uses of the associated value.
3832  Argument *Arg = getAssociatedArgument();
3833  if (!Arg || !Arg->getParent()->hasExactDefinition())
3834  indicatePessimisticFixpoint();
3835  }
3836 
3837  /// See AbstractAttribute::trackStatistics()
3838  void trackStatistics() const override {
3839  if (isAssumedReadNone())
3840  STATS_DECLTRACK_ARG_ATTR(readnone)
3841  else if (isAssumedReadOnly())
3842  STATS_DECLTRACK_ARG_ATTR(readonly)
3843  else if (isAssumedWriteOnly())
3844  STATS_DECLTRACK_ARG_ATTR(writeonly)
3845  }
3846 };
3847 
3848 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
3849  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
3850  : AAMemoryBehaviorArgument(IRP) {}
3851 
3852  /// See AbstractAttribute::updateImpl(...).
3853  ChangeStatus updateImpl(Attributor &A) override {
3854  // TODO: Once we have call site specific value information we can provide
3855  // call site specific liveness liveness information and then it makes
3856  // sense to specialize attributes for call sites arguments instead of
3857  // redirecting requests to the callee argument.
3858  Argument *Arg = getAssociatedArgument();
3859  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3860  auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
3861  return clampStateAndIndicateChange(
3862  getState(),
3863  static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3864  }
3865 
3866  /// See AbstractAttribute::trackStatistics()
3867  void trackStatistics() const override {
3868  if (isAssumedReadNone())
3869  STATS_DECLTRACK_CSARG_ATTR(readnone)
3870  else if (isAssumedReadOnly())
3871  STATS_DECLTRACK_CSARG_ATTR(readonly)
3872  else if (isAssumedWriteOnly())
3873  STATS_DECLTRACK_CSARG_ATTR(writeonly)
3874  }
3875 };
3876 
3877 /// Memory behavior attribute for a call site return position.
3878 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
3879  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
3880  : AAMemoryBehaviorFloating(IRP) {}
3881 
3882  /// See AbstractAttribute::manifest(...).
3883  ChangeStatus manifest(Attributor &A) override {
3884  // We do not annotate returned values.
3885  return ChangeStatus::UNCHANGED;
3886  }
3887 
3888  /// See AbstractAttribute::trackStatistics()
3889  void trackStatistics() const override {}
3890 };
3891 
3892 /// An AA to represent the memory behavior function attributes.
3893 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
3894  AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3895 
3896  /// See AbstractAttribute::updateImpl(Attributor &A).
3897  virtual ChangeStatus updateImpl(Attributor &A) override;
3898 
3899  /// See AbstractAttribute::manifest(...).
3900  ChangeStatus manifest(Attributor &A) override {
3901  Function &F = cast<Function>(getAnchorValue());
3902  if (isAssumedReadNone()) {
3903  F.removeFnAttr(Attribute::ArgMemOnly);
3904  F.removeFnAttr(Attribute::InaccessibleMemOnly);
3905  F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
3906  }
3907  return AAMemoryBehaviorImpl::manifest(A);
3908  }
3909 
3910  /// See AbstractAttribute::trackStatistics()
3911  void trackStatistics() const override {
3912  if (isAssumedReadNone())
3913  STATS_DECLTRACK_FN_ATTR(readnone)
3914  else if (isAssumedReadOnly())
3915  STATS_DECLTRACK_FN_ATTR(readonly)
3916  else if (isAssumedWriteOnly())
3917  STATS_DECLTRACK_FN_ATTR(writeonly)
3918  }
3919 };
3920 
3921 /// AAMemoryBehavior attribute for call sites.
3922 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
3923  AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3924 
3925  /// See AbstractAttribute::initialize(...).
3926  void initialize(Attributor &A) override {
3928  Function *F = getAssociatedFunction();
3929  if (!F || !F->hasExactDefinition())
3930  indicatePessimisticFixpoint();
3931  }
3932 
3933  /// See AbstractAttribute::updateImpl(...).
3934  ChangeStatus updateImpl(Attributor &A) override {
3935  // TODO: Once we have call site specific value information we can provide
3936  // call site specific liveness liveness information and then it makes
3937  // sense to specialize attributes for call sites arguments instead of
3938  // redirecting requests to the callee argument.
3939  Function *F = getAssociatedFunction();
3940  const IRPosition &FnPos = IRPosition::function(*F);
3941  auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3942  return clampStateAndIndicateChange(
3943  getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
3944  }
3945 
3946  /// See AbstractAttribute::trackStatistics()
3947  void trackStatistics() const override {
3948  if (isAssumedReadNone())
3949  STATS_DECLTRACK_CS_ATTR(readnone)
3950  else if (isAssumedReadOnly())
3951  STATS_DECLTRACK_CS_ATTR(readonly)
3952  else if (isAssumedWriteOnly())
3953  STATS_DECLTRACK_CS_ATTR(writeonly)
3954  }
3955 };
3956 } // namespace
3957 
3958 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
3959 
3960  // The current assumed state used to determine a change.
3961  auto AssumedState = getAssumed();
3962 
3963  auto CheckRWInst = [&](Instruction &I) {
3964  // If the instruction has an own memory behavior state, use it to restrict
3965  // the local state. No further analysis is required as the other memory
3966  // state is as optimistic as it gets.
3967  if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
3968  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3969  *this, IRPosition::callsite_function(ICS));
3970  intersectAssumedBits(MemBehaviorAA.getAssumed());
3971  return !isAtFixpoint();
3972  }
3973 
3974  // Remove access kind modifiers if necessary.
3975  if (I.mayReadFromMemory())
3976  removeAssumedBits(NO_READS);
3977  if (I.mayWriteToMemory())
3978  removeAssumedBits(NO_WRITES);
3979  return !isAtFixpoint();
3980  };
3981 
3982  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
3983  return indicatePessimisticFixpoint();
3984 
3985  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
3987 }
3988 
3989 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
3990 
3991  const IRPosition &IRP = getIRPosition();
3992  const IRPosition &FnPos = IRPosition::function_scope(IRP);
3994 
3995  // First, check the function scope. We take the known information and we avoid
3996  // work if the assumed information implies the current assumed information for
3997  // this attribute.
3998  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3999  S.addKnownBits(FnMemAA.getKnown());
4000  if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4001  return ChangeStatus::UNCHANGED;
4002 
4003  // Make sure the value is not captured (except through "return"), if
4004  // it is, any information derived would be irrelevant anyway as we cannot
4005  // check the potential aliases introduced by the capture.
4006  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4007  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned())
4008  return indicatePessimisticFixpoint();
4009 
4010  // The current assumed state used to determine a change.
4011  auto AssumedState = S.getAssumed();
4012 
4013  // Liveness information to exclude dead users.
4014  // TODO: Take the FnPos once we have call site specific liveness information.
4015  const auto &LivenessAA = A.getAAFor<AAIsDead>(
4017 
4018  // Visit and expand uses until all are analyzed or a fixpoint is reached.
4019  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4020  const Use *U = Uses[i];
4021  Instruction *UserI = cast<Instruction>(U->getUser());
4022  LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4023  << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4024  << "]\n");
4025  if (LivenessAA.isAssumedDead(UserI))
4026  continue;
4027 
4028  // Check if the users of UserI should also be visited.
4029  if (followUsersOfUseIn(A, U, UserI))
4030  for (const Use &UserIUse : UserI->uses())
4031  Uses.insert(&UserIUse);
4032 
4033  // If UserI might touch memory we analyze the use in detail.
4034  if (UserI->mayReadOrWriteMemory())
4035  analyzeUseIn(A, U, UserI);
4036  }
4037 
4038  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4040 }
4041 
4042 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4043  const Instruction *UserI) {
4044  // The loaded value is unrelated to the pointer argument, no need to
4045  // follow the users of the load.
4046  if (isa<LoadInst>(UserI))
4047  return false;
4048 
4049  // By default we follow all uses assuming UserI might leak information on U,
4050  // we have special handling for call sites operands though.
4051  ImmutableCallSite ICS(UserI);
4052  if (!ICS || !ICS.isArgOperand(U))
4053  return true;
4054 
4055  // If the use is a call argument known not to be captured, the users of
4056  // the call do not need to be visited because they have to be unrelated to
4057  // the input. Note that this check is not trivial even though we disallow
4058  // general capturing of the underlying argument. The reason is that the
4059  // call might the argument "through return", which we allow and for which we
4060  // need to check call users.
4061  unsigned ArgNo = ICS.getArgumentNo(U);
4062  const auto &ArgNoCaptureAA =
4063  A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4064  return !ArgNoCaptureAA.isAssumedNoCapture();
4065 }
4066 
4067 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4068  const Instruction *UserI) {
4069  assert(UserI->mayReadOrWriteMemory());
4070 
4071  switch (UserI->getOpcode()) {
4072  default:
4073  // TODO: Handle all atomics and other side-effect operations we know of.
4074  break;
4075  case Instruction::Load:
4076  // Loads cause the NO_READS property to disappear.
4077  removeAssumedBits(NO_READS);
4078  return;
4079 
4080  case Instruction::Store:
4081  // Stores cause the NO_WRITES property to disappear if the use is the
4082  // pointer operand. Note that we do assume that capturing was taken care of
4083  // somewhere else.
4084  if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4085  removeAssumedBits(NO_WRITES);
4086  return;
4087 
4088  case Instruction::Call:
4089  case Instruction::CallBr:
4090  case Instruction::Invoke: {
4091  // For call sites we look at the argument memory behavior attribute (this
4092  // could be recursive!) in order to restrict our own state.
4093  ImmutableCallSite ICS(UserI);
4094 
4095  // Give up on operand bundles.
4096  if (ICS.isBundleOperand(U)) {
4097  indicatePessimisticFixpoint();
4098  return;
4099  }
4100 
4101  // Calling a function does read the function pointer, maybe write it if the
4102  // function is self-modifying.
4103  if (ICS.isCallee(U)) {
4104  removeAssumedBits(NO_READS);
4105  break;
4106  }
4107 
4108  // Adjust the possible access behavior based on the information on the
4109  // argument.
4110  unsigned ArgNo = ICS.getArgumentNo(U);
4111  const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4112  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4113  // "assumed" has at most the same bits as the MemBehaviorAA assumed
4114  // and at least "known".
4115  intersectAssumedBits(MemBehaviorAA.getAssumed());
4116  return;
4117  }
4118  };
4119 
4120  // Generally, look at the "may-properties" and adjust the assumed state if we
4121  // did not trigger special handling before.
4122  if (UserI->mayReadFromMemory())
4123  removeAssumedBits(NO_READS);
4124  if (UserI->mayWriteToMemory())
4125  removeAssumedBits(NO_WRITES);
4126 }
4127 
4128 /// ----------------------------------------------------------------------------
4129 /// Attributor
4130 /// ----------------------------------------------------------------------------
4131 
4133  const AAIsDead *LivenessAA) {
4134  const Instruction *CtxI = AA.getIRPosition().getCtxI();
4135  if (!CtxI)
4136  return false;
4137 
4138  if (!LivenessAA)
4139  LivenessAA =
4140  &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4141  /* TrackDependence */ false);
4142 
4143  // Don't check liveness for AAIsDead.
4144  if (&AA == LivenessAA)
4145  return false;
4146 
4147  if (!LivenessAA->isAssumedDead(CtxI))
4148  return false;
4149 
4150  // We actually used liveness information so we have to record a dependence.
4151  recordDependence(*LivenessAA, AA);
4152 
4153  return true;
4154 }
4155 
4157  const function_ref<bool(AbstractCallSite)> &Pred,
4158  const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4159  // We can try to determine information from
4160  // the call sites. However, this is only possible all call sites are known,
4161  // hence the function has internal linkage.
4162  const IRPosition &IRP = QueryingAA.getIRPosition();
4163  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4164  if (!AssociatedFunction) {
4165  LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4166  << "\n");
4167  return false;
4168  }
4169 
4170  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4171  &QueryingAA);
4172 }
4173 
4175  const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4176  bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4177  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4178  LLVM_DEBUG(
4179  dbgs()
4180  << "[Attributor] Function " << Fn.getName()
4181  << " has no internal linkage, hence not all call sites are known\n");
4182  return false;
4183  }
4184 
4185  for (const Use &U : Fn.uses()) {
4186  AbstractCallSite ACS(&U);
4187  if (!ACS) {
4188  LLVM_DEBUG(dbgs() << "[Attributor] Function "
4189  << Fn.getName()
4190  << " has non call site use " << *U.get() << " in "
4191  << *U.getUser() << "\n");
4192  return false;
4193  }
4194 
4195  Instruction *I = ACS.getInstruction();
4196  Function *Caller = I->getFunction();
4197 
4198  const auto *LivenessAA =
4199  lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4200  /* TrackDependence */ false);
4201 
4202  // Skip dead calls.
4203  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4204  // We actually used liveness information so we have to record a
4205  // dependence.
4206  if (QueryingAA)
4207  recordDependence(*LivenessAA, *QueryingAA);
4208  continue;
4209  }
4210 
4211  const Use *EffectiveUse =
4212  ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4213  if (!ACS.isCallee(EffectiveUse)) {
4214  if (!RequireAllCallSites)
4215  continue;
4216  LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4217  << " is an invalid use of "
4218  << Fn.getName() << "\n");
4219  return false;
4220  }
4221 
4222  if (Pred(ACS))
4223  continue;
4224 
4225  LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4226  << *ACS.getInstruction() << "\n");
4227  return false;
4228  }
4229 
4230  return true;
4231 }
4232 
4234  const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4235  &Pred,
4236  const AbstractAttribute &QueryingAA) {
4237 
4238  const IRPosition &IRP = QueryingAA.getIRPosition();
4239  // Since we need to provide return instructions we have to have an exact
4240  // definition.
4241  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4242  if (!AssociatedFunction)
4243  return false;
4244 
4245  // If this is a call site query we use the call site specific return values
4246  // and liveness information.
4247  // TODO: use the function scope once we have call site AAReturnedValues.
4248  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4249  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4250  if (!AARetVal.getState().isValidState())
4251  return false;
4252 
4253  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4254 }
4255 
4257  const function_ref<bool(Value &)> &Pred,
4258  const AbstractAttribute &QueryingAA) {
4259 
4260  const IRPosition &IRP = QueryingAA.getIRPosition();
4261  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4262  if (!AssociatedFunction)
4263  return false;
4264 
4265  // TODO: use the function scope once we have call site AAReturnedValues.
4266  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4267  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4268  if (!AARetVal.getState().isValidState())
4269  return false;
4270 
4271  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4272  [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4273  return Pred(RV);
4274  });
4275 }
4276 
4277 static bool
4279  const function_ref<bool(Instruction &)> &Pred,
4280  const AAIsDead *LivenessAA, bool &AnyDead,
4281  const ArrayRef<unsigned> &Opcodes) {
4282  for (unsigned Opcode : Opcodes) {
4283  for (Instruction *I : OpcodeInstMap[Opcode]) {
4284  // Skip dead instructions.
4285  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4286  AnyDead = true;
4287  continue;
4288  }
4289 
4290  if (!Pred(*I))
4291  return false;
4292  }
4293  }
4294  return true;
4295 }
4296 
4298  const llvm::function_ref<bool(Instruction &)> &Pred,
4299  const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4300 
4301  const IRPosition &IRP = QueryingAA.getIRPosition();
4302  // Since we need to provide instructions we have to have an exact definition.
4303  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4304  if (!AssociatedFunction)
4305  return false;
4306 
4307  // TODO: use the function scope once we have call site AAReturnedValues.
4308  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4309  const auto &LivenessAA =
4310  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4311  bool AnyDead = false;
4312 
4313  auto &OpcodeInstMap =
4314  InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4315  if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4316  Opcodes))
4317  return false;
4318 
4319  // If we actually used liveness information so we have to record a dependence.
4320  if (AnyDead)
4321  recordDependence(LivenessAA, QueryingAA);
4322 
4323  return true;
4324 }
4325 
4327  const llvm::function_ref<bool(Instruction &)> &Pred,
4328  AbstractAttribute &QueryingAA) {
4329 
4330  const Function *AssociatedFunction =
4331  QueryingAA.getIRPosition().getAssociatedFunction();
4332  if (!AssociatedFunction)
4333  return false;
4334 
4335  // TODO: use the function scope once we have call site AAReturnedValues.
4336  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4337  const auto &LivenessAA =
4338  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4339  bool AnyDead = false;
4340 
4341  for (Instruction *I :
4342  InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
4343  // Skip dead instructions.
4344  if (LivenessAA.isAssumedDead(I)) {
4345  AnyDead = true;
4346  continue;
4347  }
4348 
4349  if (!Pred(*I))
4350  return false;
4351  }
4352 
4353  // If we actually used liveness information so we have to record a dependence.
4354  if (AnyDead)
4355  recordDependence(LivenessAA, QueryingAA);
4356 
4357  return true;
4358 }
4359 
4361  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
4362  << AllAbstractAttributes.size()
4363  << " abstract attributes.\n");
4364 
4365  // Now that all abstract attributes are collected and initialized we start
4366  // the abstract analysis.
4367 
4368  unsigned IterationCounter = 1;
4369 
4372  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
4373 
4374  bool RecomputeDependences = false;
4375 
4376  do {
4377  // Remember the size to determine new attributes.
4378  size_t NumAAs = AllAbstractAttributes.size();
4379  LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
4380  << ", Worklist size: " << Worklist.size() << "\n");
4381 
4382  // If dependences (=QueryMap) are recomputed we have to look at all abstract
4383  // attributes again, regardless of what changed in the last iteration.
4384  if (RecomputeDependences) {
4385  LLVM_DEBUG(
4386  dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
4387  QueryMap.clear();
4388  ChangedAAs.clear();
4389  Worklist.insert(AllAbstractAttributes.begin(),
4390  AllAbstractAttributes.end());
4391  }
4392 
4393  // Add all abstract attributes that are potentially dependent on one that
4394  // changed to the work list.
4395  for (AbstractAttribute *ChangedAA : ChangedAAs) {
4396  auto &QuerriedAAs = QueryMap[ChangedAA];
4397  Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
4398  }
4399 
4400  LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
4401  << ", Worklist+Dependent size: " << Worklist.size()
4402  << "\n");
4403 
4404  // Reset the changed set.
4405  ChangedAAs.clear();
4406 
4407  // Update all abstract attribute in the work list and record the ones that
4408  // changed.
4409  for (AbstractAttribute *AA : Worklist)
4410  if (!isAssumedDead(*AA, nullptr))
4411  if (AA->update(*this) == ChangeStatus::CHANGED)
4412  ChangedAAs.push_back(AA);
4413 
4414  // Check if we recompute the dependences in the next iteration.
4415  RecomputeDependences = (DepRecomputeInterval > 0 &&
4416  IterationCounter % DepRecomputeInterval == 0);
4417 
4418  // Add attributes to the changed set if they have been created in the last
4419  // iteration.
4420  ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
4421  AllAbstractAttributes.end());
4422 
4423  // Reset the work list and repopulate with the changed abstract attributes.
4424  // Note that dependent ones are added above.
4425  Worklist.clear();
4426  Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
4427 
4428  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
4430 
4431  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
4432  << IterationCounter << "/" << MaxFixpointIterations
4433  << " iterations\n");
4434 
4435  size_t NumFinalAAs = AllAbstractAttributes.size();
4436 
4437  // Reset abstract arguments not settled in a sound fixpoint by now. This
4438  // happens when we stopped the fixpoint iteration early. Note that only the
4439  // ones marked as "changed" *and* the ones transitively depending on them
4440  // need to be reverted to a pessimistic state. Others might not be in a
4441  // fixpoint state but we can use the optimistic results for them anyway.
4443  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
4444  AbstractAttribute *ChangedAA = ChangedAAs[u];
4445  if (!Visited.insert(ChangedAA).second)
4446  continue;
4447 
4448  AbstractState &State = ChangedAA->getState();
4449  if (!State.isAtFixpoint()) {
4451 
4452  NumAttributesTimedOut++;
4453  }
4454 
4455  auto &QuerriedAAs = QueryMap[ChangedAA];
4456  ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
4457  }
4458 
4459  LLVM_DEBUG({
4460  if (!Visited.empty())
4461  dbgs() << "\n[Attributor] Finalized " << Visited.size()
4462  << " abstract attributes.\n";
4463  });
4464 
4465  unsigned NumManifested = 0;
4466  unsigned NumAtFixpoint = 0;
4467  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
4468  for (AbstractAttribute *AA : AllAbstractAttributes) {
4469  AbstractState &State = AA->getState();
4470 
4471  // If there is not already a fixpoint reached, we can now take the
4472  // optimistic state. This is correct because we enforced a pessimistic one
4473  // on abstract attributes that were transitively dependent on a changed one
4474  // already above.
4475  if (!State.isAtFixpoint())
4477 
4478  // If the state is invalid, we do not try to manifest it.
4479  if (!State.isValidState())
4480  continue;
4481 
4482  // Skip dead code.
4483  if (isAssumedDead(*AA, nullptr))
4484  continue;
4485  // Manifest the state and record if we changed the IR.
4486  ChangeStatus LocalChange = AA->manifest(*this);
4487  if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
4488  AA->trackStatistics();
4489 
4490  ManifestChange = ManifestChange | LocalChange;
4491 
4492  NumAtFixpoint++;
4493  NumManifested += (LocalChange == ChangeStatus::CHANGED);
4494  }
4495 
4496  (void)NumManifested;
4497  (void)NumAtFixpoint;
4498  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
4499  << " arguments while " << NumAtFixpoint
4500  << " were in a valid fixpoint state\n");
4501 
4502  NumAttributesManifested += NumManifested;
4503  NumAttributesValidFixpoint += NumAtFixpoint;
4504 
4505  (void)NumFinalAAs;
4506  assert(
4507  NumFinalAAs == AllAbstractAttributes.size() &&
4508  "Expected the final number of abstract attributes to remain unchanged!");
4509 
4510  // Delete stuff at the end to avoid invalid references and a nice order.
4511  {
4512  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
4513  << ToBeDeletedFunctions.size() << " functions and "
4514  << ToBeDeletedBlocks.size() << " blocks and "
4515  << ToBeDeletedInsts.size() << " instructions\n");
4516  for (Instruction *I : ToBeDeletedInsts) {
4517  if (!I->use_empty())
4518  I->replaceAllUsesWith(UndefValue::get(I->getType()));
4519  I->eraseFromParent();
4520  }
4521 
4522  if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
4523  SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
4524  ToBeDeletedBBs.reserve(NumDeadBlocks);
4525  ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
4526  DeleteDeadBlocks(ToBeDeletedBBs);
4528  "Number of dead basic blocks deleted.");
4529  }
4530 
4531  STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
4532  for (Function *Fn : ToBeDeletedFunctions) {
4534  Fn->eraseFromParent();
4536  }
4537 
4538  // Identify dead internal functions and delete them. This happens outside
4539  // the other fixpoint analysis as we might treat potentially dead functions
4540  // as live to lower the number of iterations. If they happen to be dead, the
4541  // below fixpoint loop will identify and eliminate them.
4542  SmallVector<Function *, 8> InternalFns;
4543  for (Function &F : M)
4544  if (F.hasLocalLinkage())
4545  InternalFns.push_back(&F);
4546 
4547  bool FoundDeadFn = true;
4548  while (FoundDeadFn) {
4549  FoundDeadFn = false;
4550  for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
4551  Function *F = InternalFns[u];
4552  if (!F)
4553  continue;
4554 
4555  const auto *LivenessAA =
4556  lookupAAFor<AAIsDead>(IRPosition::function(*F));
4557  if (LivenessAA &&
4558  !checkForAllCallSites([](AbstractCallSite ACS) { return false; },
4559  *LivenessAA, true))
4560  continue;
4561 
4564  F->eraseFromParent();
4565  InternalFns[u] = nullptr;
4566  FoundDeadFn = true;
4567  }
4568  }
4569  }
4570 
4572  IterationCounter != MaxFixpointIterations) {
4573  errs() << "\n[Attributor] Fixpoint iteration done after: "
4574  << IterationCounter << "/" << MaxFixpointIterations
4575  << " iterations\n";
4576  llvm_unreachable("The fixpoint was not reached with exactly the number of "
4577  "specified iterations!");
4578  }
4579 
4580  return ManifestChange;
4581 }
4582 
4584 
4585  // Walk all instructions to find interesting instructions that might be
4586  // queried by abstract attributes during their initialization or update.
4587  // This has to happen before we create attributes.
4588  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
4589  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
4590 
4591  for (Instruction &I : instructions(&F)) {
4592  bool IsInterestingOpcode = false;
4593 
4594  // To allow easy access to all instructions in a function with a given
4595  // opcode we store them in the InfoCache. As not all opcodes are interesting
4596  // to concrete attributes we only cache the ones that are as identified in
4597  // the following switch.
4598  // Note: There are no concrete attributes now so this is initially empty.
4599  switch (I.getOpcode()) {
4600  default:
4601  assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
4602  "New call site/base instruction type needs to be known int the "
4603  "Attributor.");
4604  break;
4605  case Instruction::Load:
4606  // The alignment of a pointer is interesting for loads.
4607  case Instruction::Store:
4608  // The alignment of a pointer is interesting for stores.
4609  case Instruction::Call:
4610  case Instruction::CallBr:
4611  case Instruction::Invoke:
4612  case Instruction::CleanupRet:
4613  case Instruction::CatchSwitch:
4614  case Instruction::Resume:
4615  case Instruction::Ret:
4616  IsInterestingOpcode = true;
4617  }
4618  if (IsInterestingOpcode)
4619  InstOpcodeMap[I.getOpcode()].push_back(&I);
4620  if (I.mayReadOrWriteMemory())
4621  ReadOrWriteInsts.push_back(&I);
4622  }
4623 }
4624 
4626  if (!VisitedFunctions.insert(&F).second)
4627  return;
4628 
4629  IRPosition FPos = IRPosition::function(F);
4630 
4631  // Check for dead BasicBlocks in every function.
4632  // We need dead instruction detection because we do not want to deal with
4633  // broken IR in which SSA rules do not apply.
4634  getOrCreateAAFor<AAIsDead>(FPos);
4635 
4636  // Every function might be "will-return".
4637  getOrCreateAAFor<AAWillReturn>(FPos);
4638 
4639  // Every function can be nounwind.
4640  getOrCreateAAFor<AANoUnwind>(FPos);
4641 
4642  // Every function might be marked "nosync"
4643  getOrCreateAAFor<AANoSync>(FPos);
4644 
4645  // Every function might be "no-free".
4646  getOrCreateAAFor<AANoFree>(FPos);
4647 
4648  // Every function might be "no-return".
4649  getOrCreateAAFor<AANoReturn>(FPos);
4650 
4651  // Every function might be "no-recurse".
4652  getOrCreateAAFor<AANoRecurse>(FPos);
4653 
4654  // Every function might be "readnone/readonly/writeonly/...".
4655  getOrCreateAAFor<AAMemoryBehavior>(FPos);
4656 
4657  // Every function might be applicable for Heap-To-Stack conversion.
4658  if (EnableHeapToStack)
4659  getOrCreateAAFor<AAHeapToStack>(FPos);
4660 
4661  // Return attributes are only appropriate if the return type is non void.
4662  Type *ReturnType = F.getReturnType();
4663  if (!ReturnType->isVoidTy()) {
4664  // Argument attribute "returned" --- Create only one per function even
4665  // though it is an argument attribute.
4666  getOrCreateAAFor<AAReturnedValues>(FPos);
4667 
4668  IRPosition RetPos = IRPosition::returned(F);
4669 
4670  // Every function might be simplified.
4671  getOrCreateAAFor<AAValueSimplify>(RetPos);
4672 
4673  if (ReturnType->isPointerTy()) {
4674 
4675  // Every function with pointer return type might be marked align.
4676  getOrCreateAAFor<AAAlign>(RetPos);
4677 
4678  // Every function with pointer return type might be marked nonnull.
4679  getOrCreateAAFor<AANonNull>(RetPos);
4680 
4681  // Every function with pointer return type might be marked noalias.
4682  getOrCreateAAFor<AANoAlias>(RetPos);
4683 
4684  // Every function with pointer return type might be marked
4685  // dereferenceable.
4686  getOrCreateAAFor<AADereferenceable>(RetPos);
4687  }
4688  }
4689 
4690  for (Argument &Arg : F.args()) {
4692 
4693  // Every argument might be simplified.
4694  getOrCreateAAFor<AAValueSimplify>(ArgPos);
4695 
4696  if (Arg.getType()->isPointerTy()) {
4697  // Every argument with pointer type might be marked nonnull.
4698  getOrCreateAAFor<AANonNull>(ArgPos);
4699 
4700  // Every argument with pointer type might be marked noalias.
4701  getOrCreateAAFor<AANoAlias>(ArgPos);
4702 
4703  // Every argument with pointer type might be marked dereferenceable.
4704  getOrCreateAAFor<AADereferenceable>(ArgPos);
4705 
4706  // Every argument with pointer type might be marked align.
4707  getOrCreateAAFor<AAAlign>(ArgPos);
4708 
4709  // Every argument with pointer type might be marked nocapture.
4710  getOrCreateAAFor<AANoCapture>(ArgPos);
4711 
4712  // Every argument with pointer type might be marked
4713  // "readnone/readonly/writeonly/..."
4714  getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
4715  }
4716  }
4717 
4718  auto CallSitePred = [&](Instruction &I) -> bool {
4719  CallSite CS(&I);
4720  if (CS.getCalledFunction()) {
4721  for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
4722 
4723  IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
4724 
4725  // Call site argument might be simplified.
4726  getOrCreateAAFor<AAValueSimplify>(CSArgPos);
4727 
4728  if (!CS.getArgument(i)->getType()->isPointerTy())
4729  continue;
4730 
4731  // Call site argument attribute "non-null".
4732  getOrCreateAAFor<AANonNull>(CSArgPos);
4733 
4734  // Call site argument attribute "no-alias".
4735  getOrCreateAAFor<AANoAlias>(CSArgPos);
4736 
4737  // Call site argument attribute "dereferenceable".
4738  getOrCreateAAFor<AADereferenceable>(CSArgPos);
4739 
4740  // Call site argument attribute "align".
4741  getOrCreateAAFor<AAAlign>(CSArgPos);
4742  }
4743  }
4744  return true;
4745  };
4746 
4747  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
4748  bool Success, AnyDead = false;
4749  Success = checkForAllInstructionsImpl(
4750  OpcodeInstMap, CallSitePred, nullptr, AnyDead,
4751  {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
4753  (void)Success;
4754  assert(Success && !AnyDead && "Expected the check call to be successful!");
4755 
4756  auto LoadStorePred = [&](Instruction &I) -> bool {
4757  if (isa<LoadInst>(I))
4758  getOrCreateAAFor<AAAlign>(
4759  IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
4760  else
4761  getOrCreateAAFor<AAAlign>(
4762  IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
4763  return true;
4764  };
4765  Success = checkForAllInstructionsImpl(
4766  OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
4768  (void)Success;
4769  assert(Success && !AnyDead && "Expected the check call to be successful!");
4770 }
4771 
4772 /// Helpers to ease debugging through output streams and print calls.
4773 ///
4774 ///{
4776  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
4777 }
4778 
4780  switch (AP) {
4782  return OS << "inv";
4783  case IRPosition::IRP_FLOAT:
4784  return OS << "flt";
4786  return OS << "fn_ret";
4788  return OS << "cs_ret";
4790  return OS << "fn";
4792  return OS << "cs";
4794  return OS << "arg";
4796  return OS << "cs_arg";
4797  }
4798  llvm_unreachable("Unknown attribute position!");
4799 }
4800 
4802  const Value &AV = Pos.getAssociatedValue();
4803  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
4804  << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
4805 }
4806 
4808  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
4809  << static_cast<const AbstractState &>(S);
4810 }
4811 
4813  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
4814 }
4815 
4817  AA.print(OS);
4818  return OS;
4819 }
4820 
4822  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
4823  << "]";
4824 }
4825 ///}
4826 
4827 /// ----------------------------------------------------------------------------
4828 /// Pass (Manager) Boilerplate
4829 /// ----------------------------------------------------------------------------
4830 
4832  if (DisableAttributor)
4833  return false;
4834 
4835  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
4836  << " functions.\n");
4837 
4838  // Create an Attributor and initially empty information cache that is filled
4839  // while we identify default attribute opportunities.
4840  InformationCache InfoCache(M, AG);
4841  Attributor A(InfoCache, DepRecInterval);
4842 
4843  for (Function &F : M)
4845 
4846  for (Function &F : M) {
4847  if (F.hasExactDefinition())
4848  NumFnWithExactDefinition++;
4849  else
4850  NumFnWithoutExactDefinition++;
4851 
4852  // We look at internal functions only on-demand but if any use is not a
4853  // direct call, we have to do it eagerly.
4854  if (F.hasLocalLinkage()) {
4855  if (llvm::all_of(F.uses(), [](const Use &U) {
4856  return ImmutableCallSite(U.getUser()) &&
4857  ImmutableCallSite(U.getUser()).isCallee(&U);
4858  }))
4859  continue;
4860  }
4861 
4862  // Populate the Attributor with abstract attribute opportunities in the
4863  // function and the information cache with IR information.
4865  }
4866 
4867  return A.run(M) == ChangeStatus::CHANGED;
4868 }
4869 
4871  AnalysisGetter AG(AM);
4872  if (runAttributorOnModule(M, AG)) {
4873  // FIXME: Think about passes we will preserve and add them here.
4874  return PreservedAnalyses::none();
4875  }
4876  return PreservedAnalyses::all();
4877 }
4878 
4879 namespace {
4880 
4881 struct AttributorLegacyPass : public ModulePass {
4882  static char ID;
4883 
4884  AttributorLegacyPass() : ModulePass(ID) {
4886  }
4887 
4888  bool runOnModule(Module &M) override {
4889  if (skipModule(M))
4890  return false;
4891 
4892  AnalysisGetter AG;
4893  return runAttributorOnModule(M, AG);
4894  }
4895 
4896  void getAnalysisUsage(AnalysisUsage &AU) const override {
4897  // FIXME: Think about passes we will preserve and add them here.
4899  }
4900 };
4901 
4902 } // end anonymous namespace
4903 
4904 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
4905 
4906 char AttributorLegacyPass::ID = 0;
4907 
4908 const char AAReturnedValues::ID = 0;
4909 const char AANoUnwind::ID = 0;
4910 const char AANoSync::ID = 0;
4911 const char AANoFree::ID = 0;
4912 const char AANonNull::ID = 0;
4913 const char AANoRecurse::ID = 0;
4914 const char AAWillReturn::ID = 0;
4915 const char AANoAlias::ID = 0;
4916 const char AANoReturn::ID = 0;
4917 const char AAIsDead::ID = 0;
4918 const char AADereferenceable::ID = 0;
4919 const char AAAlign::ID = 0;
4920 const char AANoCapture::ID = 0;
4921 const char AAValueSimplify::ID = 0;
4922 const char AAHeapToStack::ID = 0;
4923 const char AAMemoryBehavior::ID = 0;
4924 
4925 // Macro magic to create the static generator function for attributes that
4926 // follow the naming scheme.
4927 
4928 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
4929  case IRPosition::PK: \
4930  llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
4931 
4932 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
4933  case IRPosition::PK: \
4934  AA = new CLASS##SUFFIX(IRP); \
4935  break;
4936 
4937 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4938  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4939  CLASS *AA = nullptr; \
4940  switch (IRP.getPositionKind()) { \
4941  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
4942  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
4943  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
4944  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
4945  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
4946  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
4947  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
4948  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
4949  } \
4950  return *AA; \
4951  }
4952 
4953 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4954  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4955  CLASS *AA = nullptr; \
4956  switch (IRP.getPositionKind()) { \
4957  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
4958  SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
4959  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
4960  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
4961  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
4962  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
4963  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
4964  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
4965  } \
4966  return *AA; \
4967  }
4968 
4969 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4970  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4971  CLASS *AA = nullptr; \
4972  switch (IRP.getPositionKind()) { \
4973  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
4974  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
4975  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
4976  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
4977  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
4978  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
4979  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
4980  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
4981  } \
4982  return *AA; \
4983  }
4984 
4985 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4986  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4987  CLASS *AA = nullptr; \
4988  switch (IRP.getPositionKind()) { \
4989  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
4990  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
4991  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
4992  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
4993  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
4994  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
4995  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
4996  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
4997  } \
4998  return *AA; \
4999  }
5000 
5001 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5002  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5003  CLASS *AA = nullptr; \
5004  switch (IRP.getPositionKind()) { \
5005  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5006  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5007  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5008  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5009  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5010  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5011  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5012  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5013  } \
5014  return *AA; \
5015  }
5016 
5025 
5031 
5033 
5035 
5037 
5038 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5039 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5040 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5041 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5042 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5043 #undef SWITCH_PK_CREATE
5044 #undef SWITCH_PK_INV
5045 
5046 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5047  "Deduce and propagate attributes", false, false)
5049 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5050  "Deduce and propagate attributes", false, false)
An attribute for a call site return value.
Definition: Attributor.h:151
Pass interface - Implemented by all &#39;passes&#39;.
Definition: Pass.h:80
void DeleteDeadBlocks(ArrayRef< BasicBlock *> BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks from BB.
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:481
OpcodeInstMapTy & getOpcodeInstMapForFunction(const Function &F)
Return the map that relates "interesting" opcodes with all instructions with that opcode in F...
Definition: Attributor.h:618
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1865
uint64_t CallInst * C
Return a value (possibly void), from a function.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:112
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
iterator_range< use_iterator > uses()
Definition: Value.h:374
StringRef getKindAsString() const
Return the attribute&#39;s kind as a string.
Definition: Attributes.cpp:216
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:177
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:403
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:244
bool hasLocalLinkage() const
Definition: GlobalValue.h:445
void clear()
Definition: MapVector.h:88
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
size_type size() const
Definition: MapVector.h:60
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
This callback is used in conjunction with PointerMayBeCaptured.
static ChangeStatus manifestAttrs(Attributor &A, IRPosition &IRP, const ArrayRef< Attribute > &DeducedAttrs)
Definition: Attributor.cpp:331
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:288
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:77
This is the interface for a simple mod/ref and alias analysis over globals.
SubsumingPositionIterator(const IRPosition &IRP)
Definition: Attributor.cpp:391
#define STATS_TRACK(NAME, TYPE)
Definition: Attributor.cpp:76
static Attribute getWithAlignment(LLVMContext &Context, uint64_t Align)
Return a uniquified Attribute object that has the specific alignment set.
Definition: Attributes.cpp:145
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
bool user_empty() const
Definition: Value.h:383
static Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
Definition: Attributes.cpp:158
ChangeStatus
Simple enum class that forces the status to be spelled out explicitly.
Definition: Attributor.h:120
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
virtual void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
A position that is not associated with a spot suitable for attributes.
Definition: Attributor.h:148
Implements a dense probed hash-table based set.
Definition: DenseSet.h:249
If we do not capture the value in memory, through integers, or as a derived pointer we know it is not...
Definition: Attributor.h:1812
static const Value * getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset, const DataLayout &DL)
Definition: Attributor.cpp:304
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1937
IntegerState DerefBytesState
State representing for dereferenceable bytes.
Definition: Attributor.h:1653
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1582
An abstract interface for all nocapture attributes.
Definition: Attributor.h:1794
This class represents a function call, abstracting a target machine&#39;s calling convention.
iterator & end()
Return an universal end iterator.
Definition: MustExecute.h:405
unsigned constexpr DefaultMaxUsesToExplore
The default value for MaxUsesToExplore argument.
Abstract Attribute Classes
Definition: Attributor.h:1415
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
The two locations do not alias at all.
Definition: AliasAnalysis.h:84
static cl::opt< bool > VerifyMaxFixpointIterations("attributor-max-iterations-verify", cl::Hidden, cl::desc("Verify that max-iterations is a tight bound for a fixpoint"), cl::init(false))
An efficient, type-erasing, non-owning reference to a callable.
Definition: STLExtras.h:104
base_t getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:1104
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1889
An attribute for a call site argument.
Definition: Attributor.h:155
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:743
virtual const IRPosition & getIRPosition() const =0
Return an IR position, see struct IRPosition.
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:37
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1165
bool isAssumedNoRecurse() const
Return true if "norecurse" is assumed.
Definition: Attributor.h:1516
An abstract attribute for willreturn.
Definition: Attributor.h:1529
STATISTIC(NumFunctions, "Total number of functions")
APInt operator &(APInt a, const APInt &b)
Definition: APInt.h:1987
Value & getAssociatedValue()
}
Definition: Attributor.h:361
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1100
F(f)
MustBeExecutedContextExplorer & getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
Definition: Attributor.h:631
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:621
FunTy * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it&#39;s an indirect...
Definition: CallSite.h:111
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:144
BasicBlock * SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction - everything before SplitPt stays in Old and e...
void reserve(size_type N)
Definition: SmallVector.h:369
Kind
The positions we distinguish in the IR.
Definition: Attributor.h:146
Wrapper for FunctoinAnalysisManager.
Definition: Attributor.h:561
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
Instruction * getCtxI()
}
Definition: Attributor.h:341
Value * get() const
Definition: Use.h:107
unsigned getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition: Value.cpp:674
const CallInst * isFreeCall(const Value *I, const TargetLibraryInfo *TLI)
isFreeCall - Returns non-null if the value is a call to the builtin free()
bool checkForAllCallSites(const function_ref< bool(AbstractCallSite)> &Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites)
Check Pred on all function call sites.
virtual bool isDereferenceableOrNull(Value *O, const DataLayout &DL)
isDereferenceableOrNull - Overload to allow clients with additional knowledge about pointer dereferen...
void initializeInformationCache(Function &F)
Initialize the information cache for queries regarding function F.
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:50
static const IRPosition function_scope(const IRPosition &IRP)
Create a position with function scope matching the "context" of IRP.
Definition: Attributor.h:234
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA)
Return true if AA (or its context instruction) is assumed dead.
An AbstractAttribute for noreturn.
Definition: Attributor.h:1586
uint64_t getValueAsInt() const
Return the attribute&#39;s value as an integer.
Definition: Attributes.cpp:209
A visitor class for IR positions.
Definition: Attributor.h:550
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:194
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1790
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
An abstract attribute for norecurse.
Definition: Attributor.h:1510
static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
Definition: Attributes.cpp:164
#define STATS_DECLTRACK_ARG_ATTR(NAME)
Definition: Attributor.cpp:82
unsigned getArgumentNo(Value::const_user_iterator I) const
Given a value use iterator, returns the argument that corresponds to it.
Definition: CallSite.h:206
This file contains the simple types necessary to represent the attributes associated with functions a...
bool isAssumedNoUnwind() const
Returns true if nounwind is assumed.
Definition: Attributor.h:1460
#define STATS_DECLTRACK_CSARG_ATTR(NAME)
Definition: Attributor.cpp:84
bool canSimplifyInvokeNoUnwind(const Function *F)
static cl::opt< unsigned > MaxFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1525
An abstract interface for all noalias attributes.
Definition: Attributor.h:1548
bool checkForAllReadWriteInstructions(const llvm::function_ref< bool(Instruction &)> &Pred, AbstractAttribute &QueryingAA)
Check Pred on all Read/Write instructions.
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
An attribute for the function return value.
Definition: Attributor.h:150
InstrTy * getInstruction() const
Definition: CallSite.h:96
uint32_t getAssumedDereferenceableBytes() const
Return assumed dereferenceable bytes.
Definition: Attributor.h:1757
ChangeStatus indicatePessimisticFixpoint() override
See AbstractState::indicatePessimisticFixpoint(...)
Definition: Attributor.h:1095
attributor
User * getUser() const LLVM_READONLY
Returns the User that contains this Use.
Definition: Use.cpp:40
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1583
int getArgNo() const
}
Definition: Attributor.h:376
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
static cl::opt< bool > ManifestInternal("attributor-manifest-internal", cl::Hidden, cl::desc("Manifest Attributor internal string attributes."), cl::init(false))
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:455
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:1605
bool checkForAllReturnedValuesAndReturnInsts(const function_ref< bool(Value &, const SmallSetVector< ReturnInst *, 4 > &)> &Pred, const AbstractAttribute &QueryingAA)
Check Pred on all values potentially returned by F.
const T & getValue() const LLVM_LVALUE_FUNCTION
Definition: Optional.h:255
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:253
bool isAssumedNoFree() const
Return true if "nofree" is assumed.
Definition: Attributor.h:1573
This class represents a no-op cast from one type to another.
void initializeAttributorLegacyPassPass(PassRegistry &)
ChangeStatus run(Module &M)
Run the analyses until a fixpoint is reached or enforced (timeout).
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
Definition: CallSite.h:385
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:601
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:732
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
An abstract interface for all nonnull attributes.
Definition: Attributor.h:1491
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1093
ChangeStatus manifest(Attributor &A) override
See AbstractAttribute::manifest(...).
Definition: Attributor.h:1255
Value * getOperand(unsigned i) const
Definition: User.h:169
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
Definition: Attributor.cpp:315
#define STATS_DECLTRACK_CS_ATTR(NAME)
Definition: Attributor.cpp:89
IntegerState & takeKnownMaximum(base_t Value)
Take maximum of known and Value.
Definition: Attributor.h:1146
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition: Attributor.h:379
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:156
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
const AAType & getAAFor(const AbstractAttribute &QueryingAA, const IRPosition &IRP, bool TrackDependence=true)
Lookup an abstract attribute of type AAType at position IRP.
Definition: Attributor.h:757
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1563
#define BUILD_STAT_NAME(NAME, TYPE)
Definition: Attributor.cpp:72
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
const BasicBlock & getEntryBlock() const
Definition: Function.h:664
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock *> Preds, const char *Suffix, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
AbstractState StateType
Definition: Attributor.h:1322
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:168
base_t getKnown() const
Return the known state encoding.
Definition: Attributor.h:1101
#define STATS_DECLTRACK_FN_ATTR(NAME)
Definition: Attributor.cpp:87
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:153
bool checkForAllCallLikeInstructions(const function_ref< bool(Instruction &)> &Pred, const AbstractAttribute &QueryingAA)
Check Pred on all call-like instructions (=CallBased derived).
Definition: Attributor.h:881
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1840
void removeAttrs(ArrayRef< Attribute::AttrKind > AKs)
Remove the attribute of kind AKs existing in the IR at this position.
Definition: Attributor.h:451
unsigned getNumArgOperands() const
Definition: CallSite.h:303
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
Instruction * getInstruction() const
Return the underlying instruction.
Definition: CallSite.h:772
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:1544
An abstract interface for all dereferenceable attribute.
Definition: Attributor.h:1733
This is an important base class in LLVM.
Definition: Constant.h:41
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:224
const Instruction & front() const
Definition: BasicBlock.h:285
ValTy * getArgument(unsigned ArgNo) const
Definition: CallSite.h:193
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Value & getAnchorValue()
Return the value this abstract attribute is anchored with.
Definition: Attributor.h:256
Represent the analysis usage information of a pass.
static const IRPosition returned(const Function &F)
Create a position describing the returned value of F.
Definition: Attributor.h:178
An attribute for a function (scope).
Definition: Attributor.h:152
virtual void initialize(Attributor &A)
Initialize the state with the information in the Attributor A.
Definition: Attributor.h:1335
bool isAssumedNoCaptureMaybeReturned() const
Return true if we assume that the underlying value is not captured in its respective scope but we all...
Definition: Attributor.h:1832
constexpr double e
Definition: MathExtras.h:57
Attribute::AttrKind getKindAsEnum() const
Return the attribute&#39;s kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:202
An attribute for a function argument.
Definition: Attributor.h:154
bool isFnInterfaceKind() const
}
Definition: Attributor.h:309
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:186
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:713
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
size_t arg_size() const
Definition: Function.h:728
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Definition: CallSite.h:795
Argument * getArg(unsigned i) const
Definition: Function.h:713