//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
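// As a worked example (an illustration, not generated output), the line
// STATS_DECLTRACK_ARG_ATTR(returned) expands roughly to:
//   { STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned); }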

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
/// already exist.
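///
/// For example (a sketch of the intended shape, not output from a run),
/// advancing a pointer to { i32, i32 } by 4 bytes yields a GEP with indices
/// (0, 1) rather than a raw i8* byte offset.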
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    SmallVector<Value *, 4> Indices;
    std::string GEPName = Ptr->getName().str() + ".0";

    // Add 0 index to look through the pointer.
    assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
           "Offset out of bounds");
    Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));

    Type *Ty = PtrElemTy;
    do {
      auto *STy = dyn_cast<StructType>(Ty);
      if (!STy)
        // Non-aggregate type, we cast and make byte-wise progress now.
        break;

      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;

      uint64_t Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      uint64_t Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);

      LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                        << " Idx: " << Idx << " Rem: " << Rem << "\n");

      GEPName += "." + std::to_string(Idx);
      Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
      Offset = Rem;
    } while (Offset);

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (Offset) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
                          GEPName + ".b" + Twine(Offset));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
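///
/// For example, for `%v = select i1 %c, i8* %a, i8* %b` the traversal visits
/// %a and %b (or only one of them if %c simplifies to a constant).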
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          true, 32, StripCB))
    return false;
  return true;
}

static const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccessPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument"
                    << " Position: " << Pos
                    << " CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a)
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (OAS.getOffset() == OffsetAndSize::Unknown ||
        OAS.getSize() == OffsetAndSize::Unknown ||
        getOffset() == OffsetAndSize::Unknown ||
        getSize() == OffsetAndSize::Unknown)
      return true;

    // Check if one offset point is in the other interval [offset, offset+size].
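    // For example, an access at offset 0 of size 8 overlaps an access at
    // offset 4 of size 4 (0 + 8 > 4 and 0 < 4 + 4), while two size-4 accesses
    // at offsets 0 and 4 do not.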
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offset or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};

/// Implementation of the DenseMapInfo.
///
///{
AccessAsInstructionInfo::Access AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
AccessAsInstructionInfo::Access AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned AccessAsInstructionInfo::getHashValue(
    const AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool AccessAsInstructionInfo::isEqual(const AccessAsInstructionInfo::Access &LHS,
                                      const AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
AAPointerInfo::Access DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
AAPointerInfo::Access DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}
unsigned DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const AAPointerInfo::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}
bool DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const AAPointerInfo::Access &LHS, const AAPointerInfo::Access &RHS) {
  return LHS == RHS;
}
///}
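// Note that AccessAsInstructionInfo hashes and compares only the remote
// instruction, so a bin keeps at most one Access per instruction, while the
// DenseMapInfo above also keys on the written value and the access kind.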

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      for (auto &Access : It.getSecond())
        if (!CB(Access, OAS == ItOAS))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};
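// For illustration: two 4-byte stores to offsets 0 and 4 of the same pointer
// land in the bins {0, 4} and {4, 4}; a subsequent 8-byte load at offset 0
// is reported as interfering with both, since its range overlaps each bin.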

struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Size == OffsetAndSize::Unknown && Offset != OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = 0;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");

      OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        if (CE->isCompare())
          return true;
        if (!CE->isGEPWithNoNotionalOverIndexing()) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(
                           CurPtr->getType()->getPointerElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, PtrOI, Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Check if the PHI is invariant (so far).
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        APInt Offset(DL.getIndexTypeSizeInBits(AssociatedValue.getType()), 0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, PtrOI.Offset, Changed,
                            LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            PtrOI.Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << "  - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "  --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << "  - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  DenseMap<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};


ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimpleRetVal =
        A.getAssumedSimplified(V, *this, UsedAssumedInformation);
    if (!SimpleRetVal.hasValue())
      return true;
    if (!SimpleRetVal.getValue())
      return false;
    Value *RetVal = *SimpleRetVal;
    assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[RetVal].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I);
  };

  // Discover returned values from all live returned instructions in the
  // associated function.
  bool UsedAssumedInformation = false;
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is stronger than
  /// unordered or monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function specific for intrinsics which are potentially volatile.
  static bool isNoSyncIntrinsic(Instruction *I);
};

1780 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1781  if (!I->isAtomic())
1782  return false;
1783 
1784  if (auto *FI = dyn_cast<FenceInst>(I))
1785  // All legal orderings for fence are stronger than monotonic.
1786  return FI->getSyncScopeID() != SyncScope::SingleThread;
1787  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1788  // Unordered is not a legal ordering for cmpxchg.
1789  return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1790  AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1791  }
1792 
1793  AtomicOrdering Ordering;
1794  switch (I->getOpcode()) {
1795  case Instruction::AtomicRMW:
1796  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1797  break;
1798  case Instruction::Store:
1799  Ordering = cast<StoreInst>(I)->getOrdering();
1800  break;
1801  case Instruction::Load:
1802  Ordering = cast<LoadInst>(I)->getOrdering();
1803  break;
1804  default:
1806  "New atomic operations need to be known in the attributor.");
1807  }
1808 
1809  return (Ordering != AtomicOrdering::Unordered &&
1810  Ordering != AtomicOrdering::Monotonic);
1811 }
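// Illustrative example (added for exposition, not part of the original
// source): only "relaxed" atomics, i.e. unordered or monotonic ordering, are
// compatible with nosync here. In LLVM IR:
//
//   %a = load atomic i32, i32* %p monotonic, align 4 ; relaxed, keeps nosync
//   %b = load atomic i32, i32* %p acquire, align 4   ; non-relaxed, blocks it
//
// isNonRelaxedAtomic returns false for the first load and true for the second.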
1812 
1813 /// Return true if this intrinsic is nosync. This is only used for intrinsics
1814 /// which would be nosync except that they have a volatile flag. All other
1815 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1816 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1817  if (auto *MI = dyn_cast<MemIntrinsic>(I))
1818  return !MI->isVolatile();
1819  return false;
1820 }
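// Illustrative example (added for exposition, not part of the original
// source): the memcpy/memmove/memset intrinsics are the typical MemIntrinsic
// case here; only the volatile flag decides:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false) ; nosync
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)  ; may sync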
1821 
1822 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1823 
1824  auto CheckRWInstForNoSync = [&](Instruction &I) {
1825  // We are looking for volatile instructions or non-relaxed atomics.
1826 
1827  if (const auto *CB = dyn_cast<CallBase>(&I)) {
1828  if (CB->hasFnAttr(Attribute::NoSync))
1829  return true;
1830 
1831  if (isNoSyncIntrinsic(&I))
1832  return true;
1833 
1834  const auto &NoSyncAA = A.getAAFor<AANoSync>(
1835  *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1836  return NoSyncAA.isAssumedNoSync();
1837  }
1838 
1839  if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1840  return true;
1841 
1842  return false;
1843  };
1844 
1845  auto CheckForNoSync = [&](Instruction &I) {
1846  // At this point we handled all read/write effects and they are all
1847  // nosync, so they can be skipped.
1848  if (I.mayReadOrWriteMemory())
1849  return true;
1850 
1851  // non-convergent and readnone imply nosync.
1852  return !cast<CallBase>(I).isConvergent();
1853  };
1854 
1855  bool UsedAssumedInformation = false;
1856  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1857  UsedAssumedInformation) ||
1858  !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1859  UsedAssumedInformation))
1860  return indicatePessimisticFixpoint();
1861 
1862  return ChangeStatus::UNCHANGED;
1863 }
1864 
1865 struct AANoSyncFunction final : public AANoSyncImpl {
1866  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1867  : AANoSyncImpl(IRP, A) {}
1868 
1869  /// See AbstractAttribute::trackStatistics()
1870  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1871 };
1872 
1873 /// NoSync attribute deduction for a call site.
1874 struct AANoSyncCallSite final : AANoSyncImpl {
1875  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1876  : AANoSyncImpl(IRP, A) {}
1877 
1878  /// See AbstractAttribute::initialize(...).
1879  void initialize(Attributor &A) override {
1880  AANoSyncImpl::initialize(A);
1881  Function *F = getAssociatedFunction();
1882  if (!F || F->isDeclaration())
1883  indicatePessimisticFixpoint();
1884  }
1885 
1886  /// See AbstractAttribute::updateImpl(...).
1887  ChangeStatus updateImpl(Attributor &A) override {
1888  // TODO: Once we have call site specific value information we can provide
1889  // call site specific liveness information and then it makes
1890  // sense to specialize attributes for call sites arguments instead of
1891  // redirecting requests to the callee argument.
1892  Function *F = getAssociatedFunction();
1893  const IRPosition &FnPos = IRPosition::function(*F);
1894  auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1895  return clampStateAndIndicateChange(getState(), FnAA.getState());
1896  }
1897 
1898  /// See AbstractAttribute::trackStatistics()
1899  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1900 };
1901 
1902 /// ------------------------ No-Free Attributes ----------------------------
1903 
1904 struct AANoFreeImpl : public AANoFree {
1905  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1906 
1907  /// See AbstractAttribute::updateImpl(...).
1908  ChangeStatus updateImpl(Attributor &A) override {
1909  auto CheckForNoFree = [&](Instruction &I) {
1910  const auto &CB = cast<CallBase>(I);
1911  if (CB.hasFnAttr(Attribute::NoFree))
1912  return true;
1913 
1914  const auto &NoFreeAA = A.getAAFor<AANoFree>(
1915  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1916  return NoFreeAA.isAssumedNoFree();
1917  };
1918 
1919  bool UsedAssumedInformation = false;
1920  if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1921  UsedAssumedInformation))
1922  return indicatePessimisticFixpoint();
1923  return ChangeStatus::UNCHANGED;
1924  }
1925 
1926  /// See AbstractAttribute::getAsStr().
1927  const std::string getAsStr() const override {
1928  return getAssumed() ? "nofree" : "may-free";
1929  }
1930 };
1931 
1932 struct AANoFreeFunction final : public AANoFreeImpl {
1933  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1934  : AANoFreeImpl(IRP, A) {}
1935 
1936  /// See AbstractAttribute::trackStatistics()
1937  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1938 };
1939 
1940 /// NoFree attribute deduction for a call site.
1941 struct AANoFreeCallSite final : AANoFreeImpl {
1942  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1943  : AANoFreeImpl(IRP, A) {}
1944 
1945  /// See AbstractAttribute::initialize(...).
1946  void initialize(Attributor &A) override {
1947  AANoFreeImpl::initialize(A);
1948  Function *F = getAssociatedFunction();
1949  if (!F || F->isDeclaration())
1950  indicatePessimisticFixpoint();
1951  }
1952 
1953  /// See AbstractAttribute::updateImpl(...).
1954  ChangeStatus updateImpl(Attributor &A) override {
1955  // TODO: Once we have call site specific value information we can provide
1956  // call site specific liveness information and then it makes
1957  // sense to specialize attributes for call sites arguments instead of
1958  // redirecting requests to the callee argument.
1959  Function *F = getAssociatedFunction();
1960  const IRPosition &FnPos = IRPosition::function(*F);
1961  auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1962  return clampStateAndIndicateChange(getState(), FnAA.getState());
1963  }
1964 
1965  /// See AbstractAttribute::trackStatistics()
1966  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1967 };
1968 
1969 /// NoFree attribute for floating values.
1970 struct AANoFreeFloating : AANoFreeImpl {
1971  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1972  : AANoFreeImpl(IRP, A) {}
1973 
1974  /// See AbstractAttribute::trackStatistics()
1975  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
1976 
1977  /// See AbstractAttribute::updateImpl(...).
1978  ChangeStatus updateImpl(Attributor &A) override {
1979  const IRPosition &IRP = getIRPosition();
1980 
1981  const auto &NoFreeAA = A.getAAFor<AANoFree>(
1982  *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1983  if (NoFreeAA.isAssumedNoFree())
1984  return ChangeStatus::UNCHANGED;
1985 
1986  Value &AssociatedValue = getIRPosition().getAssociatedValue();
1987  auto Pred = [&](const Use &U, bool &Follow) -> bool {
1988  Instruction *UserI = cast<Instruction>(U.getUser());
1989  if (auto *CB = dyn_cast<CallBase>(UserI)) {
1990  if (CB->isBundleOperand(&U))
1991  return false;
1992  if (!CB->isArgOperand(&U))
1993  return true;
1994  unsigned ArgNo = CB->getArgOperandNo(&U);
1995 
1996  const auto &NoFreeArg = A.getAAFor<AANoFree>(
1997  *this, IRPosition::callsite_argument(*CB, ArgNo),
1998  DepClassTy::REQUIRED);
1999  return NoFreeArg.isAssumedNoFree();
2000  }
2001 
2002  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2003  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2004  Follow = true;
2005  return true;
2006  }
2007  if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2008  isa<ReturnInst>(UserI))
2009  return true;
2010 
2011  // Unknown user.
2012  return false;
2013  };
2014  if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2015  return indicatePessimisticFixpoint();
2016 
2017  return ChangeStatus::UNCHANGED;
2018  }
2019 };
2020 
2021 /// NoFree attribute for a function argument.
2022 struct AANoFreeArgument final : AANoFreeFloating {
2023  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2024  : AANoFreeFloating(IRP, A) {}
2025 
2026  /// See AbstractAttribute::trackStatistics()
2027  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2028 };
2029 
2030 /// NoFree attribute for call site arguments.
2031 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2032  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2033  : AANoFreeFloating(IRP, A) {}
2034 
2035  /// See AbstractAttribute::updateImpl(...).
2036  ChangeStatus updateImpl(Attributor &A) override {
2037  // TODO: Once we have call site specific value information we can provide
2038  // call site specific liveness information and then it makes
2039  // sense to specialize attributes for call sites arguments instead of
2040  // redirecting requests to the callee argument.
2041  Argument *Arg = getAssociatedArgument();
2042  if (!Arg)
2043  return indicatePessimisticFixpoint();
2044  const IRPosition &ArgPos = IRPosition::argument(*Arg);
2045  auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2046  return clampStateAndIndicateChange(getState(), ArgAA.getState());
2047  }
2048 
2049  /// See AbstractAttribute::trackStatistics()
2050  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2051 };
2052 
2053 /// NoFree attribute for function return value.
2054 struct AANoFreeReturned final : AANoFreeFloating {
2055  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2056  : AANoFreeFloating(IRP, A) {
2057  llvm_unreachable("NoFree is not applicable to function returns!");
2058  }
2059 
2060  /// See AbstractAttribute::initialize(...).
2061  void initialize(Attributor &A) override {
2062  llvm_unreachable("NoFree is not applicable to function returns!");
2063  }
2064 
2065  /// See AbstractAttribute::updateImpl(...).
2066  ChangeStatus updateImpl(Attributor &A) override {
2067  llvm_unreachable("NoFree is not applicable to function returns!");
2068  }
2069 
2070  /// See AbstractAttribute::trackStatistics()
2071  void trackStatistics() const override {}
2072 };
2073 
2074 /// NoFree attribute deduction for a call site return value.
2075 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2076  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2077  : AANoFreeFloating(IRP, A) {}
2078 
2079  ChangeStatus updateImpl(Attributor &A) override {
2080  return ChangeStatus::UNCHANGED;
2081  }
2082  /// See AbstractAttribute::trackStatistics()
2083  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2084 };
2085 
2086 /// ------------------------ NonNull Argument Attribute ------------------------
2087 static int64_t getKnownNonNullAndDerefBytesForUse(
2088  Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2089  const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2090  TrackUse = false;
2091 
2092  const Value *UseV = U->get();
2093  if (!UseV->getType()->isPointerTy())
2094  return 0;
2095 
2096  // We need to follow common pointer manipulation uses to the accesses they
2097  // feed into. We can try to be smart to avoid looking through things we do not
2098  // like for now, e.g., non-inbounds GEPs.
2099  if (isa<CastInst>(I)) {
2100  TrackUse = true;
2101  return 0;
2102  }
2103 
2104  if (isa<GetElementPtrInst>(I)) {
2105  TrackUse = true;
2106  return 0;
2107  }
2108 
2109  Type *PtrTy = UseV->getType();
2110  const Function *F = I->getFunction();
2111  bool NullPointerIsDefined =
2112  F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2113  const DataLayout &DL = A.getInfoCache().getDL();
2114  if (const auto *CB = dyn_cast<CallBase>(I)) {
2115  if (CB->isBundleOperand(U)) {
2116  if (RetainedKnowledge RK = getKnowledgeFromUse(
2117  U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2118  IsNonNull |=
2119  (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2120  return RK.ArgValue;
2121  }
2122  return 0;
2123  }
2124 
2125  if (CB->isCallee(U)) {
2126  IsNonNull |= !NullPointerIsDefined;
2127  return 0;
2128  }
2129 
2130  unsigned ArgNo = CB->getArgOperandNo(U);
2131  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2132  // As long as we only use known information there is no need to track
2133  // dependences here.
2134  auto &DerefAA =
2135  A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2136  IsNonNull |= DerefAA.isKnownNonNull();
2137  return DerefAA.getKnownDereferenceableBytes();
2138  }
2139 
2140  int64_t Offset;
2141  const Value *Base =
2142  getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2143  if (Base) {
2144  if (Base == &AssociatedValue &&
2145  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2146  int64_t DerefBytes =
2147  (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2148 
2149  IsNonNull |= !NullPointerIsDefined;
2150  return std::max(int64_t(0), DerefBytes);
2151  }
2152  }
2153 
2154  /// Corner case when an offset is 0.
2155  Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL,
2156  /*AllowNonInbounds*/ true);
2157  if (Base) {
2158  if (Offset == 0 && Base == &AssociatedValue &&
2159  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2160  int64_t DerefBytes =
2161  (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2162  IsNonNull |= !NullPointerIsDefined;
2163  return std::max(int64_t(0), DerefBytes);
2164  }
2165  }
2166 
2167  return 0;
2168 }
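// Illustrative example (added for exposition, not part of the original
// source): for a plain load in a function where null is not defined (default
// address space, no null-pointer-is-valid attribute),
//
//   %v = load i32, i32* %p
//
// the helper above marks the use of %p as known nonnull and derives four
// dereferenceable bytes from the i32 store size at offset zero.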
2169 
2170 struct AANonNullImpl : AANonNull {
2171  AANonNullImpl(const IRPosition &IRP, Attributor &A)
2172  : AANonNull(IRP, A),
2173  NullIsDefined(NullPointerIsDefined(
2174  getAnchorScope(),
2175  getAssociatedValue().getType()->getPointerAddressSpace())) {}
2176 
2177  /// See AbstractAttribute::initialize(...).
2178  void initialize(Attributor &A) override {
2179  Value &V = getAssociatedValue();
2180  if (!NullIsDefined &&
2181  hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2182  /* IgnoreSubsumingPositions */ false, &A)) {
2183  indicateOptimisticFixpoint();
2184  return;
2185  }
2186 
2187  if (isa<ConstantPointerNull>(V)) {
2188  indicatePessimisticFixpoint();
2189  return;
2190  }
2191 
2192  AANonNull::initialize(A);
2193 
2194  bool CanBeNull, CanBeFreed;
2195  if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2196  CanBeFreed)) {
2197  if (!CanBeNull) {
2198  indicateOptimisticFixpoint();
2199  return;
2200  }
2201  }
2202 
2203  if (isa<GlobalValue>(&getAssociatedValue())) {
2204  indicatePessimisticFixpoint();
2205  return;
2206  }
2207 
2208  if (Instruction *CtxI = getCtxI())
2209  followUsesInMBEC(*this, A, getState(), *CtxI);
2210  }
2211 
2212  /// See followUsesInMBEC
2213  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2214  AANonNull::StateType &State) {
2215  bool IsNonNull = false;
2216  bool TrackUse = false;
2217  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2218  IsNonNull, TrackUse);
2219  State.setKnown(IsNonNull);
2220  return TrackUse;
2221  }
2222 
2223  /// See AbstractAttribute::getAsStr().
2224  const std::string getAsStr() const override {
2225  return getAssumed() ? "nonnull" : "may-null";
2226  }
2227 
2228  /// Flag to determine if the underlying value can be null and still allow
2229  /// valid accesses.
2230  const bool NullIsDefined;
2231 };
2232 
2233 /// NonNull attribute for a floating value.
2234 struct AANonNullFloating : public AANonNullImpl {
2235  AANonNullFloating(const IRPosition &IRP, Attributor &A)
2236  : AANonNullImpl(IRP, A) {}
2237 
2238  /// See AbstractAttribute::updateImpl(...).
2239  ChangeStatus updateImpl(Attributor &A) override {
2240  const DataLayout &DL = A.getDataLayout();
2241 
2242  DominatorTree *DT = nullptr;
2243  AssumptionCache *AC = nullptr;
2244  InformationCache &InfoCache = A.getInfoCache();
2245  if (const Function *Fn = getAnchorScope()) {
2246  DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2247  AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2248  }
2249 
2250  auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2251  AANonNull::StateType &T, bool Stripped) -> bool {
2252  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2253  DepClassTy::REQUIRED);
2254  if (!Stripped && this == &AA) {
2255  if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2256  T.indicatePessimisticFixpoint();
2257  } else {
2258  // Use abstract attribute information.
2259  const AANonNull::StateType &NS = AA.getState();
2260  T ^= NS;
2261  }
2262  return T.isValidState();
2263  };
2264 
2265  StateType T;
2266  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2267  VisitValueCB, getCtxI()))
2268  return indicatePessimisticFixpoint();
2269 
2270  return clampStateAndIndicateChange(getState(), T);
2271  }
2272 
2273  /// See AbstractAttribute::trackStatistics()
2274  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2275 };
2276 
2277 /// NonNull attribute for function return value.
2278 struct AANonNullReturned final
2279  : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2280  AANonNullReturned(const IRPosition &IRP, Attributor &A)
2281  : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2282 
2283  /// See AbstractAttribute::getAsStr().
2284  const std::string getAsStr() const override {
2285  return getAssumed() ? "nonnull" : "may-null";
2286  }
2287 
2288  /// See AbstractAttribute::trackStatistics()
2289  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2290 };
2291 
2292 /// NonNull attribute for function argument.
2293 struct AANonNullArgument final
2294  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2295  AANonNullArgument(const IRPosition &IRP, Attributor &A)
2296  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2297 
2298  /// See AbstractAttribute::trackStatistics()
2299  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2300 };
2301 
2302 struct AANonNullCallSiteArgument final : AANonNullFloating {
2303  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2304  : AANonNullFloating(IRP, A) {}
2305 
2306  /// See AbstractAttribute::trackStatistics()
2307  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2308 };
2309 
2310 /// NonNull attribute for a call site return position.
2311 struct AANonNullCallSiteReturned final
2312  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2313  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2314  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2315 
2316  /// See AbstractAttribute::trackStatistics()
2317  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2318 };
2319 
2320 /// ------------------------ No-Recurse Attributes ----------------------------
2321 
2322 struct AANoRecurseImpl : public AANoRecurse {
2323  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2324 
2325  /// See AbstractAttribute::getAsStr()
2326  const std::string getAsStr() const override {
2327  return getAssumed() ? "norecurse" : "may-recurse";
2328  }
2329 };
2330 
2331 struct AANoRecurseFunction final : AANoRecurseImpl {
2332  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2333  : AANoRecurseImpl(IRP, A) {}
2334 
2335  /// See AbstractAttribute::initialize(...).
2336  void initialize(Attributor &A) override {
2337  AANoRecurseImpl::initialize(A);
2338  if (const Function *F = getAnchorScope())
2339  if (A.getInfoCache().getSccSize(*F) != 1)
2340  indicatePessimisticFixpoint();
2341  }
2342 
2343  /// See AbstractAttribute::updateImpl(...).
2344  ChangeStatus updateImpl(Attributor &A) override {
2345 
2346  // If all live call sites are known to be no-recurse, we are as well.
2347  auto CallSitePred = [&](AbstractCallSite ACS) {
2348  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2349  *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2350  DepClassTy::OPTIONAL);
2351  return NoRecurseAA.isKnownNoRecurse();
2352  };
2353  bool AllCallSitesKnown;
2354  if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2355  // If we know all call sites and all are known no-recurse, we are done.
2356  // If all known call sites, which might not be all that exist, are known
2357  // to be no-recurse, we are not done but we can continue to assume
2358  // no-recurse. If one of the call sites we have not visited will become
2359  // live, another update is triggered.
2360  if (AllCallSitesKnown)
2361  indicateOptimisticFixpoint();
2362  return ChangeStatus::UNCHANGED;
2363  }
2364 
2365  // If the above check does not hold anymore we look at the calls.
2366  auto CheckForNoRecurse = [&](Instruction &I) {
2367  const auto &CB = cast<CallBase>(I);
2368  if (CB.hasFnAttr(Attribute::NoRecurse))
2369  return true;
2370 
2371  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2372  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2373  if (!NoRecurseAA.isAssumedNoRecurse())
2374  return false;
2375 
2376  // Recursion to the same function
2377  if (CB.getCalledFunction() == getAnchorScope())
2378  return false;
2379 
2380  return true;
2381  };
2382 
2383  bool UsedAssumedInformation = false;
2384  if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2385  UsedAssumedInformation))
2386  return indicatePessimisticFixpoint();
2387  return ChangeStatus::UNCHANGED;
2388  }
2389 
2390  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2391 };
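// Illustrative example (added for exposition, not part of the original
// source): for functions in singleton SCCs such as
//
//   void leaf() {}
//   void caller() { leaf(); }
//
// `leaf` contains no call-like instructions and `caller` only calls the
// distinct, norecurse `leaf`, so the update above keeps both norecurse.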
2392 
2393 /// NoRecurse attribute deduction for a call site.
2394 struct AANoRecurseCallSite final : AANoRecurseImpl {
2395  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2396  : AANoRecurseImpl(IRP, A) {}
2397 
2398  /// See AbstractAttribute::initialize(...).
2399  void initialize(Attributor &A) override {
2400  AANoRecurseImpl::initialize(A);
2401  Function *F = getAssociatedFunction();
2402  if (!F || F->isDeclaration())
2403  indicatePessimisticFixpoint();
2404  }
2405 
2406  /// See AbstractAttribute::updateImpl(...).
2407  ChangeStatus updateImpl(Attributor &A) override {
2408  // TODO: Once we have call site specific value information we can provide
2409  // call site specific liveness information and then it makes
2410  // sense to specialize attributes for call sites arguments instead of
2411  // redirecting requests to the callee argument.
2412  Function *F = getAssociatedFunction();
2413  const IRPosition &FnPos = IRPosition::function(*F);
2414  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2415  return clampStateAndIndicateChange(getState(), FnAA.getState());
2416  }
2417 
2418  /// See AbstractAttribute::trackStatistics()
2419  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2420 };
2421 
2422 /// -------------------- Undefined-Behavior Attributes ------------------------
2423 
2424 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2425  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2426  : AAUndefinedBehavior(IRP, A) {}
2427 
2428  /// See AbstractAttribute::updateImpl(...).
2429  // TODO: We should not only check instructions that access memory through a pointer (i.e. also branches etc.)
2430  ChangeStatus updateImpl(Attributor &A) override {
2431  const size_t UBPrevSize = KnownUBInsts.size();
2432  const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2433 
2434  auto InspectMemAccessInstForUB = [&](Instruction &I) {
2435  // Lang ref now states volatile store is not UB, let's skip them.
2436  if (I.isVolatile() && I.mayWriteToMemory())
2437  return true;
2438 
2439  // Skip instructions that are already saved.
2440  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2441  return true;
2442 
2443  // If we reach here, we know we have an instruction
2444  // that accesses memory through a pointer operand,
2445  // for which getPointerOperand() should give it to us.
2446  Value *PtrOp =
2447  const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2448  assert(PtrOp &&
2449  "Expected pointer operand of memory accessing instruction");
2450 
2451  // Either we stopped and the appropriate action was taken,
2452  // or we got back a simplified value to continue.
2453  Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2454  if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2455  return true;
2456  const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2457 
2458  // A memory access through a pointer is considered UB
2459  // only if the pointer has constant null value.
2460  // TODO: Expand it to not only check constant values.
2461  if (!isa<ConstantPointerNull>(PtrOpVal)) {
2462  AssumedNoUBInsts.insert(&I);
2463  return true;
2464  }
2465  const Type *PtrTy = PtrOpVal->getType();
2466 
2467  // Because we only consider instructions inside functions,
2468  // assume that a parent function exists.
2469  const Function *F = I.getFunction();
2470 
2471  // A memory access using constant null pointer is only considered UB
2472  // if null pointer is _not_ defined for the target platform.
2474  AssumedNoUBInsts.insert(&I);
2475  else
2476  KnownUBInsts.insert(&I);
2477  return true;
2478  };
2479 
2480  auto InspectBrInstForUB = [&](Instruction &I) {
2481  // A conditional branch instruction is considered UB if it has `undef`
2482  // condition.
2483 
2484  // Skip instructions that are already saved.
2485  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2486  return true;
2487 
2488  // We know we have a branch instruction.
2489  auto *BrInst = cast<BranchInst>(&I);
2490 
2491  // Unconditional branches are never considered UB.
2492  if (BrInst->isUnconditional())
2493  return true;
2494 
2495  // Either we stopped and the appropriate action was taken,
2496  // or we got back a simplified value to continue.
2497  Optional<Value *> SimplifiedCond =
2498  stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2499  if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2500  return true;
2501  AssumedNoUBInsts.insert(&I);
2502  return true;
2503  };
2504 
2505  auto InspectCallSiteForUB = [&](Instruction &I) {
2506  // Check whether a callsite always causes UB or not.
2507 
2508  // Skip instructions that are already saved.
2509  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2510  return true;
2511 
2512  // Check nonnull and noundef argument attribute violation for each
2513  // callsite.
2514  CallBase &CB = cast<CallBase>(I);
2515  const Function *Callee = CB.getCalledFunction();
2516  if (!Callee)
2517  return true;
2518  for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2519  // If current argument is known to be simplified to null pointer and the
2520  // corresponding argument position is known to have nonnull attribute,
2521  // the argument is poison. Furthermore, if the argument is poison and
2522  // the position is known to have noundef attribute, this callsite is
2523  // considered UB.
2524  if (idx >= Callee->arg_size())
2525  break;
2526  Value *ArgVal = CB.getArgOperand(idx);
2527  if (!ArgVal)
2528  continue;
2529  // Here, we handle three cases.
2530  // (1) Not having a value means it is dead. (we can replace the value
2531  // with undef)
2532  // (2) Simplified to undef. The argument violate noundef attriubte.
2533  // (3) Simplified to null pointer where known to be nonnull.
2534  // The argument is a poison value and violates the noundef attribute.
2535  IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2536  auto &NoUndefAA =
2537  A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2538  if (!NoUndefAA.isKnownNoUndef())
2539  continue;
2540  bool UsedAssumedInformation = false;
2541  Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2542  IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2543  if (UsedAssumedInformation)
2544  continue;
2545  if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2546  return true;
2547  if (!SimplifiedVal.hasValue() ||
2548  isa<UndefValue>(*SimplifiedVal.getValue())) {
2549  KnownUBInsts.insert(&I);
2550  continue;
2551  }
2552  if (!ArgVal->getType()->isPointerTy() ||
2553  !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2554  continue;
2555  auto &NonNullAA =
2556  A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2557  if (NonNullAA.isKnownNonNull())
2558  KnownUBInsts.insert(&I);
2559  }
2560  return true;
2561  };
2562 
2563  auto InspectReturnInstForUB =
2564  [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2565  // Check if a return instruction always causes UB or not.
2566  // Note: It is guaranteed that the returned position of the anchor
2567  // scope has noundef attribute when this is called.
2568  // We also ensure the return position is not "assumed dead"
2569  // because the returned value was then potentially simplified to
2570  // `undef` in AAReturnedValues without removing the `noundef`
2571  // attribute yet.
2572 
2573  // When the returned position has the noundef attribute, UB occurs in the
2574  // following cases.
2575  // (1) Returned value is known to be undef.
2576  // (2) The value is known to be a null pointer and the returned
2577  // position has nonnull attribute (because the returned value is
2578  // poison).
2579  bool FoundUB = false;
2580  if (isa<UndefValue>(V)) {
2581  FoundUB = true;
2582  } else {
2583  if (isa<ConstantPointerNull>(V)) {
2584  auto &NonNullAA = A.getAAFor<AANonNull>(
2585  *this, IRPosition::returned(*getAnchorScope()),
2586  DepClassTy::NONE);
2587  if (NonNullAA.isKnownNonNull())
2588  FoundUB = true;
2589  }
2590  }
2591 
2592  if (FoundUB)
2593  for (ReturnInst *RI : RetInsts)
2594  KnownUBInsts.insert(RI);
2595  return true;
2596  };
2597 
2598  bool UsedAssumedInformation = false;
2599  A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2600  {Instruction::Load, Instruction::Store,
2601  Instruction::AtomicCmpXchg,
2602  Instruction::AtomicRMW},
2603  UsedAssumedInformation,
2604  /* CheckBBLivenessOnly */ true);
2605  A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2606  UsedAssumedInformation,
2607  /* CheckBBLivenessOnly */ true);
2608  A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2609  UsedAssumedInformation);
2610 
2611  // If the returned position of the anchor scope has the noundef attribute,
2612  // check all returned instructions.
2613  if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2614  const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2615  if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2616  auto &RetPosNoUndefAA =
2617  A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2618  if (RetPosNoUndefAA.isKnownNoUndef())
2619  A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2620  *this);
2621  }
2622  }
2623 
2624  if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2625  UBPrevSize != KnownUBInsts.size())
2626  return ChangeStatus::CHANGED;
2627  return ChangeStatus::UNCHANGED;
2628  }
2629 
2630  bool isKnownToCauseUB(Instruction *I) const override {
2631  return KnownUBInsts.count(I);
2632  }
2633 
2634  bool isAssumedToCauseUB(Instruction *I) const override {
2635  // In simple words, if an instruction is not in the assumed-to-_not_-cause-UB
2636  // set, then it is assumed to cause UB (that includes those
2637  // in the KnownUBInsts set). The rest is boilerplate
2638  // to ensure that it is one of the instructions we test
2639  // for UB.
2640 
2641  switch (I->getOpcode()) {
2642  case Instruction::Load:
2643  case Instruction::Store:
2644  case Instruction::AtomicCmpXchg:
2645  case Instruction::AtomicRMW:
2646  return !AssumedNoUBInsts.count(I);
2647  case Instruction::Br: {
2648  auto BrInst = cast<BranchInst>(I);
2649  if (BrInst->isUnconditional())
2650  return false;
2651  return !AssumedNoUBInsts.count(I);
2652  } break;
2653  default:
2654  return false;
2655  }
2656  return false;
2657  }
2658 
2659  ChangeStatus manifest(Attributor &A) override {
2660  if (KnownUBInsts.empty())
2661  return ChangeStatus::UNCHANGED;
2662  for (Instruction *I : KnownUBInsts)
2663  A.changeToUnreachableAfterManifest(I);
2664  return ChangeStatus::CHANGED;
2665  }
2666 
2667  /// See AbstractAttribute::getAsStr()
2668  const std::string getAsStr() const override {
2669  return getAssumed() ? "undefined-behavior" : "no-ub";
2670  }
2671 
2672  /// Note: The correctness of this analysis depends on the fact that the
2673  /// following 2 sets will stop changing after some point.
2674  /// "Change" here means that their size changes.
2675  /// The size of each set is monotonically increasing
2676  /// (we only add items to them) and it is upper bounded by the number of
2677  /// instructions in the processed function (we can never save more
2678  /// elements in either set than this number). Hence, at some point,
2679  /// they will stop increasing.
2680  /// Consequently, at some point, both sets will have stopped
2681  /// changing, effectively making the analysis reach a fixpoint.
2682 
2683  /// Note: These 2 sets are disjoint and an instruction can be considered
2684  /// one of 3 things:
2685  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2686  /// the KnownUBInsts set.
2687  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2688  /// has a reason to assume it).
2689  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2690  /// could not find a reason to assume or prove that it can cause UB,
2691  /// hence it assumes it doesn't. We have a set for these instructions
2692  /// so that we don't reprocess them in every update.
2693  /// Note however that instructions in this set may cause UB.
2694 
2695 protected:
2696  /// A set of all live instructions _known_ to cause UB.
2697  SmallPtrSet<Instruction *, 8> KnownUBInsts;
2698 
2699 private:
2700  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2701  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2702 
2703  // Should be called on updates in which, if we're processing an instruction
2704  // \p I that depends on a value \p V, one of the following has to happen:
2705  // - If the value is assumed, then stop.
2706  // - If the value is known but undef, then consider it UB.
2707  // - Otherwise, do specific processing with the simplified value.
2708  // We return None in the first 2 cases to signify that an appropriate
2709  // action was taken and the caller should stop.
2710  // Otherwise, we return the simplified value that the caller should
2711  // use for specific processing.
2712  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2713  Instruction *I) {
2714  bool UsedAssumedInformation = false;
2715  Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2716  IRPosition::value(*V), *this, UsedAssumedInformation);
2717  if (!UsedAssumedInformation) {
2718  // Don't depend on assumed values.
2719  if (!SimplifiedV.hasValue()) {
2720  // If it is known (which we tested above) but it doesn't have a value,
2721  // then we can assume `undef` and hence the instruction is UB.
2722  KnownUBInsts.insert(I);
2723  return llvm::None;
2724  }
2725  if (!SimplifiedV.getValue())
2726  return nullptr;
2727  V = *SimplifiedV;
2728  }
2729  if (isa<UndefValue>(V)) {
2730  KnownUBInsts.insert(I);
2731  return llvm::None;
2732  }
2733  return V;
2734  }
2735 };
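// Illustrative example (added for exposition, not part of the original
// source): in a function where null is not defined, a non-volatile access
// through a constant null pointer such as
//
//   store i32 0, i32* null
//
// lands in KnownUBInsts, and manifest() replaces it with an `unreachable` via
// changeToUnreachableAfterManifest().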
2736 
2737 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2738  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2739  : AAUndefinedBehaviorImpl(IRP, A) {}
2740 
2741  /// See AbstractAttribute::trackStatistics()
2742  void trackStatistics() const override {
2743  STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2744  "Number of instructions known to have UB");
2745  BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2746  KnownUBInsts.size();
2747  }
2748 };
2749 
2750 /// ------------------------ Will-Return Attributes ----------------------------
2751 
2752 // Helper function that checks whether a function has any cycle which we
2753 // don't know to be bounded.
2754 // Loops with a maximum trip count are considered bounded; any other cycle is not.
2755 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2756  ScalarEvolution *SE =
2757  A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2758  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2759  // If either SCEV or LoopInfo is not available for the function then we assume
2760  // any cycle to be unbounded cycle.
2761  // We use scc_iterator, which uses Tarjan's algorithm to find all the
2762  // maximal SCCs. To detect if there's a cycle, we only need the maximal ones.
2763  if (!SE || !LI) {
2764  for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2765  if (SCCI.hasCycle())
2766  return true;
2767  return false;
2768  }
2769 
2770  // If there's irreducible control, the function may contain non-loop cycles.
2771  if (mayContainIrreducibleControl(F, LI))
2772  return true;
2773 
2774  // Any loop that does not have a max trip count is considered unbounded cycle.
2775  for (auto *L : LI->getLoopsInPreorder()) {
2776  if (!SE->getSmallConstantMaxTripCount(L))
2777  return true;
2778  }
2779  return false;
2780 }
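// Illustrative example (added for exposition, not part of the original
// source): SCEV can bound the first loop below, so it does not count as an
// unbounded cycle; the second has no computable maximum trip count and makes
// mayContainUnboundedCycle return true:
//
//   for (int i = 0; i < 128; ++i) { ... } // max trip count 128, bounded
//   while (*flag) { ... }                 // no max trip count, unbounded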
2781 
2782 struct AAWillReturnImpl : public AAWillReturn {
2783  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2784  : AAWillReturn(IRP, A) {}
2785 
2786  /// See AbstractAttribute::initialize(...).
2787  void initialize(Attributor &A) override {
2788  AAWillReturn::initialize(A);
2789 
2790  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2791  indicateOptimisticFixpoint();
2792  return;
2793  }
2794  }
2795 
2796  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2797  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2798  // Check for `mustprogress` in the scope and the associated function which
2799  // might be different if this is a call site.
2800  if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2801  (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2802  return false;
2803 
2804  const auto &MemAA =
2805  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2806  if (!MemAA.isAssumedReadOnly())
2807  return false;
2808  if (KnownOnly && !MemAA.isKnownReadOnly())
2809  return false;
2810  if (!MemAA.isKnownReadOnly())
2811  A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2812 
2813  return true;
2814  }
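// Illustrative example (added for exposition, not part of the original
// source): a callee declared as
//
//   declare i32 @f(i32) mustprogress readonly
//
// satisfies the check above even with KnownOnly = true, so willreturn follows
// without inspecting a function body.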
2815 
2816  /// See AbstractAttribute::updateImpl(...).
2817  ChangeStatus updateImpl(Attributor &A) override {
2818  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2819  return ChangeStatus::UNCHANGED;
2820 
2821  auto CheckForWillReturn = [&](Instruction &I) {
2822  IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2823  const auto &WillReturnAA =
2824  A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2825  if (WillReturnAA.isKnownWillReturn())
2826  return true;
2827  if (!WillReturnAA.isAssumedWillReturn())
2828  return false;
2829  const auto &NoRecurseAA =
2830  A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2831  return NoRecurseAA.isAssumedNoRecurse();
2832  };
2833 
2834  bool UsedAssumedInformation = false;
2835  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2836  UsedAssumedInformation))
2837  return indicatePessimisticFixpoint();
2838 
2839  return ChangeStatus::UNCHANGED;
2840  }
2841 
2842  /// See AbstractAttribute::getAsStr()
2843  const std::string getAsStr() const override {
2844  return getAssumed() ? "willreturn" : "may-noreturn";
2845  }
2846 };
2847 
2848 struct AAWillReturnFunction final : AAWillReturnImpl {
2849  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2850  : AAWillReturnImpl(IRP, A) {}
2851 
2852  /// See AbstractAttribute::initialize(...).
2853  void initialize(Attributor &A) override {
2854  AAWillReturnImpl::initialize(A);
2855 
2856  Function *F = getAnchorScope();
2857  if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2858  indicatePessimisticFixpoint();
2859  }
2860 
2861  /// See AbstractAttribute::trackStatistics()
2862  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2863 };
2864 
2865 /// WillReturn attribute deduction for a call site.
2866 struct AAWillReturnCallSite final : AAWillReturnImpl {
2867  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2868  : AAWillReturnImpl(IRP, A) {}
2869 
2870  /// See AbstractAttribute::initialize(...).
2871  void initialize(Attributor &A) override {
2872  AAWillReturnImpl::initialize(A);
2873  Function *F = getAssociatedFunction();
2874  if (!F || !A.isFunctionIPOAmendable(*F))
2875  indicatePessimisticFixpoint();
2876  }
2877 
2878  /// See AbstractAttribute::updateImpl(...).
2879  ChangeStatus updateImpl(Attributor &A) override {
2880  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2881  return ChangeStatus::UNCHANGED;
2882 
2883  // TODO: Once we have call site specific value information we can provide
2884  // call site specific liveness information and then it makes
2885  // sense to specialize attributes for call sites arguments instead of
2886  // redirecting requests to the callee argument.
2887  Function *F = getAssociatedFunction();
2888  const IRPosition &FnPos = IRPosition::function(*F);
2889  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2890  return clampStateAndIndicateChange(getState(), FnAA.getState());
2891  }
2892 
2893  /// See AbstractAttribute::trackStatistics()
2894  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2895 };
2896 
2897 /// -------------------AAReachability Attribute--------------------------
2898 
2899 struct AAReachabilityImpl : AAReachability {
2900  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2901  : AAReachability(IRP, A) {}
2902 
2903  const std::string getAsStr() const override {
2904  // TODO: Return the number of reachable queries.
2905  return "reachable";
2906  }
2907 
2908  /// See AbstractAttribute::updateImpl(...).
2909  ChangeStatus updateImpl(Attributor &A) override {
2910  return ChangeStatus::UNCHANGED;
2911  }
2912 };
2913 
2914 struct AAReachabilityFunction final : public AAReachabilityImpl {
2915  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2916  : AAReachabilityImpl(IRP, A) {}
2917 
2918  /// See AbstractAttribute::trackStatistics()
2919  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2920 };
2921 
2922 /// ------------------------ NoAlias Argument Attribute ------------------------
2923 
2924 struct AANoAliasImpl : AANoAlias {
2925  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2926  assert(getAssociatedType()->isPointerTy() &&
2927  "Noalias is a pointer attribute");
2928  }
2929 
2930  const std::string getAsStr() const override {
2931  return getAssumed() ? "noalias" : "may-alias";
2932  }
2933 };
2934 
2935 /// NoAlias attribute for a floating value.
2936 struct AANoAliasFloating final : AANoAliasImpl {
2937  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2938  : AANoAliasImpl(IRP, A) {}
2939 
2940  /// See AbstractAttribute::initialize(...).
2941  void initialize(Attributor &A) override {
2942  AANoAliasImpl::initialize(A);
2943  Value *Val = &getAssociatedValue();
2944  do {
2945  CastInst *CI = dyn_cast<CastInst>(Val);
2946  if (!CI)
2947  break;
2948  Value *Base = CI->getOperand(0);
2949  if (!Base->hasOneUse())
2950  break;
2951  Val = Base;
2952  } while (true);
2953 
2954  if (!Val->getType()->isPointerTy()) {
2955  indicatePessimisticFixpoint();
2956  return;
2957  }
2958 
2959  if (isa<AllocaInst>(Val))
2960  indicateOptimisticFixpoint();
2961  else if (isa<ConstantPointerNull>(Val) &&
2962  !NullPointerIsDefined(getAnchorScope(),
2963  Val->getType()->getPointerAddressSpace()))
2964  indicateOptimisticFixpoint();
2965  else if (Val != &getAssociatedValue()) {
2966  const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2967  *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2968  if (ValNoAliasAA.isKnownNoAlias())
2969  indicateOptimisticFixpoint();
2970  }
2971  }
2972 
2973  /// See AbstractAttribute::updateImpl(...).
2974  ChangeStatus updateImpl(Attributor &A) override {
2975  // TODO: Implement this.
2976  return indicatePessimisticFixpoint();
2977  }
2978 
2979  /// See AbstractAttribute::trackStatistics()
2980  void trackStatistics() const override {
2981  STATS_DECLTRACK_FLOATING_ATTR(noalias)
2982  }
2983 };
2984 
2985 /// NoAlias attribute for an argument.
2986 struct AANoAliasArgument final
2987  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2988  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2989  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2990 
2991  /// See AbstractAttribute::initialize(...).
2992  void initialize(Attributor &A) override {
2993  Base::initialize(A);
2994  // See callsite argument attribute and callee argument attribute.
2995  if (hasAttr({Attribute::ByVal}))
2996  indicateOptimisticFixpoint();
2997  }
2998 
2999  /// See AbstractAttribute::updateImpl(...).
3000  ChangeStatus updateImpl(Attributor &A) override {
3001  // We have to make sure no-alias on the argument does not break
3002  // synchronization when this is a callback argument, see also [1] below.
3003  // If synchronization cannot be affected, we delegate to the base updateImpl
3004  // function, otherwise we give up for now.
3005 
3006  // If the function is no-sync, no-alias cannot break synchronization.
3007  const auto &NoSyncAA =
3008  A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3009  DepClassTy::OPTIONAL);
3010  if (NoSyncAA.isAssumedNoSync())
3011  return Base::updateImpl(A);
3012 
3013  // If the argument is read-only, no-alias cannot break synchronization.
3014  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3015  *this, getIRPosition(), DepClassTy::OPTIONAL);
3016  if (MemBehaviorAA.isAssumedReadOnly())
3017  return Base::updateImpl(A);
3018 
3019  // If the argument is never passed through callbacks, no-alias cannot break
3020  // synchronization.
3021  bool AllCallSitesKnown;
3022  if (A.checkForAllCallSites(
3023  [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3024  true, AllCallSitesKnown))
3025  return Base::updateImpl(A);
3026 
3027  // TODO: add no-alias but make sure it doesn't break synchronization by
3028  // introducing fake uses. See:
3029  // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3030  // International Workshop on OpenMP 2018,
3031  // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3032 
3033  return indicatePessimisticFixpoint();
3034  }
3035 
3036  /// See AbstractAttribute::trackStatistics()
3037  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3038 };
3039 
3040 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3041  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3042  : AANoAliasImpl(IRP, A) {}
3043 
3044  /// See AbstractAttribute::initialize(...).
3045  void initialize(Attributor &A) override {
3046  // See callsite argument attribute and callee argument attribute.
3047  const auto &CB = cast<CallBase>(getAnchorValue());
3048  if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3049  indicateOptimisticFixpoint();
3050  Value &Val = getAssociatedValue();
3051  if (isa<ConstantPointerNull>(Val) &&
3052  !NullPointerIsDefined(getAnchorScope(),
3053  Val.getType()->getPointerAddressSpace()))
3054  indicateOptimisticFixpoint();
3055  }
3056 
3057  /// Determine if the underlying value may alias with the call site argument
3058  /// \p OtherArgNo of \p CB (= the underlying call site).
3059  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3060  const AAMemoryBehavior &MemBehaviorAA,
3061  const CallBase &CB, unsigned OtherArgNo) {
3062  // We do not need to worry about aliasing with the underlying IRP.
3063  if (this->getCalleeArgNo() == (int)OtherArgNo)
3064  return false;
3065 
3066  // If it is not a pointer or pointer vector we do not alias.
3067  const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3068  if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3069  return false;
3070 
3071  auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3072  *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3073 
3074  // If the argument is readnone, there is no read-write aliasing.
3075  if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3076  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3077  return false;
3078  }
3079 
3080  // If the argument is readonly and the underlying value is readonly, there
3081  // is no read-write aliasing.
3082  bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3083  if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3084  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3085  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3086  return false;
3087  }
3088 
3089  // We have to utilize actual alias analysis queries so we need the object.
3090  if (!AAR)
3091  AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3092 
3093  // Try to rule it out at the call site.
3094  bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3095  LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3096  "callsite arguments: "
3097  << getAssociatedValue() << " " << *ArgOp << " => "
3098  << (IsAliasing ? "" : "no-") << "alias \n");
3099 
3100  return IsAliasing;
3101  }
3102 
3103  bool
3104  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3105  const AAMemoryBehavior &MemBehaviorAA,
3106  const AANoAlias &NoAliasAA) {
3107  // We can deduce "noalias" if the following conditions hold.
3108  // (i) Associated value is assumed to be noalias in the definition.
3109  // (ii) Associated value is assumed to be no-capture in all the uses
3110  // possibly executed before this callsite.
3111  // (iii) There is no other pointer argument which could alias with the
3112  // value.
3113 
3114  bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3115  if (!AssociatedValueIsNoAliasAtDef) {
3116  LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3117  << " is not no-alias at the definition\n");
3118  return false;
3119  }
3120 
3121  A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3122 
3123  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3124  const Function *ScopeFn = VIRP.getAnchorScope();
3125  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3126  // Check whether the value is captured in the scope using AANoCapture.
3127  // Look at CFG and check only uses possibly executed before this
3128  // callsite.
3129  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3130  Instruction *UserI = cast<Instruction>(U.getUser());
3131 
3132  // If UserI is the current instruction and there is a single potential use of
3133  // the value in UserI we allow the use.
3134  // TODO: We should inspect the operands and allow those that cannot alias
3135  // with the value.
3136  if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3137  return true;
3138 
3139  if (ScopeFn) {
3140  const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3141  *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3142 
3143  if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3144  return true;
3145 
3146  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3147  if (CB->isArgOperand(&U)) {
3148 
3149  unsigned ArgNo = CB->getArgOperandNo(&U);
3150 
3151  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3152  *this, IRPosition::callsite_argument(*CB, ArgNo),
3153  DepClassTy::OPTIONAL);
3154 
3155  if (NoCaptureAA.isAssumedNoCapture())
3156  return true;
3157  }
3158  }
3159  }
3160 
3161  // For cases which can potentially have more users
3162  if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3163  isa<SelectInst>(U)) {
3164  Follow = true;
3165  return true;
3166  }
3167 
3168  LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3169  return false;
3170  };
3171 
3172  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3173  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3174  LLVM_DEBUG(
3175  dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3176  << " cannot be noalias as it is potentially captured\n");
3177  return false;
3178  }
3179  }
3180  A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3181 
3182  // Check there is no other pointer argument which could alias with the
3183  // value passed at this call site.
3184  // TODO: AbstractCallSite
3185  const auto &CB = cast<CallBase>(getAnchorValue());
3186  for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
3187  OtherArgNo++)
3188  if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3189  return false;
3190 
3191  return true;
3192  }
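// Illustrative sketch (added for exposition, not part of the original
// source) of conditions (i)-(iii) above:
//
//   %p = call i8* @malloc(i64 8)       ; (i) noalias at the definition
//   call void @use(i8* nocapture %p)   ; (ii) not captured beforehand and
//                                      ; (iii) the only pointer argument
//
// so the call site argument %p can keep its noalias deduction.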
3193 
3194  /// See AbstractAttribute::updateImpl(...).
3195  ChangeStatus updateImpl(Attributor &A) override {
3196  // If the argument is readnone we are done as there are no accesses via the
3197  // argument.
3198  auto &MemBehaviorAA =
3199  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3200  if (MemBehaviorAA.isAssumedReadNone()) {
3201  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3202  return ChangeStatus::UNCHANGED;
3203  }
3204 
3205  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3206  const auto &NoAliasAA =
3207  A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3208 
3209  AAResults *AAR = nullptr;
3210  if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3211  NoAliasAA)) {
3212  LLVM_DEBUG(
3213  dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3214  return ChangeStatus::UNCHANGED;
3215  }
3216 
3217  return indicatePessimisticFixpoint();
3218  }
3219 
3220  /// See AbstractAttribute::trackStatistics()
3221  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3222 };
3223 
3224 /// NoAlias attribute for function return value.
3225 struct AANoAliasReturned final : AANoAliasImpl {
3226  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3227  : AANoAliasImpl(IRP, A) {}
3228 
3229  /// See AbstractAttribute::initialize(...).
3230  void initialize(Attributor &A) override {
3231  AANoAliasImpl::initialize(A);
3232  Function *F = getAssociatedFunction();
3233  if (!F || F->isDeclaration())
3234  indicatePessimisticFixpoint();
3235  }
3236 
3237  /// See AbstractAttribute::updateImpl(...).
3238  virtual ChangeStatus updateImpl(Attributor &A) override {
3239 
3240  auto CheckReturnValue = [&](Value &RV) -> bool {
3241  if (Constant *C = dyn_cast<Constant>(&RV))
3242  if (C->isNullValue() || isa<UndefValue>(C))
3243  return true;
3244 
3245  /// For now, we can only deduce noalias if we have call sites.
3246  /// FIXME: add more support.
3247  if (!isa<CallBase>(&RV))
3248  return false;
3249 
3250  const IRPosition &RVPos = IRPosition::value(RV);
3251  const auto &NoAliasAA =
3252  A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3253  if (!NoAliasAA.isAssumedNoAlias())
3254  return false;
3255 
3256  const auto &NoCaptureAA =
3257  A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3258  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3259  };
3260 
3261  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3262  return indicatePessimisticFixpoint();
3263 
3264  return ChangeStatus::UNCHANGED;
3265  }
3266 
3267  /// See AbstractAttribute::trackStatistics()
3268  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3269 };
3270 
3271 /// NoAlias attribute deduction for a call site return value.
3272 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3273  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3274  : AANoAliasImpl(IRP, A) {}
3275 
3276  /// See AbstractAttribute::initialize(...).
3277  void initialize(Attributor &A) override {
3278  AANoAliasImpl::initialize(A);
3279  Function *F = getAssociatedFunction();
3280  if (!F || F->isDeclaration())
3281  indicatePessimisticFixpoint();
3282  }
3283 
3284  /// See AbstractAttribute::updateImpl(...).
3285  ChangeStatus updateImpl(Attributor &A) override {
3286  // TODO: Once we have call site specific value information we can provide
3287  // call site specific liveness information and then it makes
3288  // sense to specialize attributes for call sites arguments instead of
3289  // redirecting requests to the callee argument.
3290  Function *F = getAssociatedFunction();
3291  const IRPosition &FnPos = IRPosition::returned(*F);
3292  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3293  return clampStateAndIndicateChange(getState(), FnAA.getState());
3294  }
3295 
3296  /// See AbstractAttribute::trackStatistics()
3297  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3298 };
3299 
3300 /// -------------------AAIsDead Function Attribute-----------------------
3301 
3302 struct AAIsDeadValueImpl : public AAIsDead {
3303  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3304 
3305  /// See AAIsDead::isAssumedDead().
3306  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3307 
3308  /// See AAIsDead::isKnownDead().
3309  bool isKnownDead() const override { return isKnown(IS_DEAD); }
3310 
3311  /// See AAIsDead::isAssumedDead(BasicBlock *).
3312  bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3313 
3314  /// See AAIsDead::isKnownDead(BasicBlock *).
3315  bool isKnownDead(const BasicBlock *BB) const override { return false; }
3316 
3317  /// See AAIsDead::isAssumedDead(Instruction *I).
3318  bool isAssumedDead(const Instruction *I) const override {
3319  return I == getCtxI() && isAssumedDead();
3320  }
3321 
3322  /// See AAIsDead::isKnownDead(Instruction *I).
3323  bool isKnownDead(const Instruction *I) const override {
3324  return isAssumedDead(I) && isKnownDead();
3325  }
3326 
3327  /// See AbstractAttribute::getAsStr().
3328  const std::string getAsStr() const override {
3329  return isAssumedDead() ? "assumed-dead" : "assumed-live";
3330  }
3331 
3332  /// Check if all uses are assumed dead.
3333  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3334  // Callers might not check the type, void has no uses.
3335  if (V.getType()->isVoidTy())
3336  return true;
3337 
3338  // If we replace a value with a constant there are no uses left afterwards.
3339  if (!isa<Constant>(V)) {
3340  bool UsedAssumedInformation = false;
3341  Optional<Constant *> C =
3342  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3343  if (!C.hasValue() || *C)
3344  return true;
3345  }
3346 
3347  auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3348  // Explicitly set the dependence class to required because we want a long
3349  // chain of N dependent instructions to be considered live as soon as one is
3350  // without going through N update cycles. This is not required for
3351  // correctness.
3352  return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3353  DepClassTy::REQUIRED);
3354  }
3355 
3356  /// Determine if \p I is assumed to be side-effect free.
3357  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3358  if (!I || wouldInstructionBeTriviallyDead(I))
3359  return true;
3360 
3361  auto *CB = dyn_cast<CallBase>(I);
3362  if (!CB || isa<IntrinsicInst>(CB))
3363  return false;
3364 
3365  const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3366  const auto &NoUnwindAA =
3367  A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3368  if (!NoUnwindAA.isAssumedNoUnwind())
3369  return false;
3370  if (!NoUnwindAA.isKnownNoUnwind())
3371  A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3372 
3373  const auto &MemBehaviorAA =
3374  A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3375  if (MemBehaviorAA.isAssumedReadOnly()) {
3376  if (!MemBehaviorAA.isKnownReadOnly())
3377  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3378  return true;
3379  }
3380  return false;
3381  }
3382 };
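// Illustrative example (added for exposition, not part of the original
// source): a side-effect-free call whose result has no live user, e.g.
//
//   %x = call i32 @get()   ; callee deduced nounwind and readonly, %x unused
//
// passes both isAssumedSideEffectFree and areAllUsesAssumedDead, so the
// floating AAIsDead below can delete it during manifest.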
3383 
3384 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3385  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3386  : AAIsDeadValueImpl(IRP, A) {}
3387 
3388  /// See AbstractAttribute::initialize(...).
3389  void initialize(Attributor &A) override {
3390  if (isa<UndefValue>(getAssociatedValue())) {
3391  indicatePessimisticFixpoint();
3392  return;
3393  }
3394 
3395  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3396  if (!isAssumedSideEffectFree(A, I)) {
3397  if (!isa_and_nonnull<StoreInst>(I))
3398  indicatePessimisticFixpoint();
3399  else
3400  removeAssumedBits(HAS_NO_EFFECT);
3401  }
3402  }
3403 
3404  bool isDeadStore(Attributor &A, StoreInst &SI) {
3405  // Lang ref now states volatile store is not UB/dead, let's skip them.
3406  if (SI.isVolatile())
3407  return false;
3408 
3409  bool UsedAssumedInformation = false;
3410  SmallSetVector<Value *, 4> PotentialCopies;
3411  if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3412  UsedAssumedInformation))
3413  return false;
3414  return llvm::all_of(PotentialCopies, [&](Value *V) {
3415  return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3416  UsedAssumedInformation);
3417  });
3418  }
3419 
3420  /// See AbstractAttribute::updateImpl(...).
3422  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3423  if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3424  if (!isDeadStore(A, *SI))
3425  return indicatePessimisticFixpoint();
3426  } else {
3427  if (!isAssumedSideEffectFree(A, I))
3428  return indicatePessimisticFixpoint();
3429  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3430  return indicatePessimisticFixpoint();
3431  }
3432  return ChangeStatus::UNCHANGED;
3433  }
3434 
3435  /// See AbstractAttribute::manifest(...).
3436  ChangeStatus manifest(Attributor &A) override {
3437  Value &V = getAssociatedValue();
3438  if (auto *I = dyn_cast<Instruction>(&V)) {
3439  // If we get here we know the users are all dead. We check whether
3440  // isAssumedSideEffectFree still returns true because it might not hold:
3441  // the users may be dead while the instruction (= call) itself is still
3442  // needed.
3443  if (isa<StoreInst>(I) ||
3444  (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3445  A.deleteAfterManifest(*I);
3446  return ChangeStatus::CHANGED;
3447  }
3448  }
3449  if (V.use_empty())
3450  return ChangeStatus::UNCHANGED;
3451 
3452  bool UsedAssumedInformation = false;
3453  Optional<Constant *> C =
3454  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3455  if (C.hasValue() && C.getValue())
3456  return ChangeStatus::UNCHANGED;
3457 
3458  // Replace the value with undef as it is dead but keep droppable uses around
3459  // as they provide information we don't want to give up on just yet.
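      // (Illustrative note: droppable uses are, e.g., uses in llvm.assume
      // operand bundles, which can be discarded without affecting semantics.)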
3460  UndefValue &UV = *UndefValue::get(V.getType());
3461  bool AnyChange =
3462  A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3463  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3464  }
3465 
3466  /// See AbstractAttribute::trackStatistics()
3467  void trackStatistics() const override {
3468  STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3469  }
3470 };
3471 
3472 struct AAIsDeadArgument : public AAIsDeadFloating {
3473  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3474  : AAIsDeadFloating(IRP, A) {}
3475 
3476  /// See AbstractAttribute::initialize(...).
3477  void initialize(Attributor &A) override {
3478  if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3479  indicatePessimisticFixpoint();
3480  }
3481 
3482  /// See AbstractAttribute::manifest(...).
3483  ChangeStatus manifest(Attributor &A) override {
3484  ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3485  Argument &Arg = *getAssociatedArgument();
3486  if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3487  if (A.registerFunctionSignatureRewrite(
3488  Arg, /* ReplacementTypes */ {},
3489  Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3490  Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3491  Arg.dropDroppableUses();
3492  return ChangeStatus::CHANGED;
3493  }
3494  return Changed;
3495  }
3496 
3497  /// See AbstractAttribute::trackStatistics()
3498  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3499 };
3500 
3501 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3502  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3503  : AAIsDeadValueImpl(IRP, A) {}
3504 
3505  /// See AbstractAttribute::initialize(...).
3506  void initialize(Attributor &A) override {
3507  if (isa<UndefValue>(getAssociatedValue()))
3508  indicatePessimisticFixpoint();
3509  }
3510 
3511  /// See AbstractAttribute::updateImpl(...).
3512  ChangeStatus updateImpl(Attributor &A) override {
3513  // TODO: Once we have call site specific value information we can provide
3514  // call site specific liveness information and then it makes
3515  // sense to specialize attributes for call site arguments instead of
3516  // redirecting requests to the callee argument.
3517  Argument *Arg = getAssociatedArgument();
3518  if (!Arg)
3519  return indicatePessimisticFixpoint();
3520  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3521  auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3522  return clampStateAndIndicateChange(getState(), ArgAA.getState());
3523  }
3524 
3525  /// See AbstractAttribute::manifest(...).
3526  ChangeStatus manifest(Attributor &A) override {
3527  CallBase &CB = cast<CallBase>(getAnchorValue());
3528  Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3529  assert(!isa<UndefValue>(U.get()) &&
3530  "Expected undef values to be filtered out!");
3531  UndefValue &UV = *UndefValue::get(U->getType());
3532  if (A.changeUseAfterManifest(U, UV))
3533  return ChangeStatus::CHANGED;
3534  return ChangeStatus::UNCHANGED;
3535  }
3536 
3537  /// See AbstractAttribute::trackStatistics()
3538  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3539 };
3540 
3541 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3542  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3543  : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3544 
3545  /// See AAIsDead::isAssumedDead().
3546  bool isAssumedDead() const override {
3547  return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3548  }
3549 
3550  /// See AbstractAttribute::initialize(...).
3551  void initialize(Attributor &A) override {
3552  if (isa<UndefValue>(getAssociatedValue())) {
3553  indicatePessimisticFixpoint();
3554  return;
3555  }
3556 
3557  // We track this separately as a secondary state.
3558  IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3559  }
3560 
3561  /// See AbstractAttribute::updateImpl(...).
3562  ChangeStatus updateImpl(Attributor &A) override {
3563  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3564  if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3565  IsAssumedSideEffectFree = false;
3566  Changed = ChangeStatus::CHANGED;
3567  }
3568  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3569  return indicatePessimisticFixpoint();
3570  return Changed;
3571  }
3572 
3573  /// See AbstractAttribute::trackStatistics()
3574  void trackStatistics() const override {
3575  if (IsAssumedSideEffectFree)
3576  STATS_DECLTRACK_CSRET_ATTR(IsDead)
3577  else
3578  STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3579  }
3580 
3581  /// See AbstractAttribute::getAsStr().
3582  const std::string getAsStr() const override {
3583  return isAssumedDead()
3584  ? "assumed-dead"
3585  : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3586  }
3587 
3588 private:
3589  bool IsAssumedSideEffectFree;
3590 };
3591 
3592 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3593  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3594  : AAIsDeadValueImpl(IRP, A) {}
3595 
3596  /// See AbstractAttribute::updateImpl(...).
3597  ChangeStatus updateImpl(Attributor &A) override {
3598 
3599  bool UsedAssumedInformation = false;
3600  A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3601  {Instruction::Ret}, UsedAssumedInformation);
3602 
3603  auto PredForCallSite = [&](AbstractCallSite ACS) {
3604  if (ACS.isCallbackCall() || !ACS.getInstruction())
3605  return false;
3606  return areAllUsesAssumedDead(A, *ACS.getInstruction());
3607  };
3608 
3609  bool AllCallSitesKnown;
3610  if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3611  AllCallSitesKnown))
3612  return indicatePessimisticFixpoint();
3613 
3614  return ChangeStatus::UNCHANGED;
3615  }
3616 
3617  /// See AbstractAttribute::manifest(...).
3618  ChangeStatus manifest(Attributor &A) override {
3619  // TODO: Rewrite the signature to return void?
3620  bool AnyChange = false;
3621  UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3622  auto RetInstPred = [&](Instruction &I) {
3623  ReturnInst &RI = cast<ReturnInst>(I);
3624  if (!isa<UndefValue>(RI.getReturnValue()))
3625  AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3626  return true;
3627  };
3628  bool UsedAssumedInformation = false;
3629  A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3630  UsedAssumedInformation);
3631  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3632  }
3633 
3634  /// See AbstractAttribute::trackStatistics()
3635  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3636 };
3637 
3638 struct AAIsDeadFunction : public AAIsDead {
3639  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3640 
3641  /// See AbstractAttribute::initialize(...).
3642  void initialize(Attributor &A) override {
3643  const Function *F = getAnchorScope();
3644  if (F && !F->isDeclaration()) {
3645  // We only want to compute liveness once. If the function is not part of
3646  // the SCC, skip it.
3647  if (A.isRunOn(*const_cast<Function *>(F))) {
3648  ToBeExploredFrom.insert(&F->getEntryBlock().front());
3649  assumeLive(A, F->getEntryBlock());
3650  } else {
3651  indicatePessimisticFixpoint();
3652  }
3653  }
3654  }
3655 
3656  /// See AbstractAttribute::getAsStr().
3657  const std::string getAsStr() const override {
3658  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3659  std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3660  std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3661  std::to_string(KnownDeadEnds.size()) + "]";
3662  }
3663 
3664  /// See AbstractAttribute::manifest(...).
3665  ChangeStatus manifest(Attributor &A) override {
3666  assert(getState().isValidState() &&
3667  "Attempted to manifest an invalid state!");
3668 
3669  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3670  Function &F = *getAnchorScope();
3671 
3672  if (AssumedLiveBlocks.empty()) {
3673  A.deleteAfterManifest(F);
3674  return ChangeStatus::CHANGED;
3675  }
3676 
3677  // Flag to determine if we can change an invoke to a call assuming the
3678  // callee is nounwind. This is not possible if the personality of the
3679  // function allows catching asynchronous exceptions.
3680  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3681 
3682  KnownDeadEnds.set_union(ToBeExploredFrom);
3683  for (const Instruction *DeadEndI : KnownDeadEnds) {
3684  auto *CB = dyn_cast<CallBase>(DeadEndI);
3685  if (!CB)
3686  continue;
3687  const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3688  *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3689  bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3690  if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3691  continue;
3692 
3693  if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3694  A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3695  else
3696  A.changeToUnreachableAfterManifest(
3697  const_cast<Instruction *>(DeadEndI->getNextNode()));
3698  HasChanged = ChangeStatus::CHANGED;
3699  }
3700 
3701  STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3702  for (BasicBlock &BB : F)
3703  if (!AssumedLiveBlocks.count(&BB)) {
3704  A.deleteAfterManifest(BB);
3705  ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3706  }
3707 
3708  return HasChanged;
3709  }
3710 
3711  /// See AbstractAttribute::updateImpl(...).
3712  ChangeStatus updateImpl(Attributor &A) override;
3713 
3714  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3715  return !AssumedLiveEdges.count(std::make_pair(From, To));
3716  }
3717 
3718  /// See AbstractAttribute::trackStatistics()
3719  void trackStatistics() const override {}
3720 
3721  /// Returns true if the function is assumed dead.
3722  bool isAssumedDead() const override { return false; }
3723 
3724  /// See AAIsDead::isKnownDead().
3725  bool isKnownDead() const override { return false; }
3726 
3727  /// See AAIsDead::isAssumedDead(BasicBlock *).
3728  bool isAssumedDead(const BasicBlock *BB) const override {
3729  assert(BB->getParent() == getAnchorScope() &&
3730  "BB must be in the same anchor scope function.");
3731 
3732  if (!getAssumed())
3733  return false;
3734  return !AssumedLiveBlocks.count(BB);
3735  }
3736 
3737  /// See AAIsDead::isKnownDead(BasicBlock *).
3738  bool isKnownDead(const BasicBlock *BB) const override {
3739  return getKnown() && isAssumedDead(BB);
3740  }
3741 
3742  /// See AAIsDead::isAssumed(Instruction *I).
3743  bool isAssumedDead(const Instruction *I) const override {
3744  assert(I->getParent()->getParent() == getAnchorScope() &&
3745  "Instruction must be in the same anchor scope function.");
3746 
3747  if (!getAssumed())
3748  return false;
3749 
3750  // If it is not in AssumedLiveBlocks then it is for sure dead.
3751  // Otherwise, it can still be after a noreturn call in a live block.
3752  if (!AssumedLiveBlocks.count(I->getParent()))
3753  return true;
3754 
3755  // If it is not after a liveness barrier it is live.
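      // (Illustrative note: the "barriers" checked below are the known dead
      // ends and exploration points, e.g., calls assumed to be noreturn;
      // instructions following them in an otherwise live block are dead.)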
3756  const Instruction *PrevI = I->getPrevNode();
3757  while (PrevI) {
3758  if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3759  return true;
3760  PrevI = PrevI->getPrevNode();
3761  }
3762  return false;
3763  }
3764 
3765  /// See AAIsDead::isKnownDead(Instruction *I).
3766  bool isKnownDead(const Instruction *I) const override {
3767  return getKnown() && isAssumedDead(I);
3768  }
3769 
3770  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3771  /// that internal functions called from \p BB should now be looked at.
3772  bool assumeLive(Attributor &A, const BasicBlock &BB) {
3773  if (!AssumedLiveBlocks.insert(&BB).second)
3774  return false;
3775 
3776  // We assume that all of BB is (probably) live now and if there are calls to
3777  // internal functions we will assume that those are now live as well. This
3778  // is a performance optimization for blocks with calls to a lot of internal
3779  // functions. It can however cause dead functions to be treated as live.
3780  for (const Instruction &I : BB)
3781  if (const auto *CB = dyn_cast<CallBase>(&I))
3782  if (const Function *F = CB->getCalledFunction())
3783  if (F->hasLocalLinkage())
3784  A.markLiveInternalFunction(*F);
3785  return true;
3786  }
3787 
3788  /// Collection of instructions that need to be explored again, e.g., we
3789  /// did assume they do not transfer control to (one of their) successors.
3790  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3791 
3792  /// Collection of instructions that are known to not transfer control.
3793  SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3794 
3795  /// Collection of all assumed live edges.
3796  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3797 
3798  /// Collection of all assumed live BasicBlocks.
3799  DenseSet<const BasicBlock *> AssumedLiveBlocks;
3800 };
3801 
3802 static bool
3803 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3804  AbstractAttribute &AA,
3805  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3806  const IRPosition &IPos = IRPosition::callsite_function(CB);
3807 
3808  const auto &NoReturnAA =
3809  A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3810  if (NoReturnAA.isAssumedNoReturn())
3811  return !NoReturnAA.isKnownNoReturn();
3812  if (CB.isTerminator())
3813  AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3814  else
3815  AliveSuccessors.push_back(CB.getNextNode());
3816  return false;
3817 }
3818 
3819 static bool
3820 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3821  AbstractAttribute &AA,
3822  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3823  bool UsedAssumedInformation =
3824  identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3825 
3826  // First, determine if we can change an invoke to a call assuming the
3827  // callee is nounwind. This is not possible if the personality of the
3828  // function allows catching asynchronous exceptions.
3829  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3830  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3831  } else {
3832  const IRPosition &IPos = IRPosition::callsite_function(II);
3833  const auto &AANoUnw =
3834  A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3835  if (AANoUnw.isAssumedNoUnwind()) {
3836  UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3837  } else {
3838  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3839  }
3840  }
3841  return UsedAssumedInformation;
3842 }
3843 
3844 static bool
3845 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3846  AbstractAttribute &AA,
3847  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3848  bool UsedAssumedInformation = false;
3849  if (BI.getNumSuccessors() == 1) {
3850  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3851  } else {
3852  Optional<Constant *> C =
3853  A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3854  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3855  // No value yet, assume both edges are dead.
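      // (Illustrative note: if the condition is assumed to be the constant
      // i1 true, only getSuccessor(0), the taken edge, is enqueued below;
      // the other edge stays assumed dead.)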
3856  } else if (isa_and_nonnull<ConstantInt>(*C)) {
3857  const BasicBlock *SuccBB =
3858  BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3859  AliveSuccessors.push_back(&SuccBB->front());
3860  } else {
3861  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3862  AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3863  UsedAssumedInformation = false;
3864  }
3865  }
3866  return UsedAssumedInformation;
3867 }
3868 
3869 static bool
3870 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3871  AbstractAttribute &AA,
3872  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3873  bool UsedAssumedInformation = false;
3874  Optional<Constant *> C =
3875  A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3876  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3877  // No value yet, assume all edges are dead.
3878  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3879  for (auto &CaseIt : SI.cases()) {
3880  if (CaseIt.getCaseValue() == C.getValue()) {
3881  AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3882  return UsedAssumedInformation;
3883  }
3884  }
3885  AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3886  return UsedAssumedInformation;
3887  } else {
3888  for (const BasicBlock *SuccBB : successors(SI.getParent()))
3889  AliveSuccessors.push_back(&SuccBB->front());
3890  }
3891  return UsedAssumedInformation;
3892 }
3893 
3894 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3895  ChangeStatus Change = ChangeStatus::UNCHANGED;
3896 
3897  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3898  << getAnchorScope()->size() << "] BBs and "
3899  << ToBeExploredFrom.size() << " exploration points and "
3900  << KnownDeadEnds.size() << " known dead ends\n");
3901 
3902  // Copy and clear the list of instructions we need to explore from. It is
3903  // refilled with instructions the next update has to look at.
3904  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3905  ToBeExploredFrom.end());
3906  decltype(ToBeExploredFrom) NewToBeExploredFrom;
3907 
3908  SmallVector<const Instruction *, 8> AliveSuccessors;
3909  while (!Worklist.empty()) {
3910  const Instruction *I = Worklist.pop_back_val();
3911  LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3912 
3913  // Fast forward for uninteresting instructions. We could look for UB here
3914  // though.
3915  while (!I->isTerminator() && !isa<CallBase>(I))
3916  I = I->getNextNode();
3917 
3918  AliveSuccessors.clear();
3919 
3920  bool UsedAssumedInformation = false;
3921  switch (I->getOpcode()) {
3922  // TODO: look for (assumed) UB to backwards propagate "deadness".
3923  default:
3924  assert(I->isTerminator() &&
3925  "Expected non-terminators to be handled already!");
3926  for (const BasicBlock *SuccBB : successors(I->getParent()))
3927  AliveSuccessors.push_back(&SuccBB->front());
3928  break;
3929  case Instruction::Call:
3930  UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3931  *this, AliveSuccessors);
3932  break;
3933  case Instruction::Invoke:
3934  UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3935  *this, AliveSuccessors);
3936  break;
3937  case Instruction::Br:
3938  UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3939  *this, AliveSuccessors);
3940  break;
3941  case Instruction::Switch:
3942  UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3943  *this, AliveSuccessors);
3944  break;
3945  }
3946 
3947  if (UsedAssumedInformation) {
3948  NewToBeExploredFrom.insert(I);
3949  } else if (AliveSuccessors.empty() ||
3950  (I->isTerminator() &&
3951  AliveSuccessors.size() < I->getNumSuccessors())) {
3952  if (KnownDeadEnds.insert(I))
3953  Change = ChangeStatus::CHANGED;
3954  }
3955 
3956  LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3957  << AliveSuccessors.size() << " UsedAssumedInformation: "
3958  << UsedAssumedInformation << "\n");
3959 
3960  for (const Instruction *AliveSuccessor : AliveSuccessors) {
3961  if (!I->isTerminator()) {
3962  assert(AliveSuccessors.size() == 1 &&
3963  "Non-terminator expected to have a single successor!");
3964  Worklist.push_back(AliveSuccessor);
3965  } else {
3966  // record the assumed live edge
3967  auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3968  if (AssumedLiveEdges.insert(Edge).second)
3969  Change = ChangeStatus::CHANGED;
3970  if (assumeLive(A, *AliveSuccessor->getParent()))
3971  Worklist.push_back(AliveSuccessor);
3972  }
3973  }
3974  }
3975 
3976  // Check if the content of ToBeExploredFrom changed, ignore the order.
3977  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3978  llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3979  return !ToBeExploredFrom.count(I);
3980  })) {
3981  Change = ChangeStatus::CHANGED;
3982  ToBeExploredFrom = std::move(NewToBeExploredFrom);
3983  }
3984 
3985  // If we know everything is live there is no need to query for liveness.
3986  // Instead, indicating a pessimistic fixpoint will cause the state to be
3987  // "invalid" and all queries to be answered conservatively without lookups.
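      // (Illustrative note: for AAIsDead the pessimistic state means
      // "everything is live", so invalidating the state here is safe and
      // avoids needless liveness lookups.)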
3988  // To be in this state we have to (1) finish the exploration, (2) not rule
3989  // unreachable code dead, and (3) not discover any non-trivial dead end.
3991  if (ToBeExploredFrom.empty() &&
3992  getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3993  llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3994  return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3995  }))
3996  return indicatePessimisticFixpoint();
3997  return Change;
3998 }
3999 
4000 /// Liveness information for a call site.
4001 struct AAIsDeadCallSite final : AAIsDeadFunction {
4002  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4003  : AAIsDeadFunction(IRP, A) {}
4004 
4005  /// See AbstractAttribute::initialize(...).
4006  void initialize(Attributor &A) override {
4007  // TODO: Once we have call site specific value information we can provide
4008  // call site specific liveness information and then it makes
4009  // sense to specialize attributes for call sites instead of
4010  // redirecting requests to the callee.
4011  llvm_unreachable("Abstract attributes for liveness are not "
4012  "supported for call sites yet!");
4013  }
4014 
4015  /// See AbstractAttribute::updateImpl(...).
4016  ChangeStatus updateImpl(Attributor &A) override {
4017  return indicatePessimisticFixpoint();
4018  }
4019 
4020  /// See AbstractAttribute::trackStatistics()
4021  void trackStatistics() const override {}
4022 };
4023 
4024 /// -------------------- Dereferenceable Argument Attribute --------------------
4025 
4026 struct AADereferenceableImpl : AADereferenceable {
4027  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4028  : AADereferenceable(IRP, A) {}
4029  using StateType = DerefState;
4030 
4031  /// See AbstractAttribute::initialize(...).
4032  void initialize(Attributor &A) override {
4033  SmallVector<Attribute, 4> Attrs;
4034  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4035  Attrs, /* IgnoreSubsumingPositions */ false, &A);
4036  for (const Attribute &Attr : Attrs)
4037  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4038 
4039  const IRPosition &IRP = this->getIRPosition();
4040  NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4041 
4042  bool CanBeNull, CanBeFreed;
4043  takeKnownDerefBytesMaximum(
4044  IRP.getAssociatedValue().getPointerDereferenceableBytes(
4045  A.getDataLayout(), CanBeNull, CanBeFreed));
4046 
4047  bool IsFnInterface = IRP.isFnInterfaceKind();
4048  Function *FnScope = IRP.getAnchorScope();
4049  if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4050  indicatePessimisticFixpoint();
4051  return;
4052  }
4053 
4054  if (Instruction *CtxI = getCtxI())
4055  followUsesInMBEC(*this, A, getState(), *CtxI);
4056  }
4057 
4058  /// See AbstractAttribute::getState()
4059  /// {
4060  StateType &getState() override { return *this; }
4061  const StateType &getState() const override { return *this; }
4062  /// }
4063 
4064  /// Helper function for collecting accessed bytes in must-be-executed-context
4065  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4066  DerefState &State) {
4067  const Value *UseV = U->get();
4068  if (!UseV->getType()->isPointerTy())
4069  return;
4070 
4071  Type *PtrTy = UseV->getType();
4072  const DataLayout &DL = A.getDataLayout();
4073  int64_t Offset;
4074  if (const Value *Base = getBasePointerOfAccessPointerOperand(
4075  I, Offset, DL, /*AllowNonInbounds*/ true)) {
4076  if (Base == &getAssociatedValue() &&
4077  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4078  uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4079  State.addAccessedBytes(Offset, Size);
4080  }
4081  }
4082  }
4083 
4084  /// See followUsesInMBEC
4085  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4086  AADereferenceable::StateType &State) {
4087  bool IsNonNull = false;
4088  bool TrackUse = false;
4089  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4090  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4091  LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4092  << " for instruction " << *I << "\n");
4093 
4094  addAccessedBytesForUse(A, U, I, State);
4095  State.takeKnownDerefBytesMaximum(DerefBytes);
4096  return TrackUse;
4097  }
4098 
4099  /// See AbstractAttribute::manifest(...).
4100  ChangeStatus manifest(Attributor &A) override {
4101  ChangeStatus Change = AADereferenceable::manifest(A);
4102  if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4103  removeAttrs({Attribute::DereferenceableOrNull});
4104  return ChangeStatus::CHANGED;
4105  }
4106  return Change;
4107  }
4108 
4109  void getDeducedAttributes(LLVMContext &Ctx,
4110  SmallVectorImpl<Attribute> &Attrs) const override {
4111  // TODO: Add *_globally support
4112  if (isAssumedNonNull())
4113  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4114  Ctx, getAssumedDereferenceableBytes()));
4115  else
4116  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4117  Ctx, getAssumedDereferenceableBytes()));
4118  }
4119 
4120  /// See AbstractAttribute::getAsStr().
4121  const std::string getAsStr() const override {
4122  if (!getAssumedDereferenceableBytes())
4123  return "unknown-dereferenceable";
4124  return std::string("dereferenceable") +
4125  (isAssumedNonNull() ? "" : "_or_null") +
4126  (isAssumedGlobal() ? "_globally" : "") + "<" +
4127  std::to_string(getKnownDereferenceableBytes()) + "-" +
4128  std::to_string(getAssumedDereferenceableBytes()) + ">";
4129  }
4130 };
4131 
4132 /// Dereferenceable attribute for a floating value.
4133 struct AADereferenceableFloating : AADereferenceableImpl {
4134  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4135  : AADereferenceableImpl(IRP, A) {}
4136 
4137  /// See AbstractAttribute::updateImpl(...).
4138  ChangeStatus updateImpl(Attributor &A) override {
4139  const DataLayout &DL = A.getDataLayout();
4140 
4141  auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4142  bool Stripped) -> bool {
4143  unsigned IdxWidth =
4144  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4145  APInt Offset(IdxWidth, 0);
4146  const Value *Base =
4147  stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4148 
4149  const auto &AA = A.getAAFor<AADereferenceable>(
4150  *this, IRPosition::value(V), DepClassTy::REQUIRED);
4151  int64_t DerefBytes = 0;
4152  if (!Stripped && this == &AA) {
4153  // Use IR information if we did not strip anything.
4154  // TODO: track globally.
4155  bool CanBeNull, CanBeFreed;
4156  DerefBytes =
4157  Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4158  T.GlobalState.indicatePessimisticFixpoint();
4159  } else {
4160  const DerefState &DS = AA.getState();
4161  DerefBytes = DS.DerefBytesState.getAssumed();
4162  T.GlobalState &= DS.GlobalState;
4163  }
4164 
4165  // For now we do not try to "increase" dereferenceability due to negative
4166  // indices as we first have to come up with code to deal with loops and
4167  // with overflows of the dereferenceable bytes.
4168  int64_t OffsetSExt = Offset.getSExtValue();
4169  if (OffsetSExt < 0)
4170  OffsetSExt = 0;
4171 
4172  T.takeAssumedDerefBytesMinimum(
4173  std::max(int64_t(0), DerefBytes - OffsetSExt));
4174 
4175  if (this == &AA) {
4176  if (!Stripped) {
4177  // If nothing was stripped IR information is all we got.
4178  T.takeKnownDerefBytesMaximum(
4179  std::max(int64_t(0), DerefBytes - OffsetSExt));
4180  T.indicatePessimisticFixpoint();
4181  } else if (OffsetSExt > 0) {
4182  // If something was stripped but there is circular reasoning we look
4183  // at the offset. If it is positive we basically decrease the
4184  // dereferenceable bytes in a circular loop now, which will simply
4185  // drive them down to the known value in a very slow way which we
4186  // can accelerate.
4187  T.indicatePessimisticFixpoint();
4188  }
4189  }
4190 
4191  return T.isValidState();
4192  };
4193 
4194  DerefState T;
4195  if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4196  VisitValueCB, getCtxI()))
4197  return indicatePessimisticFixpoint();
4198 
4199  return clampStateAndIndicateChange(getState(), T);
4200  }
4201 
4202  /// See AbstractAttribute::trackStatistics()
4203  void trackStatistics() const override {
4204  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4205  }
4206 };
4207 
4208 /// Dereferenceable attribute for a return value.
4209 struct AADereferenceableReturned final
4210  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4211  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4212  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4213  IRP, A) {}
4214 
4215  /// See AbstractAttribute::trackStatistics()
4216  void trackStatistics() const override {
4217  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4218  }
4219 };
4220 
4221 /// Dereferenceable attribute for an argument
4222 struct AADereferenceableArgument final
4223  : AAArgumentFromCallSiteArguments<AADereferenceable,
4224  AADereferenceableImpl> {
4225  using Base =
4226  AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4227  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4228  : Base(IRP, A) {}
4229 
4230  /// See AbstractAttribute::trackStatistics()
4231  void trackStatistics() const override {
4232  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4233  }
4234 };
4235 
4236 /// Dereferenceable attribute for a call site argument.
4237 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4238  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4239  : AADereferenceableFloating(IRP, A) {}
4240 
4241  /// See AbstractAttribute::trackStatistics()
4242  void trackStatistics() const override {
4243  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4244  }
4245 };
4246 
4247 /// Dereferenceable attribute deduction for a call site return value.
4248 struct AADereferenceableCallSiteReturned final
4249  : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4250  using Base =
4251  AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4252  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4253  : Base(IRP, A) {}
4254 
4255  /// See AbstractAttribute::trackStatistics()
4256  void trackStatistics() const override {
4257  STATS_DECLTRACK_CS_ATTR(dereferenceable);
4258  }
4259 };
4260 
4261 // ------------------------ Align Argument Attribute ------------------------
4262 
4263 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4264  Value &AssociatedValue, const Use *U,
4265  const Instruction *I, bool &TrackUse) {
4266  // We need to follow common pointer manipulation uses to the accesses they
4267  // feed into.
4268  if (isa<CastInst>(I)) {
4269  // Follow all but ptr2int casts.
4270  TrackUse = !isa<PtrToIntInst>(I);
4271  return 0;
4272  }
4273  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4274  if (GEP->hasAllConstantIndices())
4275  TrackUse = true;
4276  return 0;
4277  }
4278 
4279  MaybeAlign MA;
4280  if (const auto *CB = dyn_cast<CallBase>(I)) {
4281  if (CB->isBundleOperand(U) || CB->isCallee(U))
4282  return 0;
4283 
4284  unsigned ArgNo = CB->getArgOperandNo(U);
4285  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4286  // As long as we only use known information there is no need to track
4287  // dependences here.
4288  auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4289  MA = MaybeAlign(AlignAA.getKnownAlign());
4290  }
4291 
4292  const DataLayout &DL = A.getDataLayout();
4293  const Value *UseV = U->get();
4294  if (auto *SI = dyn_cast<StoreInst>(I)) {
4295  if (SI->getPointerOperand() == UseV)
4296  MA = SI->getAlign();
4297  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4298  if (LI->getPointerOperand() == UseV)
4299  MA = LI->getAlign();
4300  }
4301 
4302  if (!MA || *MA <= QueryingAA.getKnownAlign())
4303  return 0;
4304 
4305  unsigned Alignment = MA->value();
4306  int64_t Offset;
4307 
4308  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4309  if (Base == &AssociatedValue) {
4310  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4311  // So we can say that the maximum power of two which is a divisor of
4312  // gcd(Offset, Alignment) is an alignment.
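      // (Illustrative example: an access at Base + 12 known to be 8-aligned
      // gives gcd(12, 8) = 4 and PowerOf2Floor(4) = 4, so an alignment of 4
      // can be concluded for the base pointer.)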
4313 
4314  uint32_t gcd =
4315  greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4316  Alignment = llvm::PowerOf2Floor(gcd);
4317  }
4318  }
4319 
4320  return Alignment;
4321 }
4322 
4324  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4325 
4326  /// See AbstractAttribute::initialize(...).
4327  void initialize(Attributor &A) override {
4329  getAttrs({Attribute::Alignment}, Attrs);
4330  for (const Attribute &Attr : Attrs)
4331  takeKnownMaximum(Attr.getValueAsInt());
4332 
4333  Value &V = getAssociatedValue();
4334  // TODO: This is a HACK to avoid getPointerAlignment introducing a ptr2int
4335  // use of the function pointer. This was caused by D73131. We want to
4336  // avoid this for function pointers especially because we iterate
4337  // their uses and int2ptr is not handled. It is not a correctness
4338  // problem though!
4339  if (!V.getType()->getPointerElementType()->isFunctionTy())
4340  takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4341 
4342  if (getIRPosition().isFnInterfaceKind() &&
4343  (!getAnchorScope() ||
4344  !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4345  indicatePessimisticFixpoint();
4346  return;
4347  }
4348 
4349  if (Instruction *CtxI = getCtxI())
4350  followUsesInMBEC(*this, A, getState(), *CtxI);
4351  }
4352 
4353  /// See AbstractAttribute::manifest(...).
4354  ChangeStatus manifest(Attributor &A) override {
4355  ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4356 
4357  // Check for users that allow alignment annotations.
4358  Value &AssociatedValue = getAssociatedValue();
4359  for (const Use &U : AssociatedValue.uses()) {
4360  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4361  if (SI->getPointerOperand() == &AssociatedValue)
4362  if (SI->getAlignment() < getAssumedAlign()) {
4363  STATS_DECLTRACK(AAAlign, Store,
4364  "Number of times alignment added to a store");
4365  SI->setAlignment(Align(getAssumedAlign()));
4366  LoadStoreChanged = ChangeStatus::CHANGED;
4367  }
4368  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4369  if (LI->getPointerOperand() == &AssociatedValue)
4370  if (LI->getAlignment() < getAssumedAlign()) {
4371  LI->setAlignment(Align(getAssumedAlign()));
4372  STATS_DECLTRACK(AAAlign, Load,
4373  "Number of times alignment added to a load");
4374  LoadStoreChanged = ChangeStatus::CHANGED;
4375  }
4376  }
4377  }
4378 
4379  ChangeStatus Changed = AAAlign::manifest(A);
4380 
4381  Align InheritAlign =
4382  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4383  if (InheritAlign >= getAssumedAlign())
4384  return LoadStoreChanged;
4385  return Changed | LoadStoreChanged;
4386  }
4387 
4388  // TODO: Provide a helper to determine the implied ABI alignment and check
4389  // that value in the existing manifest method and in a new one for
4390  // AAAlignImpl, to avoid making the alignment explicit if it did not improve.
4391 
4392  /// See AbstractAttribute::getDeducedAttributes
4393  virtual void
4394  getDeducedAttributes(LLVMContext &Ctx,
4395  SmallVectorImpl<Attribute> &Attrs) const override {
4396  if (getAssumedAlign() > 1)
4397  Attrs.emplace_back(
4398  Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4399  }
4400 
4401  /// See followUsesInMBEC
4402  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4403  AAAlign::StateType &State) {
4404  bool TrackUse = false;
4405 
4406  unsigned int KnownAlign =
4407  getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4408  State.takeKnownMaximum(KnownAlign);
4409 
4410  return TrackUse;
4411  }
4412 
4413  /// See AbstractAttribute::getAsStr().
4414  const std::string getAsStr() const override {
4415  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4416  "-" + std::to_string(getAssumedAlign()) + ">")
4417  : "unknown-align";
4418  }
4419 };
4420 
4421 /// Align attribute for a floating value.
4422 struct AAAlignFloating : AAAlignImpl {
4423  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4424 
4425  /// See AbstractAttribute::updateImpl(...).
4427  const DataLayout &DL = A.getDataLayout();
4428 
4429  auto VisitValueCB = [&](Value &V, const Instruction *,
4430  AAAlign::StateType &T, bool Stripped) -> bool {
4431  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4432  DepClassTy::REQUIRED);
4433  if (!Stripped && this == &AA) {
4434  int64_t Offset;
4435  unsigned Alignment = 1;
4436  if (const Value *Base =
4437  GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4438  Align PA = Base->getPointerAlignment(DL);
4439  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4440  // So we can say that the maximum power of two which is a divisor of
4441  // gcd(Offset, Alignment) is an alignment.
4442 
4443  uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4444  uint32_t(PA.value()));
4445  Alignment = llvm::PowerOf2Floor(gcd);
4446  } else {
4447  Alignment = V.getPointerAlignment(DL).value();
4448  }
4449  // Use only IR information if we did not strip anything.
4450  T.takeKnownMaximum(Alignment);
4451  T.indicatePessimisticFixpoint();
4452  } else {
4453  // Use abstract attribute information.
4454  const AAAlign::StateType &DS = AA.getState();
4455  T ^= DS;
4456  }
4457  return T.isValidState();
4458  };
4459 
4460  StateType T;
4461  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4462  VisitValueCB, getCtxI()))
4463  return indicatePessimisticFixpoint();
4464 
4465  // TODO: If we know we visited all incoming values, and thus none are
4466  // assumed dead, we can take the known information from the state T.
4467  return clampStateAndIndicateChange(getState(), T);
4468  }
4469 
4470  /// See AbstractAttribute::trackStatistics()
4471  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4472 };
4473 
4474 /// Align attribute for function return value.
4475 struct AAAlignReturned final
4476  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4478  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4479 
4480  /// See AbstractAttribute::initialize(...).
4481  void initialize(Attributor &A) override {
4482  Base::initialize(A);
4483  Function *F = getAssociatedFunction();
4484  if (!F || F->isDeclaration())
4485  indicatePessimisticFixpoint();
4486  }
4487 
4488  /// See AbstractAttribute::trackStatistics()
4489  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4490 };
4491 
4492 /// Align attribute for function argument.
4493 struct AAAlignArgument final
4494  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4495  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4496  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4497 
4498  /// See AbstractAttribute::manifest(...).
4499  ChangeStatus manifest(Attributor &A) override {
4500  // If the associated argument is involved in a must-tail call we give up
4501  // because we would need to keep the argument alignments of caller and
4502  // callee in-sync. Just does not seem worth the trouble right now.
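      // (Illustrative note, an assumption based on the LangRef rules for
      // musttail: such calls require matching caller/callee parameter
      // attributes and ABIs, so raising the alignment on only one side could
      // break that invariant.)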
4503  if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4504  return ChangeStatus::UNCHANGED;
4505  return Base::manifest(A);
4506  }
4507 
4508  /// See AbstractAttribute::trackStatistics()
4509  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4510 };
4511 
4512 struct AAAlignCallSiteArgument final : AAAlignFloating {
4513  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4514  : AAAlignFloating(IRP, A) {}
4515 
4516  /// See AbstractAttribute::manifest(...).
4517  ChangeStatus manifest(Attributor &A) override {
4518  // If the associated argument is involved in a must-tail call we give up
4519  // because we would need to keep the argument alignments of caller and
4520  // callee in-sync. Just does not seem worth the trouble right now.
4521  if (Argument *Arg = getAssociatedArgument())
4522  if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4523  return ChangeStatus::UNCHANGED;
4524  ChangeStatus Changed = AAAlignImpl::manifest(A);
4525  Align InheritAlign =
4526  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4527  if (InheritAlign >= getAssumedAlign())
4528  Changed = ChangeStatus::UNCHANGED;
4529  return Changed;
4530  }
4531 
4532  /// See AbstractAttribute::updateImpl(Attributor &A).
4533  ChangeStatus updateImpl(Attributor &A) override {
4534  ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4535  if (Argument *Arg = getAssociatedArgument()) {
4536  // We only take known information from the argument
4537  // so we do not need to track a dependence.
4538  const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4539  *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4540  takeKnownMaximum(ArgAlignAA.getKnownAlign());
4541  }
4542  return Changed;
4543  }
4544 
4545  /// See AbstractAttribute::trackStatistics()
4546  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4547 };
4548 
4549 /// Align attribute deduction for a call site return value.
4550 struct AAAlignCallSiteReturned final
4551  : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4552  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4553  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4554  : Base(IRP, A) {}
4555 
4556  /// See AbstractAttribute::initialize(...).
4557  void initialize(Attributor &A) override {
4558  Base::initialize(A);
4559  Function *F = getAssociatedFunction();
4560  if (!F || F->isDeclaration())
4561  indicatePessimisticFixpoint();
4562  }
4563 
4564  /// See AbstractAttribute::trackStatistics()
4565  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4566 };
4567 
4568 /// ------------------ Function No-Return Attribute ----------------------------
4569 struct AANoReturnImpl : public AANoReturn {
4570  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4571 
4572  /// See AbstractAttribute::initialize(...).
4573  void initialize(Attributor &A) override {
4574  AANoReturn::initialize(A);
4575  Function *F = getAssociatedFunction();
4576  if (!F || F->isDeclaration())
4577  indicatePessimisticFixpoint();
4578  }
4579 
4580  /// See AbstractAttribute::getAsStr().
4581  const std::string getAsStr() const override {
4582  return getAssumed() ? "noreturn" : "may-return";
4583  }
4584 
4585  /// See AbstractAttribute::updateImpl(Attributor &A).
4586  virtual ChangeStatus updateImpl(Attributor &A) override {
4587  auto CheckForNoReturn = [](Instruction &) { return false; };
4588  bool UsedAssumedInformation = false;
4589  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4590  {(unsigned)Instruction::Ret},
4591  UsedAssumedInformation))
4592  return indicatePessimisticFixpoint();
4593  return ChangeStatus::UNCHANGED;
4594  }
4595 };
4596 
4597 struct AANoReturnFunction final : AANoReturnImpl {
4598  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4599  : AANoReturnImpl(IRP, A) {}
4600 
4601  /// See AbstractAttribute::trackStatistics()
4602  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4603 };
4604 
4605 /// NoReturn attribute deduction for a call site.
4606 struct AANoReturnCallSite final : AANoReturnImpl {
4607  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4608  : AANoReturnImpl(IRP, A) {}
4609 
4610  /// See AbstractAttribute::initialize(...).
4611  void initialize(Attributor &A) override {
4612  AANoReturnImpl::initialize(A);
4613  if (Function *F = getAssociatedFunction()) {
4614  const IRPosition &FnPos = IRPosition::function(*F);
4615  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4616  if (!FnAA.isAssumedNoReturn())
4617  indicatePessimisticFixpoint();
4618  }
4619  }
4620 
4621  /// See AbstractAttribute::updateImpl(...).
4622  ChangeStatus updateImpl(Attributor &A) override {
4623  // TODO: Once we have call site specific value information we can provide
4624  // call site specific liveness information and then it makes
4625  // sense to specialize attributes for call site arguments instead of
4626  // redirecting requests to the callee argument.
4627  Function *F = getAssociatedFunction();
4628  const IRPosition &FnPos = IRPosition::function(*F);
4629  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4630  return clampStateAndIndicateChange(getState(), FnAA.getState());
4631  }
4632 
4633  /// See AbstractAttribute::trackStatistics()
4634  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4635 };
4636 
4637 /// ----------------------- Variable Capturing ---------------------------------
4638 
4639 /// A class to hold the state for no-capture attributes.
4640 struct AANoCaptureImpl : public AANoCapture {
4641  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4642 
4643  /// See AbstractAttribute::initialize(...).
4644  void initialize(Attributor &A) override {
4645  if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4646  indicateOptimisticFixpoint();
4647  return;
4648  }
4649  Function *AnchorScope = getAnchorScope();
4650  if (isFnInterfaceKind() &&
4651  (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4652  indicatePessimisticFixpoint();
4653  return;
4654  }
4655 
4656  // You cannot "capture" null in the default address space.
4657  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4658  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4659  indicateOptimisticFixpoint();
4660  return;
4661  }
4662 
4663  const Function *F =
4664  isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4665 
4666  // Check what state the associated function can actually capture.
4667  if (F)
4668  determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4669  else
4670  indicatePessimisticFixpoint();
4671  }
4672 
4673  /// See AbstractAttribute::updateImpl(...).
4674  ChangeStatus updateImpl(Attributor &A) override;
4675 
4676  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4677  virtual void
4679  SmallVectorImpl<Attribute> &Attrs) const override {
4680  if (!isAssumedNoCaptureMaybeReturned())
4681  return;
4682 
4683  if (isArgumentPosition()) {
4684  if (isAssumedNoCapture())
4685  Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4686  else if (ManifestInternal)
4687  Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4688  }
4689  }
4690 
4691  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4692  /// depending on the ability of the function associated with \p IRP to capture
4693  /// state in memory and through "returning/throwing", respectively.
4695  const Function &F,
4696  BitIntegerState &State) {
4697  // TODO: Once we have memory behavior attributes we should use them here.
4698 
4699  // If we know we cannot communicate or write to memory, we do not care about
4700  // ptr2int anymore.
4701  if (F.onlyReadsMemory() && F.doesNotThrow() &&
4702  F.getReturnType()->isVoidTy()) {
4703  State.addKnownBits(NO_CAPTURE);
4704  return;
4705  }
4706 
4707  // A function cannot capture state in memory if it only reads memory; it can
4708  // however return/throw state and the state might be influenced by the
4709  // pointer value, e.g., loading from a returned pointer might reveal a bit.
4710  if (F.onlyReadsMemory())
4711  State.addKnownBits(NOT_CAPTURED_IN_MEM);
4712 
4713  // A function cannot communicate state back if it does not throw
4714  // exceptions and does not return values.
4715  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4716  State.addKnownBits(NOT_CAPTURED_IN_RET);
4717 
4718  // Check existing "returned" attributes.
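      // (Illustrative example: given a signature like
      // "i8* @f(i8* returned %p)", %p may escape through the return value, so
      // NOT_CAPTURED_IN_RET is removed for %p below, while no other argument
      // can be the returned one.)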
4719  int ArgNo = IRP.getCalleeArgNo();
4720  if (F.doesNotThrow() && ArgNo >= 0) {
4721  for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4722  if (F.hasParamAttribute(u, Attribute::Returned)) {
4723  if (u == unsigned(ArgNo))
4724  State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4725  else if (F.onlyReadsMemory())
4726  State.addKnownBits(NO_CAPTURE);
4727  else
4728  State.addKnownBits(NOT_CAPTURED_IN_RET);
4729  break;
4730  }
4731  }
4732  }
4733 
4734  /// See AbstractState::getAsStr().
4735  const std::string getAsStr() const override {
4736  if (isKnownNoCapture())
4737  return "known not-captured";
4738  if (isAssumedNoCapture())
4739  return "assumed not-captured";
4740  if (isKnownNoCaptureMaybeReturned())
4741  return "known not-captured-maybe-returned";
4742  if (isAssumedNoCaptureMaybeReturned())
4743  return "assumed not-captured-maybe-returned";
4744  return "assumed-captured";
4745  }
4746 };
4747 
4748 /// Attributor-aware capture tracker.
4749 struct AACaptureUseTracker final : public CaptureTracker {
4750 
4751  /// Create a capture tracker that can lookup in-flight abstract attributes
4752  /// through the Attributor \p A.
4753  ///
4754  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4755  /// search is stopped. If a use leads to a return instruction,
4756  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4757  /// If a use leads to a ptr2int which may capture the value,
4758  /// \p CapturedInInteger is set. If a use is found that is currently assumed
4759  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4760  /// set. All values in \p PotentialCopies are later tracked as well. For every
4761  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4762  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4763  /// conservatively set to true.
4764  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4765  const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4766  SmallSetVector<Value *, 4> &PotentialCopies,
4767  unsigned &RemainingUsesToExplore)
4768  : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4769  PotentialCopies(PotentialCopies),
4770  RemainingUsesToExplore(RemainingUsesToExplore) {}
4771 
4772  /// Determine if \p V maybe captured. *Also updates the state!*
4773  bool valueMayBeCaptured(const Value *V) {
4774  if (V->getType()->isPointerTy()) {
4775  PointerMayBeCaptured(V, this);
4776  } else {
4777  State.indicatePessimisticFixpoint();
4778  }
4779  return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4780  }
4781 
4782  /// See CaptureTracker::tooManyUses().
4783  void tooManyUses() override {
4784  State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4785  }
4786 
4787  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4788  if (CaptureTracker::isDereferenceableOrNull(O, DL))
4789  return true;
4790  const auto &DerefAA = A.getAAFor<AADereferenceable>(
4791  NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4792  return DerefAA.getAssumedDereferenceableBytes();
4793  }
4794 
4795  /// See CaptureTracker::captured(...).
4796  bool captured(const Use *U) override {
4797  Instruction *UInst = cast<Instruction>(U->getUser());
4798  LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4799  << "\n");
4800 
4801  // Because we may reuse the tracker multiple times we keep track of the
4802  // number of explored uses ourselves as well.
4803  if (RemainingUsesToExplore-- == 0) {
4804  LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4805  return isCapturedIn(/* Memory */ true, /* Integer */ true,
4806  /* Return */ true);
4807  }
4808 
4809  // Deal with ptr2int by following uses.
4810  if (isa<PtrToIntInst>(UInst)) {
4811  LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4812  return valueMayBeCaptured(UInst);
4813  }
4814 
4815  // For stores we check if we can follow the value through memory or not.
4816  if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4817  if (SI->isVolatile())
4818  return isCapturedIn(/* Memory */ true, /* Integer */ false,
4819  /* Return */ false);
4820  bool UsedAssumedInformation = false;
4821  if (!AA::getPotentialCopiesOfStoredValue(
4822  A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4823  return isCapturedIn(/* Memory */ true, /* Integer */ false,
4824  /* Return */ false);
4825  // Not captured directly, potential copies will be checked.
4826  return isCapturedIn(/* Memory */ false, /* Integer */ false,
4827  /* Return */ false);
4828  }
4829 
4830  // Explicitly catch return instructions.
4831  if (isa<ReturnInst>(UInst)) {
4832  if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4833  return isCapturedIn(/* Memory */ false, /* Integer */ false,
4834  /* Return */ true);
4835  return isCapturedIn(/* Memory */ true, /* Integer */ true,
4836  /* Return */ true);
4837  }
4838 
4839  // For now we only use special logic for call sites. However, the tracker
4840  // itself knows about a lot of other non-capturing cases already.
4841  auto *CB = dyn_cast<CallBase>(UInst);
4842  if (!CB || !CB->isArgOperand(U))
4843  return isCapturedIn(/* Memory */ true, /* Integer */ true,
4844  /* Return */ true);
4845 
4846  unsigned ArgNo = CB->getArgOperandNo(U);
4847  const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4848  // If we have an abstract no-capture attribute for the argument we can use
4849  // it to justify a non-capture attribute here. This allows recursion!
4850  auto &ArgNoCaptureAA =
4851  A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4852  if (ArgNoCaptureAA.isAssumedNoCapture())
4853  return isCapturedIn(/* Memory */ false, /* Integer */ false,
4854  /* Return */ false);
4855  if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4856  addPotentialCopy(*CB);
4857  return isCapturedIn(/* Memory */ false, /* Integer */ false,
4858  /* Return */ false);
4859  }
4860 
4861  // Lastly, we could not find a reason no-capture can be assumed so we don't.
4862  return isCapturedIn(/* Memory */ true, /* Integer */ true,
4863  /* Return */ true);
4864  }
4865 
4866  /// Register \p CS as potential copy of the value we are checking.
4867  void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4868 
4869  /// See CaptureTracker::shouldExplore(...).
4870  bool shouldExplore(const Use *U) override {
4871  // Check liveness and ignore droppable users.
4872  bool UsedAssumedInformation = false;
4873  return !U->getUser()->isDroppable() &&
4874  !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4875  UsedAssumedInformation);
4876  }
4877 
4878  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4879  /// \p CapturedInRet, then return the appropriate value for use in the
4880  /// CaptureTracker::captured() interface.
4881  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4882  bool CapturedInRet) {
4883  LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4884  << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4885  if (CapturedInMem)
4886  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4887  if (CapturedInInt)
4888  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4889  if (CapturedInRet)
4890  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4891  return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4892  }
4893 
4894 private:
4895  /// The attributor providing in-flight abstract attributes.
4896  Attributor &A;
4897 
4898  /// The abstract attribute currently updated.
4899  AANoCapture &NoCaptureAA;
4900 
4901  /// The abstract liveness state.
4902  const AAIsDead &IsDeadAA;
4903 
4904  /// The state currently updated.
4905  AANoCapture::StateType &State;
4906 
4907  /// Set of potential copies of the tracked value.
4908  SmallSetVector<Value *, 4> &PotentialCopies;
4909 
4910  /// Global counter to limit the number of explored uses.
4911  unsigned &RemainingUsesToExplore;
4912 };
4913 
4914 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4915  const IRPosition &IRP = getIRPosition();
4916  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4917  : &IRP.getAssociatedValue();
4918  if (!V)
4919  return indicatePessimisticFixpoint();
4920 
4921  const Function *F =
4922  isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4923  assert(F && "Expected a function!");
4924  const IRPosition &FnPos = IRPosition::function(*F);
4925  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4926 
4927  AANoCapture::StateType T;
4928 
4929  // Readonly means we cannot capture through memory.
4930  const auto &FnMemAA =
4931  A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4932  if (FnMemAA.isAssumedReadOnly()) {
4933  T.addKnownBits(NOT_CAPTURED_IN_MEM);
4934  if (FnMemAA.isKnownReadOnly())
4935  addKnownBits(NOT_CAPTURED_IN_MEM);
4936  else
4937  A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4938  }
4939 
4940  // Make sure all returned values are different than the underlying value.
4941  // TODO: we could do this in a more sophisticated way inside
4942  // AAReturnedValues, e.g., track all values that escape through returns
4943  // directly somehow.
4944  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4945  bool SeenConstant = false;
4946  for (auto &It : RVAA.returned_values()) {
4947  if (isa<Constant>(It.first)) {
4948  if (SeenConstant)
4949  return false;
4950  SeenConstant = true;
4951  } else if (!isa<Argument>(It.first) ||
4952  It.first == getAssociatedArgument())
4953  return false;
4954  }
4955  return true;
4956  };
4957 
4958  const auto &NoUnwindAA =
4959  A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4960  if (NoUnwindAA.isAssumedNoUnwind()) {
4961  bool IsVoidTy = F->getReturnType()->isVoidTy();
4962  const AAReturnedValues *RVAA =
4963  IsVoidTy ? nullptr
4964  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4966  DepClassTy::OPTIONAL);
4967  if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4968  T.addKnownBits(NOT_CAPTURED_IN_RET);
4969  if (T.isKnown(NOT_CAPTURED_IN_MEM))
4970  return ChangeStatus::UNCHANGED;
4971  if (NoUnwindAA.isKnownNoUnwind() &&
4972  (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4973  addKnownBits(NOT_CAPTURED_IN_RET);
4974  if (isKnown(NOT_CAPTURED_IN_MEM))
4975  return indicateOptimisticFixpoint();
4976  }
4977  }
4978  }
4979 
4980  // Use the CaptureTracker interface and logic with the specialized tracker,
4981  // defined in AACaptureUseTracker, that can look at in-flight abstract
4982  // attributes and directly updates the assumed state.
4983  SmallSetVector<Value *, 4> PotentialCopies;
4984  unsigned RemainingUsesToExplore =
4985  getDefaultMaxUsesToExploreForCaptureTracking();
4986  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4987  RemainingUsesToExplore);
4988 
4989  // Check all potential copies of the associated value until we can assume
4990  // none will be captured or we have to assume at least one might be.
4991  unsigned Idx = 0;
4992  PotentialCopies.insert(V);
4993  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4994  Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4995 
4996  AANoCapture::StateType &S = getState();
4997  auto Assumed = S.getAssumed();
4998  S.intersectAssumedBits(T.getAssumed());
4999  if (!isAssumedNoCaptureMaybeReturned())
5000  return indicatePessimisticFixpoint();
5001  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5002  : ChangeStatus::CHANGED;
5003 }
5004 
5005 /// NoCapture attribute for function arguments.
5006 struct AANoCaptureArgument final : AANoCaptureImpl {
5007  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5008  : AANoCaptureImpl(IRP, A) {}
5009 
5010  /// See AbstractAttribute::trackStatistics()
5011  void