1//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// See the Attributor.h file comment and the class descriptions in that file for
10// more information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/IPO/Attributor.h"
15
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SetVector.h"
26#include "llvm/ADT/Statistic.h"
39#include "llvm/IR/Argument.h"
40#include "llvm/IR/Assumptions.h"
41#include "llvm/IR/Attributes.h"
42#include "llvm/IR/BasicBlock.h"
43#include "llvm/IR/Constant.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/GlobalValue.h"
48#include "llvm/IR/IRBuilder.h"
49#include "llvm/IR/InlineAsm.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
54#include "llvm/IR/IntrinsicsAMDGPU.h"
55#include "llvm/IR/IntrinsicsNVPTX.h"
56#include "llvm/IR/LLVMContext.h"
57#include "llvm/IR/MDBuilder.h"
58#include "llvm/IR/NoFolder.h"
59#include "llvm/IR/Value.h"
60#include "llvm/IR/ValueHandle.h"
73#include <cassert>
74#include <numeric>
75#include <optional>
76#include <string>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "attributor"
81
82static cl::opt<bool> ManifestInternal(
83 "attributor-manifest-internal", cl::Hidden,
84 cl::desc("Manifest Attributor internal string attributes."),
85 cl::init(false));
86
87static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
88 cl::Hidden);
89
90template <>
91unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
92
93template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;
94
95static cl::opt<unsigned, true> MaxPotentialValues(
96 "attributor-max-potential-values", cl::Hidden,
97 cl::desc("Maximum number of potential values to be "
98 "tracked for each position."),
100 cl::init(7));
101
102static cl::opt<int> MaxPotentialValuesIterations(
103 "attributor-max-potential-values-iterations", cl::Hidden,
104 cl::desc(
105 "Maximum number of iterations we keep dismantling potential values."),
106 cl::init(64));
107
108STATISTIC(NumAAs, "Number of abstract attributes created");
109STATISTIC(NumIndirectCallsPromoted, "Number of indirect calls promoted");
110
111// Some helper macros to deal with statistics tracking.
112//
113// Usage:
114// For simple IR attribute tracking overload trackStatistics in the abstract
115// attribute and choose the right STATS_DECLTRACK_********* macro,
116// e.g.:
117// void trackStatistics() const override {
118// STATS_DECLTRACK_ARG_ATTR(returned)
119// }
120// If there is a single "increment" site one can use the macro
121// STATS_DECLTRACK with a custom message. If there are multiple increment
122// sites, STATS_DECL and STATS_TRACK can also be used separately.
123//
124#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
125 ("Number of " #TYPE " marked '" #NAME "'")
126#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
127#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
128#define STATS_DECL(NAME, TYPE, MSG) \
129 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
130#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
131#define STATS_DECLTRACK(NAME, TYPE, MSG) \
132 { \
133 STATS_DECL(NAME, TYPE, MSG) \
134 STATS_TRACK(NAME, TYPE) \
135 }
136#define STATS_DECLTRACK_ARG_ATTR(NAME) \
137 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
138#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
139 STATS_DECLTRACK(NAME, CSArguments, \
140 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
141#define STATS_DECLTRACK_FN_ATTR(NAME) \
142 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
143#define STATS_DECLTRACK_CS_ATTR(NAME) \
144 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
145#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
146 STATS_DECLTRACK(NAME, FunctionReturn, \
147 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
148#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
149 STATS_DECLTRACK(NAME, CSReturn, \
150 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
151#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
152 STATS_DECLTRACK(NAME, Floating, \
153 ("Number of floating values known to be '" #NAME "'"))
154
155// Specialization of the operator<< for abstract attribute subclasses. This
156// disambiguates situations where multiple operators are applicable.
157namespace llvm {
158#define PIPE_OPERATOR(CLASS) \
159 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
160 return OS << static_cast<const AbstractAttribute &>(AA); \
161 }
162
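// The macro is instantiated once per abstract attribute subclass, e.g.,
//   PIPE_OPERATOR(AAIsDead)
//   PIPE_OPERATOR(AANoUnwind)
// so that streaming a subclass reference picks this overload unambiguously.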
200
201#undef PIPE_OPERATOR
202
203template <>
204ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
205 const DerefState &R) {
206 ChangeStatus CS0 =
207 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
208 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
209 return CS0 | CS1;
210}
211
212} // namespace llvm
213
214static bool mayBeInCycle(const CycleInfo *CI, const Instruction *I,
215 bool HeaderOnly, Cycle **CPtr = nullptr) {
216 if (!CI)
217 return true;
218 auto *BB = I->getParent();
219 auto *C = CI->getCycle(BB);
220 if (!C)
221 return false;
222 if (CPtr)
223 *CPtr = C;
224 return !HeaderOnly || BB == C->getHeader();
225}
226
227/// Checks if a type could have padding bytes.
228static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
229 // There is no size information, so be conservative.
230 if (!Ty->isSized())
231 return false;
232
233 // If the alloc size is not equal to the storage size, then there are padding
234 // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
235 if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
236 return false;
237
238 // FIXME: This isn't the right way to check for padding in vectors with
239 // non-byte-size elements.
240 if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
241 return isDenselyPacked(SeqTy->getElementType(), DL);
242
243 // For array types, check for padding within members.
244 if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
245 return isDenselyPacked(SeqTy->getElementType(), DL);
246
247 if (!isa<StructType>(Ty))
248 return true;
249
250 // Check for padding within and between elements of a struct.
251 StructType *StructTy = cast<StructType>(Ty);
252 const StructLayout *Layout = DL.getStructLayout(StructTy);
253 uint64_t StartPos = 0;
254 for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
255 Type *ElTy = StructTy->getElementType(I);
256 if (!isDenselyPacked(ElTy, DL))
257 return false;
258 if (StartPos != Layout->getElementOffsetInBits(I))
259 return false;
260 StartPos += DL.getTypeAllocSizeInBits(ElTy);
261 }
262
263 return true;
264}
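// Example: on a typical 64-bit target, { i8, i32 } is not densely packed (the
// second element sits at offset 32 bits, not 8, due to 3 padding bytes),
// whereas { i32, i32 } is.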
265
266/// Get the pointer operand of a memory accessing instruction. If \p I is
267/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
268/// is false and the instruction is volatile, return nullptr.
269static const Value *getPointerOperand(const Instruction *I,
270 bool AllowVolatile) {
271 if (!AllowVolatile && I->isVolatile())
272 return nullptr;
273
274 if (auto *LI = dyn_cast<LoadInst>(I)) {
275 return LI->getPointerOperand();
276 }
277
278 if (auto *SI = dyn_cast<StoreInst>(I)) {
279 return SI->getPointerOperand();
280 }
281
282 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
283 return CXI->getPointerOperand();
284 }
285
286 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
287 return RMWI->getPointerOperand();
288 }
289
290 return nullptr;
291}
292
293/// Helper function to create a pointer based on \p Ptr, advanced by \p
294/// Offset bytes.
295static Value *constructPointer(Value *Ptr, int64_t Offset,
296 IRBuilder<NoFolder> &IRB) {
297 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
298 << "-bytes\n");
299
300 if (Offset)
301 Ptr = IRB.CreatePtrAdd(Ptr, IRB.getInt64(Offset),
302 Ptr->getName() + ".b" + Twine(Offset));
303 return Ptr;
304}
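// For instance, constructPointer(%p, 4, IRB) emits
//   %p.b4 = getelementptr i8, ptr %p, i64 4
// and returns \p Ptr unchanged when Offset is 0.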
305
306static const Value *
307stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
308 const Value *Val, const DataLayout &DL, APInt &Offset,
309 bool GetMinOffset, bool AllowNonInbounds,
310 bool UseAssumed = false) {
311
312 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
313 const IRPosition &Pos = IRPosition::value(V);
314 // Only track dependence if we are going to use the assumed info.
315 const AAValueConstantRange *ValueConstantRangeAA =
316 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
317 UseAssumed ? DepClassTy::OPTIONAL
318 : DepClassTy::NONE);
319 if (!ValueConstantRangeAA)
320 return false;
321 ConstantRange Range = UseAssumed ? ValueConstantRangeAA->getAssumed()
322 : ValueConstantRangeAA->getKnown();
323 if (Range.isFullSet())
324 return false;
325
326 // We can only use the lower part of the range because the upper part can
327 // be higher than what the value can really be.
328 if (GetMinOffset)
329 ROffset = Range.getSignedMin();
330 else
331 ROffset = Range.getSignedMax();
332 return true;
333 };
334
335 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
336 /* AllowInvariant */ true,
337 AttributorAnalysis);
338}
339
340static const Value *
341stripAndAccumulateMinimalOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
342 const Value *Ptr, int64_t &BytesOffset,
343 const DataLayout &DL, bool AllowNonInbounds = false) {
344 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
345 const Value *Base =
346 stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
347 /* GetMinOffset */ true, AllowNonInbounds);
348
349 BytesOffset = OffsetAPInt.getSExtValue();
350 return Base;
351}
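// Example: for %q = getelementptr inbounds i32, ptr %base, i64 2 this returns
// %base with BytesOffset == 8. For variable indices with a known constant
// range, the signed minimum of the range is accumulated instead.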
352
353/// Clamp the information known for all returned values of a function
354/// (identified by \p QueryingAA) into \p S.
355template <typename AAType, typename StateType = typename AAType::StateType,
356 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
357 bool RecurseForSelectAndPHI = true>
358static void clampReturnedValueStates(
359 Attributor &A, const AAType &QueryingAA, StateType &S,
360 const IRPosition::CallBaseContext *CBContext = nullptr) {
361 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
362 << QueryingAA << " into " << S << "\n");
363
364 assert((QueryingAA.getIRPosition().getPositionKind() ==
365 IRPosition::IRP_RETURNED ||
366 QueryingAA.getIRPosition().getPositionKind() ==
367 IRPosition::IRP_CALL_SITE_RETURNED) &&
368 "Can only clamp returned value states for a function returned or call "
369 "site returned position!");
370
371 // Use an optional state as there might not be any return values, and we
372 // want to join (IntegerState::operator&) the states of all that exist.
373 std::optional<StateType> T;
374
375 // Callback for each possibly returned value.
376 auto CheckReturnValue = [&](Value &RV) -> bool {
377 const IRPosition &RVPos = IRPosition::value(RV, CBContext);
378 // If possible, use the hasAssumedIRAttr interface.
379 if (Attribute::isEnumAttrKind(IRAttributeKind)) {
380 bool IsKnown;
381 return AA::hasAssumedIRAttr<IRAttributeKind>(
382 A, &QueryingAA, RVPos, DepClassTy::REQUIRED, IsKnown);
383 }
384
385 const AAType *AA =
386 A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
387 if (!AA)
388 return false;
389 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
390 << " AA: " << AA->getAsStr(&A) << " @ " << RVPos << "\n");
391 const StateType &AAS = AA->getState();
392 if (!T)
393 T = StateType::getBestState(AAS);
394 *T &= AAS;
395 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
396 << "\n");
397 return T->isValidState();
398 };
399
400 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA,
401 AA::ValueScope::Intraprocedural,
402 RecurseForSelectAndPHI))
403 S.indicatePessimisticFixpoint();
404 else if (T)
405 S ^= *T;
406}
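// Conceptually this is a meet over all (intraprocedurally) returned values:
// e.g., if one return site is known nonnull but another is unknown, the
// clamped state is unknown.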
407
408namespace {
409/// Helper class for generic deduction: return value -> returned position.
410template <typename AAType, typename BaseType,
411 typename StateType = typename BaseType::StateType,
412 bool PropagateCallBaseContext = false,
413 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
414 bool RecurseForSelectAndPHI = true>
415struct AAReturnedFromReturnedValues : public BaseType {
416 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
417 : BaseType(IRP, A) {}
418
419 /// See AbstractAttribute::updateImpl(...).
420 ChangeStatus updateImpl(Attributor &A) override {
421 StateType S(StateType::getBestState(this->getState()));
422 clampReturnedValueStates<AAType, StateType, IRAttributeKind,
423 RecurseForSelectAndPHI>(
424 A, *this, S,
425 PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
426 // TODO: If we know we visited all returned values, and thus none are
427 // assumed dead, we can take the known information from the state T.
428 return clampStateAndIndicateChange<StateType>(this->getState(), S);
429 }
430};
431
432/// Clamp the information known at all call sites for a given argument
433/// (identified by \p QueryingAA) into \p S.
434template <typename AAType, typename StateType = typename AAType::StateType,
435 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
436static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
437 StateType &S) {
438 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
439 << QueryingAA << " into " << S << "\n");
440
441 assert(QueryingAA.getIRPosition().getPositionKind() ==
442 IRPosition::IRP_ARGUMENT &&
443 "Can only clamp call site argument states for an argument position!");
444
445 // Use an optional state as there might not be any call sites, and we
446 // want to join (IntegerState::operator&) the states of all that exist.
447 std::optional<StateType> T;
448
449 // The argument number which is also the call site argument number.
450 unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
451
452 auto CallSiteCheck = [&](AbstractCallSite ACS) {
453 const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
454 // Check if a corresponding argument was found or if it is not associated
455 // (which can happen for callback calls).
456 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
457 return false;
458
459 // If possible, use the hasAssumedIRAttr interface.
460 if (Attribute::isEnumAttrKind(IRAttributeKind)) {
461 bool IsKnown;
462 return AA::hasAssumedIRAttr<IRAttributeKind>(
463 A, &QueryingAA, ACSArgPos, DepClassTy::REQUIRED, IsKnown);
464 }
465
466 const AAType *AA =
467 A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
468 if (!AA)
469 return false;
470 LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
471 << " AA: " << AA->getAsStr(&A) << " @" << ACSArgPos
472 << "\n");
473 const StateType &AAS = AA->getState();
474 if (!T)
475 T = StateType::getBestState(AAS);
476 *T &= AAS;
477 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
478 << "\n");
479 return T->isValidState();
480 };
481
482 bool UsedAssumedInformation = false;
483 if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
484 UsedAssumedInformation))
485 S.indicatePessimisticFixpoint();
486 else if (T)
487 S ^= *T;
488}
489
490/// This function is the bridge between argument position and the call base
491/// context.
492template <typename AAType, typename BaseType,
493 typename StateType = typename AAType::StateType,
494 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
495bool getArgumentStateFromCallBaseContext(Attributor &A,
496 BaseType &QueryingAttribute,
497 IRPosition &Pos, StateType &State) {
498 assert(Pos.getPositionKind() == IRPosition::IRP_ARGUMENT &&
499 "Expected an 'argument' position!");
500 const CallBase *CBContext = Pos.getCallBaseContext();
501 if (!CBContext)
502 return false;
503
504 int ArgNo = Pos.getCallSiteArgNo();
505 assert(ArgNo >= 0 && "Invalid Arg No!");
506 const IRPosition CBArgPos = IRPosition::callsite_argument(*CBContext, ArgNo);
507
508 // If possible, use the hasAssumedIRAttr interface.
509 if (Attribute::isEnumAttrKind(IRAttributeKind)) {
510 bool IsKnown;
511 return AA::hasAssumedIRAttr<IRAttributeKind>(
512 A, &QueryingAttribute, CBArgPos, DepClassTy::REQUIRED, IsKnown);
513 }
514
515 const auto *AA =
516 A.getAAFor<AAType>(QueryingAttribute, CBArgPos, DepClassTy::REQUIRED);
517 if (!AA)
518 return false;
519 const StateType &CBArgumentState =
520 static_cast<const StateType &>(AA->getState());
521
522 LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument "
523 << "position: " << Pos << ", CB arg state: " << CBArgumentState
524 << "\n");
525
526 // NOTE: If we want to do call site grouping it should happen here.
527 State ^= CBArgumentState;
528 return true;
529}
530
531/// Helper class for generic deduction: call site argument -> argument position.
532template <typename AAType, typename BaseType,
533 typename StateType = typename AAType::StateType,
534 bool BridgeCallBaseContext = false,
535 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
536struct AAArgumentFromCallSiteArguments : public BaseType {
537 AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
538 : BaseType(IRP, A) {}
539
540 /// See AbstractAttribute::updateImpl(...).
541 ChangeStatus updateImpl(Attributor &A) override {
542 StateType S = StateType::getBestState(this->getState());
543
544 if (BridgeCallBaseContext) {
545 bool Success =
546 getArgumentStateFromCallBaseContext<AAType, BaseType, StateType,
547 IRAttributeKind>(
548 A, *this, this->getIRPosition(), S);
549 if (Success)
550 return clampStateAndIndicateChange<StateType>(this->getState(), S);
551 }
552 clampCallSiteArgumentStates<AAType, StateType, IRAttributeKind>(A, *this,
553 S);
554
555 // TODO: If we know we visited all incoming values, and thus none are
556 // assumed dead, we can take the known information from the state T.
557 return clampStateAndIndicateChange<StateType>(this->getState(), S);
558 }
559};
560
561/// Helper class for generic replication: function returned -> cs returned.
562template <typename AAType, typename BaseType,
563 typename StateType = typename BaseType::StateType,
564 bool IntroduceCallBaseContext = false,
565 Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
566struct AACalleeToCallSite : public BaseType {
567 AACalleeToCallSite(const IRPosition &IRP, Attributor &A) : BaseType(IRP, A) {}
568
569 /// See AbstractAttribute::updateImpl(...).
570 ChangeStatus updateImpl(Attributor &A) override {
571 auto IRPKind = this->getIRPosition().getPositionKind();
572 assert((IRPKind == IRPosition::IRP_CALL_SITE_RETURNED ||
573 IRPKind == IRPosition::IRP_CALL_SITE) &&
574 "Can only wrap function returned positions for call site "
575 "returned positions!");
576 auto &S = this->getState();
577
578 CallBase &CB = cast<CallBase>(this->getAnchorValue());
579 if (IntroduceCallBaseContext)
580 LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" << CB
581 << "\n");
582
583 ChangeStatus Changed = ChangeStatus::UNCHANGED;
584 auto CalleePred = [&](ArrayRef<const Function *> Callees) {
585 for (const Function *Callee : Callees) {
586 IRPosition FnPos =
587 IRPKind == llvm::IRPosition::IRP_CALL_SITE_RETURNED
588 ? IRPosition::returned(*Callee,
589 IntroduceCallBaseContext ? &CB : nullptr)
590 : IRPosition::function(
591 *Callee, IntroduceCallBaseContext ? &CB : nullptr);
592 // If possible, use the hasAssumedIRAttr interface.
593 if (Attribute::isEnumAttrKind(IRAttributeKind)) {
594 bool IsKnown;
595 if (!AA::hasAssumedIRAttr<IRAttributeKind>(
596 A, this, FnPos, DepClassTy::REQUIRED, IsKnown))
597 return false;
598 continue;
599 }
600
601 const AAType *AA =
602 A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
603 if (!AA)
604 return false;
605 Changed |= clampStateAndIndicateChange(S, AA->getState());
606 if (S.isAtFixpoint())
607 return S.isValidState();
608 }
609 return true;
610 };
611 if (!A.checkForAllCallees(CalleePred, *this, CB))
612 return S.indicatePessimisticFixpoint();
613 return Changed;
614 }
615};
616
617/// Helper function to accumulate uses.
618template <class AAType, typename StateType = typename AAType::StateType>
619static void followUsesInContext(AAType &AA, Attributor &A,
620 MustBeExecutedContextExplorer &Explorer,
621 const Instruction *CtxI,
622 SetVector<const Use *> &Uses,
623 StateType &State) {
624 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
625 for (unsigned u = 0; u < Uses.size(); ++u) {
626 const Use *U = Uses[u];
627 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
628 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
629 if (Found && AA.followUseInMBEC(A, U, UserI, State))
630 for (const Use &Us : UserI->uses())
631 Uses.insert(&Us);
632 }
633 }
634}
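// Note: Uses acts as a worklist; when followUseInMBEC approves a use, the
// uses of its user are appended, so Uses.size() can grow while we iterate.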
635
636/// Use the must-be-executed-context around \p I to add information into \p S.
637/// The AAType class is required to have a `followUseInMBEC` method with the
638/// following signature and behaviour:
639///
640/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, StateType &S)
641/// U - Underlying use.
642/// I - The user of the \p U.
643/// Returns true if the value should be tracked transitively.
644///
645template <class AAType, typename StateType = typename AAType::StateType>
646static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
647 Instruction &CtxI) {
648 MustBeExecutedContextExplorer *Explorer =
649 A.getInfoCache().getMustBeExecutedContextExplorer();
650 if (!Explorer)
651 return;
652
653 // Container for (transitive) uses of the associated value.
654 SetVector<const Use *> Uses;
655 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
656 Uses.insert(&U);
657
658 followUsesInContext<AAType>(AA, A, *Explorer, &CtxI, Uses, S);
659
660 if (S.isAtFixpoint())
661 return;
662
663 SmallVector<const BranchInst *, 4> BrInsts;
664 auto Pred = [&](const Instruction *I) {
665 if (const BranchInst *Br = dyn_cast<BranchInst>(I))
666 if (Br->isConditional())
667 BrInsts.push_back(Br);
668 return true;
669 };
670
671 // Here, accumulate conditional branch instructions in the context. We
672 // explore the child paths and collect the known states. The disjunction of
673 // those states can be merged to its own state. Let ParentState_i be a state
674 // to indicate the known information for an i-th branch instruction in the
675 // context. ChildStates are created for its successors respectively.
676 //
677 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
678 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
679 // ...
680 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
681 //
682 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
683 //
684 // FIXME: Currently, recursive branches are not handled. For example, we
685 // can't deduce that ptr must be dereferenced in the function below.
686 //
687 // void f(int a, int b, int *ptr) {
688 // if (a)
689 // if (b) {
690 // *ptr = 0;
691 // } else {
692 // *ptr = 1;
693 // }
694 // else {
695 // if (b) {
696 // *ptr = 0;
697 // } else {
698 // *ptr = 1;
699 // }
700 // }
701 // }
702
703 Explorer->checkForAllContext(&CtxI, Pred);
704 for (const BranchInst *Br : BrInsts) {
705 StateType ParentState;
706
707 // The known state of the parent state is a conjunction of children's
708 // known states so it is initialized with a best state.
709 ParentState.indicateOptimisticFixpoint();
710
711 for (const BasicBlock *BB : Br->successors()) {
712 StateType ChildState;
713
714 size_t BeforeSize = Uses.size();
715 followUsesInContext(AA, A, *Explorer, &BB->front(), Uses, ChildState);
716
717 // Erase uses which only appear in the child.
718 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
719 It = Uses.erase(It);
720
721 ParentState &= ChildState;
722 }
723
724 // Use only known state.
725 S += ParentState;
726 }
727}
728} // namespace
729
730/// ------------------------ PointerInfo ---------------------------------------
731
732namespace llvm {
733namespace AA {
734namespace PointerInfo {
735
736struct State;
737
738} // namespace PointerInfo
739} // namespace AA
740
741/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
742template <>
743struct DenseMapInfo<AAPointerInfo::Access> {
744 using Access = AAPointerInfo::Access;
745 static inline Access getEmptyKey();
746 static inline Access getTombstoneKey();
747 static unsigned getHashValue(const Access &A);
748 static bool isEqual(const Access &LHS, const Access &RHS);
749};
750
751/// Helper that allows RangeTy as a key in a DenseMap.
752template <> struct DenseMapInfo<AA::RangeTy> {
753 static inline AA::RangeTy getEmptyKey() {
754 auto EmptyKey = DenseMapInfo<int64_t>::getEmptyKey();
755 return AA::RangeTy{EmptyKey, EmptyKey};
756 }
757
758 static inline AA::RangeTy getTombstoneKey() {
759 auto TombstoneKey = DenseMapInfo<int64_t>::getTombstoneKey();
760 return AA::RangeTy{TombstoneKey, TombstoneKey};
761 }
762
763 static unsigned getHashValue(const AA::RangeTy &Range) {
764 return detail::combineHashValue(
765 DenseMapInfo<int64_t>::getHashValue(Range.Offset),
766 DenseMapInfo<int64_t>::getHashValue(Range.Size));
767 }
768
769 static bool isEqual(const AA::RangeTy &A, const AA::RangeTy B) {
770 return A == B;
771 }
772};
773
774/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
775/// but the instruction.
776struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
777 using Base = DenseMapInfo<Instruction *>;
778 using Access = AAPointerInfo::Access;
779 static inline Access getEmptyKey();
780 static inline Access getTombstoneKey();
781 static unsigned getHashValue(const Access &A);
782 static bool isEqual(const Access &LHS, const Access &RHS);
783};
784
785} // namespace llvm
786
787/// A type to track pointer/struct usage and accesses for AAPointerInfo.
788struct AA::PointerInfo::State : public AbstractState {
789 /// Return the best possible representable state.
790 static State getBestState(const State &SIS) { return State(); }
791
792 /// Return the worst possible representable state.
793 static State getWorstState(const State &SIS) {
794 State R;
795 R.indicatePessimisticFixpoint();
796 return R;
797 }
798
799 State() = default;
800 State(State &&SIS) = default;
801
802 const State &getAssumed() const { return *this; }
803
804 /// See AbstractState::isValidState().
805 bool isValidState() const override { return BS.isValidState(); }
806
807 /// See AbstractState::isAtFixpoint().
808 bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
809
810 /// See AbstractState::indicateOptimisticFixpoint().
811 ChangeStatus indicateOptimisticFixpoint() override {
812 BS.indicateOptimisticFixpoint();
813 return ChangeStatus::UNCHANGED;
814 }
815
816 /// See AbstractState::indicatePessimisticFixpoint().
817 ChangeStatus indicatePessimisticFixpoint() override {
818 BS.indicatePessimisticFixpoint();
819 return ChangeStatus::CHANGED;
820 }
821
822 State &operator=(const State &R) {
823 if (this == &R)
824 return *this;
825 BS = R.BS;
826 AccessList = R.AccessList;
827 OffsetBins = R.OffsetBins;
828 RemoteIMap = R.RemoteIMap;
829 ReturnedOffsets = R.ReturnedOffsets;
830 return *this;
831 }
832
833 State &operator=(State &&R) {
834 if (this == &R)
835 return *this;
836 std::swap(BS, R.BS);
837 std::swap(AccessList, R.AccessList);
838 std::swap(OffsetBins, R.OffsetBins);
839 std::swap(RemoteIMap, R.RemoteIMap);
840 std::swap(ReturnedOffsets, R.ReturnedOffsets);
841 return *this;
842 }
843
844 /// Add a new Access to the state at offset \p Offset and with size \p Size.
845 /// The access is associated with \p I, writes \p Content (if anything), and
846 /// is of kind \p Kind. If an Access already exists for the same \p I and same
847 /// \p RemoteI, the two are combined, potentially losing information about
848 /// offset and size. The resulting access must now be moved from its original
849 /// OffsetBin to the bin for its new offset.
850 ///
851 /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
852 ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
853 Instruction &I, std::optional<Value *> Content,
854 AAPointerInfo::AccessKind Kind, Type *Ty,
855 Instruction *RemoteI = nullptr);
856
857 AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
858 AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
859 int64_t numOffsetBins() const { return OffsetBins.size(); }
860
861 const AAPointerInfo::Access &getAccess(unsigned Index) const {
862 return AccessList[Index];
863 }
864
865protected:
866 // Every memory instruction results in an Access object. We maintain a list of
867 // all Access objects that we own, along with the following maps:
868 //
869 // - OffsetBins: RangeTy -> { Access }
870 // - RemoteIMap: RemoteI x LocalI -> Access
871 //
872 // A RemoteI is any instruction that accesses memory. RemoteI is different
873 // from LocalI if and only if LocalI is a call; then RemoteI is some
874 // instruction in the callgraph starting from LocalI. Multiple paths in the
875 // callgraph from LocalI to RemoteI may produce multiple accesses, but these
876 // are all combined into a single Access object. This may result in loss of
877 // information in RangeTy in the Access object.
878 SmallVector<AAPointerInfo::Access> AccessList;
879 AAPointerInfo::OffsetBinsTy OffsetBins;
880 DenseMap<const Instruction *, SmallVector<unsigned>> RemoteIMap;
881
882 /// Flag to determine if the underlying pointer is reaching a return statement
883 /// in the associated function or not. Returns in other functions cause
884 /// invalidation.
885 AAPointerInfo::OffsetInfo ReturnedOffsets;
886
887 /// See AAPointerInfo::forallInterferingAccesses.
888 bool forallInterferingAccesses(
889 AA::RangeTy Range,
890 function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
891 if (!isValidState())
892 return false;
893
894 for (const auto &It : OffsetBins) {
895 AA::RangeTy ItRange = It.getFirst();
896 if (!Range.mayOverlap(ItRange))
897 continue;
898 bool IsExact = Range == ItRange && !Range.offsetOrSizeAreUnknown();
899 for (auto Index : It.getSecond()) {
900 auto &Access = AccessList[Index];
901 if (!CB(Access, IsExact))
902 return false;
903 }
904 }
905 return true;
906 }
907
908 /// See AAPointerInfo::forallInterferingAccesses.
909 bool forallInterferingAccesses(
910 Instruction &I,
911 function_ref<bool(const AAPointerInfo::Access &, bool)> CB,
912 AA::RangeTy &Range) const {
913 if (!isValidState())
914 return false;
915
916 auto LocalList = RemoteIMap.find(&I);
917 if (LocalList == RemoteIMap.end()) {
918 return true;
919 }
920
921 for (unsigned Index : LocalList->getSecond()) {
922 for (auto &R : AccessList[Index]) {
923 Range &= R;
924 if (Range.offsetAndSizeAreUnknown())
925 break;
926 }
927 }
928 return forallInterferingAccesses(Range, CB);
929 }
930
931private:
932 /// State to track fixpoint and validity.
933 BooleanState BS;
934};
935
936ChangeStatus AA::PointerInfo::State::addAccess(
937 Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
938 std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
939 Instruction *RemoteI) {
940 RemoteI = RemoteI ? RemoteI : &I;
941
942 // Check if we have an access for this instruction, if not, simply add it.
943 auto &LocalList = RemoteIMap[RemoteI];
944 bool AccExists = false;
945 unsigned AccIndex = AccessList.size();
946 for (auto Index : LocalList) {
947 auto &A = AccessList[Index];
948 if (A.getLocalInst() == &I) {
949 AccExists = true;
950 AccIndex = Index;
951 break;
952 }
953 }
954
955 auto AddToBins = [&](const AAPointerInfo::RangeList &ToAdd) {
956 LLVM_DEBUG(if (ToAdd.size()) dbgs()
957 << "[AAPointerInfo] Inserting access in new offset bins\n";);
958
959 for (auto Key : ToAdd) {
960 LLVM_DEBUG(dbgs() << " key " << Key << "\n");
961 OffsetBins[Key].insert(AccIndex);
962 }
963 };
964
965 if (!AccExists) {
966 AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
967 assert((AccessList.size() == AccIndex + 1) &&
968 "New Access should have been at AccIndex");
969 LocalList.push_back(AccIndex);
970 AddToBins(AccessList[AccIndex].getRanges());
971 return ChangeStatus::CHANGED;
972 }
973
974 // Combine the new Access with the existing Access, and then update the
975 // mapping in the offset bins.
976 AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
977 auto &Current = AccessList[AccIndex];
978 auto Before = Current;
979 Current &= Acc;
980 if (Current == Before)
981 return ChangeStatus::UNCHANGED;
982
983 auto &ExistingRanges = Before.getRanges();
984 auto &NewRanges = Current.getRanges();
985
986 // Ranges that are in the old access but not the new access need to be removed
987 // from the offset bins.
988 AAPointerInfo::RangeList ToRemove;
989 AAPointerInfo::RangeList::set_difference(ExistingRanges, NewRanges, ToRemove);
990 LLVM_DEBUG(if (ToRemove.size()) dbgs()
991 << "[AAPointerInfo] Removing access from old offset bins\n";);
992
993 for (auto Key : ToRemove) {
994 LLVM_DEBUG(dbgs() << " key " << Key << "\n");
995 assert(OffsetBins.count(Key) && "Existing Access must be in some bin.");
996 auto &Bin = OffsetBins[Key];
997 assert(Bin.count(AccIndex) &&
998 "Expected bin to actually contain the Access.");
999 Bin.erase(AccIndex);
1000 }
1001
1002 // Ranges that are in the new access but not the old access need to be added
1003 // to the offset bins.
1004 AAPointerInfo::RangeList ToAdd;
1005 AAPointerInfo::RangeList::set_difference(NewRanges, ExistingRanges, ToAdd);
1006 AddToBins(ToAdd);
1007 return ChangeStatus::CHANGED;
1008}
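// Example of the bin updates above: if combining widens an access from range
// {4, 4} to {0, 8}, its index is erased from the {4, 4} bin and inserted into
// the {0, 8} bin; the two set_difference calls compute exactly these deltas.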
1009
1010namespace {
1011
1012#ifndef NDEBUG
1013static raw_ostream &operator<<(raw_ostream &OS,
1014 const AAPointerInfo::OffsetInfo &OI) {
1015 ListSeparator LS;
1016 OS << "[";
1017 for (auto Offset : OI) {
1018 OS << LS << Offset;
1019 }
1020 OS << "]";
1021 return OS;
1022}
1023#endif // NDEBUG
1024
1025struct AAPointerInfoImpl
1026 : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1027 using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1028 AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1029
1030 /// See AbstractAttribute::getAsStr().
1031 const std::string getAsStr(Attributor *A) const override {
1032 return std::string("PointerInfo ") +
1033 (isValidState() ? (std::string("#") +
1034 std::to_string(OffsetBins.size()) + " bins")
1035 : "<invalid>") +
1036 (reachesReturn()
1037 ? (" (returned:" +
1038 join(map_range(ReturnedOffsets,
1039 [](int64_t O) { return std::to_string(O); }),
1040 ", ") +
1041 ")")
1042 : "");
1043 }
1044
1045 /// See AbstractAttribute::manifest(...).
1046 ChangeStatus manifest(Attributor &A) override {
1047 return AAPointerInfo::manifest(A);
1048 }
1049
1050 virtual const_bin_iterator begin() const override { return State::begin(); }
1051 virtual const_bin_iterator end() const override { return State::end(); }
1052 virtual int64_t numOffsetBins() const override {
1053 return State::numOffsetBins();
1054 }
1055 virtual bool reachesReturn() const override {
1056 return !ReturnedOffsets.isUnassigned();
1057 }
1058 virtual void addReturnedOffsetsTo(OffsetInfo &OI) const override {
1059 if (ReturnedOffsets.isUnknown()) {
1060 OI.setUnknown();
1061 return;
1062 }
1063
1064 OffsetInfo MergedOI;
1065 for (auto Offset : ReturnedOffsets) {
1066 OffsetInfo TmpOI = OI;
1067 TmpOI.addToAll(Offset);
1068 MergedOI.merge(TmpOI);
1069 }
1070 OI = std::move(MergedOI);
1071 }
1072
1073 ChangeStatus setReachesReturn(const OffsetInfo &ReachedReturnedOffsets) {
1074 if (ReturnedOffsets.isUnknown())
1075 return ChangeStatus::UNCHANGED;
1076 if (ReachedReturnedOffsets.isUnknown()) {
1077 ReturnedOffsets.setUnknown();
1078 return ChangeStatus::CHANGED;
1079 }
1080 if (ReturnedOffsets.merge(ReachedReturnedOffsets))
1081 return ChangeStatus::CHANGED;
1082 return ChangeStatus::UNCHANGED;
1083 }
1084
1085 bool forallInterferingAccesses(
1087 function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1088 const override {
1089 return State::forallInterferingAccesses(Range, CB);
1090 }
1091
1092 bool forallInterferingAccesses(
1093 Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1094 bool FindInterferingWrites, bool FindInterferingReads,
1095 function_ref<bool(const Access &, bool)> UserCB, bool &HasBeenWrittenTo,
1096 AA::RangeTy &Range,
1097 function_ref<bool(const Access &)> SkipCB) const override {
1098 HasBeenWrittenTo = false;
1099
1100 SmallPtrSet<const Access *, 8> DominatingWrites;
1101 SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1102
1103 Function &Scope = *I.getFunction();
1104 bool IsKnownNoSync;
1105 bool IsAssumedNoSync = AA::hasAssumedIRAttr<Attribute::NoSync>(
1106 A, &QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL,
1107 IsKnownNoSync);
1108 const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1109 IRPosition::function(Scope), &QueryingAA, DepClassTy::NONE);
1110 bool AllInSameNoSyncFn = IsAssumedNoSync;
1111 bool InstIsExecutedByInitialThreadOnly =
1112 ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I);
1113
1114 // If the function is not ending in aligned barriers, we need the stores to
1115 // be in aligned barriers. The load being in one is not sufficient since the
1116 // store might be executed by a thread that disappears afterwards, causing the
1117 // aligned barrier guarding the load to unblock and the load to read a value
1118 // that has no CFG path to the load.
1119 bool InstIsExecutedInAlignedRegion =
1120 FindInterferingReads && ExecDomainAA &&
1121 ExecDomainAA->isExecutedInAlignedRegion(A, I);
1122
1123 if (InstIsExecutedInAlignedRegion || InstIsExecutedByInitialThreadOnly)
1124 A.recordDependence(*ExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1125
1126 InformationCache &InfoCache = A.getInfoCache();
1127 bool IsThreadLocalObj =
1128 AA::isAssumedThreadLocalObject(A, getAssociatedValue(), *this);
1129
1130 // Helper to determine if we need to consider threading, which we cannot
1131 // right now. However, if the function is (assumed) nosync or the thread
1132 // executing all instructions is the main thread only we can ignore
1133 // threading. Also, thread-local objects do not require threading reasoning.
1134 // Finally, we can ignore threading if either access is executed in an
1135 // aligned region.
1136 auto CanIgnoreThreadingForInst = [&](const Instruction &I) -> bool {
1137 if (IsThreadLocalObj || AllInSameNoSyncFn)
1138 return true;
1139 const auto *FnExecDomainAA =
1140 I.getFunction() == &Scope
1141 ? ExecDomainAA
1142 : A.lookupAAFor<AAExecutionDomain>(
1143 IRPosition::function(*I.getFunction()), &QueryingAA,
1144 DepClassTy::NONE);
1145 if (!FnExecDomainAA)
1146 return false;
1147 if (InstIsExecutedInAlignedRegion ||
1148 (FindInterferingWrites &&
1149 FnExecDomainAA->isExecutedInAlignedRegion(A, I))) {
1150 A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1151 return true;
1152 }
1153 if (InstIsExecutedByInitialThreadOnly &&
1154 FnExecDomainAA->isExecutedByInitialThreadOnly(I)) {
1155 A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1156 return true;
1157 }
1158 return false;
1159 };
1160
1161 // Helper to determine if the access is executed by the same thread as the
1162 // given instruction, for now it is sufficient to avoid any potential
1163 // threading effects as we cannot deal with them anyway.
1164 auto CanIgnoreThreading = [&](const Access &Acc) -> bool {
1165 return CanIgnoreThreadingForInst(*Acc.getRemoteInst()) ||
1166 (Acc.getRemoteInst() != Acc.getLocalInst() &&
1167 CanIgnoreThreadingForInst(*Acc.getLocalInst()));
1168 };
1169
1170 // TODO: Use inter-procedural reachability and dominance.
1171 bool IsKnownNoRecurse;
1172 AA::hasAssumedIRAttr<Attribute::NoRecurse>(
1173 A, this, IRPosition::function(Scope), DepClassTy::OPTIONAL,
1174 IsKnownNoRecurse);
1175
1176 // TODO: Use reaching kernels from AAKernelInfo (or move it to
1177 // AAExecutionDomain) such that we allow scopes other than kernels as long
1178 // as the reaching kernels are disjoint.
1179 bool InstInKernel = Scope.hasFnAttribute("kernel");
1180 bool ObjHasKernelLifetime = false;
1181 const bool UseDominanceReasoning =
1182 FindInterferingWrites && IsKnownNoRecurse;
1183 const DominatorTree *DT =
1184 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope);
1185
1186 // Helper to check if a value has "kernel lifetime", that is it will not
1187 // outlive a GPU kernel. This is true for shared, constant, and local
1188 // globals on AMD and NVIDIA GPUs.
1189 auto HasKernelLifetime = [&](Value *V, Module &M) {
1190 if (!AA::isGPU(M))
1191 return false;
1192 switch (AA::GPUAddressSpace(V->getType()->getPointerAddressSpace())) {
1193 case AA::GPUAddressSpace::Shared:
1194 case AA::GPUAddressSpace::Constant:
1195 case AA::GPUAddressSpace::Local:
1196 return true;
1197 default:
1198 return false;
1199 };
1200 };
1201
1202 // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1203 // to determine if we should look at reachability from the callee. For
1204 // certain pointers we know the lifetime and we do not have to step into the
1205 // callee to determine reachability as the pointer would be dead in the
1206 // callee. See the conditional initialization below.
1207 std::function<bool(const Function &)> IsLiveInCalleeCB;
1208
1209 if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1210 // If the alloca containing function is not recursive the alloca
1211 // must be dead in the callee.
1212 const Function *AIFn = AI->getFunction();
1213 ObjHasKernelLifetime = AIFn->hasFnAttribute("kernel");
1214 bool IsKnownNoRecurse;
1215 if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
1216 A, this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL,
1217 IsKnownNoRecurse)) {
1218 IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1219 }
1220 } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1221 // If the global has kernel lifetime we can stop if we reach a kernel
1222 // as it is "dead" in the (unknown) callees.
1223 ObjHasKernelLifetime = HasKernelLifetime(GV, *GV->getParent());
1224 if (ObjHasKernelLifetime)
1225 IsLiveInCalleeCB = [](const Function &Fn) {
1226 return !Fn.hasFnAttribute("kernel");
1227 };
1228 }
1229
1230 // Set of accesses/instructions that will overwrite the result and are
1231 // therefore blockers in the reachability traversal.
1232 AA::InstExclusionSetTy ExclusionSet;
1233
1234 auto AccessCB = [&](const Access &Acc, bool Exact) {
1235 Function *AccScope = Acc.getRemoteInst()->getFunction();
1236 bool AccInSameScope = AccScope == &Scope;
1237
1238 // If the object has kernel lifetime we can ignore accesses only reachable
1239 // by other kernels. For now we only skip accesses *in* other kernels.
1240 if (InstInKernel && ObjHasKernelLifetime && !AccInSameScope &&
1241 AccScope->hasFnAttribute("kernel"))
1242 return true;
1243
1244 if (Exact && Acc.isMustAccess() && Acc.getRemoteInst() != &I) {
1245 if (Acc.isWrite() || (isa<LoadInst>(I) && Acc.isWriteOrAssumption()))
1246 ExclusionSet.insert(Acc.getRemoteInst());
1247 }
1248
1249 if ((!FindInterferingWrites || !Acc.isWriteOrAssumption()) &&
1250 (!FindInterferingReads || !Acc.isRead()))
1251 return true;
1252
1253 bool Dominates = FindInterferingWrites && DT && Exact &&
1254 Acc.isMustAccess() && AccInSameScope &&
1255 DT->dominates(Acc.getRemoteInst(), &I);
1256 if (Dominates)
1257 DominatingWrites.insert(&Acc);
1258
1259 // Track if all interesting accesses are in the same `nosync` function as
1260 // the given instruction.
1261 AllInSameNoSyncFn &= Acc.getRemoteInst()->getFunction() == &Scope;
1262
1263 InterferingAccesses.push_back({&Acc, Exact});
1264 return true;
1265 };
1266 if (!State::forallInterferingAccesses(I, AccessCB, Range))
1267 return false;
1268
1269 HasBeenWrittenTo = !DominatingWrites.empty();
1270
1271 // Dominating writes form a chain; find the least/lowest member.
1272 Instruction *LeastDominatingWriteInst = nullptr;
1273 for (const Access *Acc : DominatingWrites) {
1274 if (!LeastDominatingWriteInst) {
1275 LeastDominatingWriteInst = Acc->getRemoteInst();
1276 } else if (DT->dominates(LeastDominatingWriteInst,
1277 Acc->getRemoteInst())) {
1278 LeastDominatingWriteInst = Acc->getRemoteInst();
1279 }
1280 }
1281
1282 // Helper to determine if we can skip a specific write access.
1283 auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1284 if (SkipCB && SkipCB(Acc))
1285 return true;
1286 if (!CanIgnoreThreading(Acc))
1287 return false;
1288
1289 // Check read (RAW) dependences and write (WAR) dependences as necessary.
1290 // If we successfully excluded all effects we are interested in, the
1291 // access can be skipped.
1292 bool ReadChecked = !FindInterferingReads;
1293 bool WriteChecked = !FindInterferingWrites;
1294
1295 // If the instruction cannot reach the access, the former does not
1296 // interfere with what the access reads.
1297 if (!ReadChecked) {
1298 if (!AA::isPotentiallyReachable(A, I, *Acc.getRemoteInst(), QueryingAA,
1299 &ExclusionSet, IsLiveInCalleeCB))
1300 ReadChecked = true;
1301 }
1302 // If the instruction cannot be reached from the access, the latter does
1303 // not interfere with what the instruction reads.
1304 if (!WriteChecked) {
1305 if (!AA::isPotentiallyReachable(A, *Acc.getRemoteInst(), I, QueryingAA,
1306 &ExclusionSet, IsLiveInCalleeCB))
1307 WriteChecked = true;
1308 }
1309
1310 // If we still might be affected by the write of the access but there are
1311 // dominating writes in the function of the instruction
1312 // (HasBeenWrittenTo), we can try to reason that the access is overwritten
1313 // by them. This would have happened above if they are all in the same
1314 // function, so we only check the inter-procedural case. Effectively, we
1315 // want to show that there is no call after the dominating write that might
1316 // reach the access, and when it returns reach the instruction with the
1317 // updated value. To this end, we iterate all call sites, check if they
1318 // might reach the instruction without going through another access
1319 // (ExclusionSet) and at the same time might reach the access. However,
1320 // that is all part of AAInterFnReachability.
1321 if (!WriteChecked && HasBeenWrittenTo &&
1322 Acc.getRemoteInst()->getFunction() != &Scope) {
1323
1324 const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
1325 QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1326 if (FnReachabilityAA) {
1327 // Without going backwards in the call tree, can we reach the access
1328 // from the least dominating write? Do not allow passing the
1329 // instruction itself either.
1330 bool Inserted = ExclusionSet.insert(&I).second;
1331
1332 if (!FnReachabilityAA->instructionCanReach(
1333 A, *LeastDominatingWriteInst,
1334 *Acc.getRemoteInst()->getFunction(), &ExclusionSet))
1335 WriteChecked = true;
1336
1337 if (Inserted)
1338 ExclusionSet.erase(&I);
1339 }
1340 }
1341
1342 if (ReadChecked && WriteChecked)
1343 return true;
1344
1345 if (!DT || !UseDominanceReasoning)
1346 return false;
1347 if (!DominatingWrites.count(&Acc))
1348 return false;
1349 return LeastDominatingWriteInst != Acc.getRemoteInst();
1350 };
1351
1352 // Run the user callback on all accesses we cannot skip and return if
1353 // that succeeded for all or not.
1354 for (auto &It : InterferingAccesses) {
1355 if ((!AllInSameNoSyncFn && !IsThreadLocalObj && !ExecDomainAA) ||
1356 !CanSkipAccess(*It.first, It.second)) {
1357 if (!UserCB(*It.first, It.second))
1358 return false;
1359 }
1360 }
1361 return true;
1362 }
1363
1364 ChangeStatus translateAndAddStateFromCallee(Attributor &A,
1365 const AAPointerInfo &OtherAA,
1366 CallBase &CB) {
1367 using namespace AA::PointerInfo;
1368 if (!OtherAA.getState().isValidState() || !isValidState())
1369 return indicatePessimisticFixpoint();
1370
1371 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1372 const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1373 bool IsByval = OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1374 Changed |= setReachesReturn(OtherAAImpl.ReturnedOffsets);
1375
1376 // Combine the accesses bin by bin.
1377 const auto &State = OtherAAImpl.getState();
1378 for (const auto &It : State) {
1379 for (auto Index : It.getSecond()) {
1380 const auto &RAcc = State.getAccess(Index);
1381 if (IsByval && !RAcc.isRead())
1382 continue;
1383 bool UsedAssumedInformation = false;
1384 AccessKind AK = RAcc.getKind();
1385 auto Content = A.translateArgumentToCallSiteContent(
1386 RAcc.getContent(), CB, *this, UsedAssumedInformation);
1387 AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
1388 AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
1389
1390 Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
1391 RAcc.getType(), RAcc.getRemoteInst());
1392 }
1393 }
1394 return Changed;
1395 }
1396
1397 ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1398 const OffsetInfo &Offsets, CallBase &CB,
1399 bool IsMustAcc) {
1400 using namespace AA::PointerInfo;
1401 if (!OtherAA.getState().isValidState() || !isValidState())
1402 return indicatePessimisticFixpoint();
1403
1404 const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1405
1406 // Combine the accesses bin by bin.
1407 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1408 const auto &State = OtherAAImpl.getState();
1409 for (const auto &It : State) {
1410 for (auto Index : It.getSecond()) {
1411 const auto &RAcc = State.getAccess(Index);
1412 if (!IsMustAcc && RAcc.isAssumption())
1413 continue;
1414 for (auto Offset : Offsets) {
1415 auto NewRanges = Offset == AA::RangeTy::Unknown
1416 ? AAPointerInfo::RangeList::getUnknown()
1417 : RAcc.getRanges();
1418 if (!NewRanges.isUnknown()) {
1419 NewRanges.addToAllOffsets(Offset);
1420 }
1421 AccessKind AK = RAcc.getKind();
1422 if (!IsMustAcc)
1423 AK = AccessKind((AK & ~AK_MUST) | AK_MAY);
1424 Changed |= addAccess(A, NewRanges, CB, RAcc.getContent(), AK,
1425 RAcc.getType(), RAcc.getRemoteInst());
1426 }
1427 }
1428 }
1429 return Changed;
1430 }
1431
1432 /// Statistic tracking for all AAPointerInfo implementations.
1433 /// See AbstractAttribute::trackStatistics().
1434 void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1435
1436 /// Dump the state into \p O.
1437 void dumpState(raw_ostream &O) {
1438 for (auto &It : OffsetBins) {
1439 O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size
1440 << "] : " << It.getSecond().size() << "\n";
1441 for (auto AccIndex : It.getSecond()) {
1442 auto &Acc = AccessList[AccIndex];
1443 O << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1444 if (Acc.getLocalInst() != Acc.getRemoteInst())
1445 O << " --> " << *Acc.getRemoteInst()
1446 << "\n";
1447 if (!Acc.isWrittenValueYetUndetermined()) {
1448 if (isa_and_nonnull<Function>(Acc.getWrittenValue()))
1449 O << " - c: func " << Acc.getWrittenValue()->getName()
1450 << "\n";
1451 else if (Acc.getWrittenValue())
1452 O << " - c: " << *Acc.getWrittenValue() << "\n";
1453 else
1454 O << " - c: <unknown>\n";
1455 }
1456 }
1457 }
1458 }
1459};
1460
1461struct AAPointerInfoFloating : public AAPointerInfoImpl {
1462 using AccessKind = AAPointerInfo::AccessKind;
1463 AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1464 : AAPointerInfoImpl(IRP, A) {}
1465
1466 /// Deal with an access and signal if it was handled successfully.
1467 bool handleAccess(Attributor &A, Instruction &I,
1468 std::optional<Value *> Content, AccessKind Kind,
1469 OffsetInfo::VecTy &Offsets, ChangeStatus &Changed,
1470 Type &Ty) {
1471 using namespace AA::PointerInfo;
1472 auto Size = AA::RangeTy::Unknown;
1473 const DataLayout &DL = A.getDataLayout();
1474 TypeSize AccessSize = DL.getTypeStoreSize(&Ty);
1475 if (!AccessSize.isScalable())
1476 Size = AccessSize.getFixedValue();
1477
1478 // Make a strictly ascending list of offsets as required by addAccess()
1479 SmallVector<int64_t> OffsetsSorted(Offsets.begin(), Offsets.end());
1480 llvm::sort(OffsetsSorted);
1481
1482 VectorType *VT = dyn_cast<VectorType>(&Ty);
1483 if (!VT || VT->getElementCount().isScalable() ||
1484 !Content.value_or(nullptr) || !isa<Constant>(*Content) ||
1485 (*Content)->getType() != VT ||
1486 DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
1487 Changed =
1488 Changed | addAccess(A, {OffsetsSorted, Size}, I, Content, Kind, &Ty);
1489 } else {
1490 // Handle vector stores with constant content element-wise.
1491 // TODO: We could look for the elements or create instructions
1492 // representing them.
1493 // TODO: We need to push the Content into the range abstraction
1494 // (AA::RangeTy) to allow different content values for different
1495 // ranges. Hence, support vectors storing different values.
1496 Type *ElementType = VT->getElementType();
1497 int64_t ElementSize = DL.getTypeStoreSize(ElementType).getFixedValue();
1498 auto *ConstContent = cast<Constant>(*Content);
1499 Type *Int32Ty = Type::getInt32Ty(ElementType->getContext());
1500 SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end());
1501
1502 for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) {
1503 Value *ElementContent = ConstantExpr::getExtractElement(
1504 ConstContent, ConstantInt::get(Int32Ty, i));
1505
1506 // Add the element access.
1507 Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
1508 ElementContent, Kind, ElementType);
1509
1510 // Advance the offsets for the next element.
1511 for (auto &ElementOffset : ElementOffsets)
1512 ElementOffset += ElementSize;
1513 }
1514 }
1515 return true;
1516 };
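// Example of the element-wise split: storing <2 x i32> <i32 0, i32 1> at
// offset 8 is recorded as two accesses, content 0 at offset 8 and content 1
// at offset 12, each of size 4.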
1517
1518 /// See AbstractAttribute::updateImpl(...).
1519 ChangeStatus updateImpl(Attributor &A) override;
1520
1521 /// If the indices to \p GEP can be traced to constants, incorporate all
1522 /// of these into \p UsrOI.
1523 ///
1524 /// \return true iff \p UsrOI is updated.
1525 bool collectConstantsForGEP(Attributor &A, const DataLayout &DL,
1526 OffsetInfo &UsrOI, const OffsetInfo &PtrOI,
1527 const GEPOperator *GEP);
1528
1529 /// See AbstractAttribute::trackStatistics()
1530 void trackStatistics() const override {
1531 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1532 }
1533};
1534
1535bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
1536 const DataLayout &DL,
1537 OffsetInfo &UsrOI,
1538 const OffsetInfo &PtrOI,
1539 const GEPOperator *GEP) {
1540 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1541 SmallMapVector<Value *, APInt, 4> VariableOffsets;
1542 APInt ConstantOffset(BitWidth, 0);
1543
1544 assert(!UsrOI.isUnknown() && !PtrOI.isUnknown() &&
1545 "Don't look for constant values if the offset has already been "
1546 "determined to be unknown.");
1547
1548 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
1549 UsrOI.setUnknown();
1550 return true;
1551 }
1552
1553 LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset is "
1554 << (VariableOffsets.empty() ? "" : "not") << " constant "
1555 << *GEP << "\n");
1556
1557 auto Union = PtrOI;
1558 Union.addToAll(ConstantOffset.getSExtValue());
1559
1560 // Each VI in VariableOffsets has a set of potential constant values. Every
1561 // combination of elements, picked one each from these sets, is separately
1562 // added to the original set of offsets, thus resulting in more offsets.
1563 for (const auto &VI : VariableOffsets) {
1564 auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
1565 *this, IRPosition::value(*VI.first), DepClassTy::OPTIONAL);
1566 if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) {
1567 UsrOI.setUnknown();
1568 return true;
1569 }
1570
1571 // UndefValue is treated as a zero, which leaves Union as is.
1572 if (PotentialConstantsAA->undefIsContained())
1573 continue;
1574
1575 // We need at least one constant in every set to compute an actual offset.
1576 // Otherwise, we end up pessimizing AAPointerInfo by respecting offsets that
1577 // don't actually exist. In other words, the absence of constant values
1578 // implies that the operation can be assumed dead for now.
1579 auto &AssumedSet = PotentialConstantsAA->getAssumedSet();
1580 if (AssumedSet.empty())
1581 return false;
1582
1583 OffsetInfo Product;
1584 for (const auto &ConstOffset : AssumedSet) {
1585 auto CopyPerOffset = Union;
1586 CopyPerOffset.addToAll(ConstOffset.getSExtValue() *
1587 VI.second.getZExtValue());
1588 Product.merge(CopyPerOffset);
1589 }
1590 Union = Product;
1591 }
1592
1593 UsrOI = std::move(Union);
1594 return true;
1595}
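// Worked example: for a byte-based GEP of %p with PtrOI = {0, 16} and one
// variable index of scale 1 whose potential constants are {1, 2}, the result
// is UsrOI = {1, 2, 17, 18}: every incoming offset plus every scaled constant.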
1596
1597ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
1598 using namespace AA::PointerInfo;
1599 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1600 const DataLayout &DL = A.getDataLayout();
1601 Value &AssociatedValue = getAssociatedValue();
1602
1603 DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1604 OffsetInfoMap[&AssociatedValue].insert(0);
1605
1606 auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
1607 // One does not simply walk into a map and assign a reference to a possibly
1608 // new location. That can cause an invalidation before the assignment
1609 // happens, like so:
1610 //
1611 // OffsetInfoMap[Usr] = OffsetInfoMap[CurPtr]; /* bad idea! */
1612 //
1613 // The RHS is a reference that may be invalidated by an insertion caused by
1614 // the LHS. So we ensure that the side-effect of the LHS happens first.
1615
1616 assert(OffsetInfoMap.contains(CurPtr) &&
1617 "CurPtr does not exist in the map!");
1618
1619 auto &UsrOI = OffsetInfoMap[Usr];
1620 auto &PtrOI = OffsetInfoMap[CurPtr];
1621 assert(!PtrOI.isUnassigned() &&
1622 "Cannot pass through if the input Ptr was not visited!");
1623 UsrOI.merge(PtrOI);
1624 Follow = true;
1625 return true;
1626 };
1627
1628 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1629 Value *CurPtr = U.get();
1630 User *Usr = U.getUser();
1631 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr
1632 << "\n");
1633 assert(OffsetInfoMap.count(CurPtr) &&
1634 "The current pointer offset should have been seeded!");
1635 assert(!OffsetInfoMap[CurPtr].isUnassigned() &&
1636 "Current pointer should be assigned");
1637
1638 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1639 if (CE->isCast())
1640 return HandlePassthroughUser(Usr, CurPtr, Follow);
1641 if (!isa<GEPOperator>(CE)) {
1642 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1643 << "\n");
1644 return false;
1645 }
1646 }
1647 if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1648 // Note the order here: the Usr access might change the map, but CurPtr
1649 // is already in it.
1650 auto &UsrOI = OffsetInfoMap[Usr];
1651 auto &PtrOI = OffsetInfoMap[CurPtr];
1652
1653 if (UsrOI.isUnknown())
1654 return true;
1655
1656 if (PtrOI.isUnknown()) {
1657 Follow = true;
1658 UsrOI.setUnknown();
1659 return true;
1660 }
1661
1662 Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP);
1663 return true;
1664 }
1665 if (isa<PtrToIntInst>(Usr))
1666 return false;
1667 if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1668 return HandlePassthroughUser(Usr, CurPtr, Follow);
1669 // Returns are allowed if they are in the associated function. Users can
1670 // then check the call site return. Returns from other functions can't be
1671 // tracked and are cause for invalidation.
1672 if (auto *RI = dyn_cast<ReturnInst>(Usr)) {
1673 if (RI->getFunction() == getAssociatedFunction()) {
1674 auto &PtrOI = OffsetInfoMap[CurPtr];
1675 Changed |= setReachesReturn(PtrOI);
1676 return true;
1677 }
1678 return false;
1679 }
1680
1681 // For PHIs we need to take care of the recurrence explicitly as the value
1682 // might change while we iterate through a loop. For now, we give up if
1683 // the PHI is not invariant.
1684 if (auto *PHI = dyn_cast<PHINode>(Usr)) {
1685 // Note the order here: the Usr access might change the map, but CurPtr
1686 // is already in it.
1687 bool IsFirstPHIUser = !OffsetInfoMap.count(PHI);
1688 auto &UsrOI = OffsetInfoMap[PHI];
1689 auto &PtrOI = OffsetInfoMap[CurPtr];
1690
1691 // Check if the PHI operand already has an unknown offset, as we can't
1692 // improve on that anymore.
1693 if (PtrOI.isUnknown()) {
1694 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
1695 << *CurPtr << " in " << *PHI << "\n");
1696 Follow = !UsrOI.isUnknown();
1697 UsrOI.setUnknown();
1698 return true;
1699 }
1700
1701 // Check if the PHI is invariant (so far).
1702 if (UsrOI == PtrOI) {
1703 assert(!PtrOI.isUnassigned() &&
1704 "Cannot assign if the current Ptr was not visited!");
1705 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
1706 return true;
1707 }
1708
1709 // Check if the PHI operand can be traced back to AssociatedValue.
1710 APInt Offset(
1711 DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1712 0);
1713 Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1714 DL, Offset, /* AllowNonInbounds */ true);
1715 auto It = OffsetInfoMap.find(CurPtrBase);
1716 if (It == OffsetInfoMap.end()) {
1717 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1718 << *CurPtr << " in " << *PHI
1719 << " (base: " << *CurPtrBase << ")\n");
1720 UsrOI.setUnknown();
1721 Follow = true;
1722 return true;
1723 }
1724
1725 // Check if the PHI operand is not dependent on the PHI itself. Every
1726 // recurrence is a cyclic net of PHIs in the data flow, and has an
1727 // equivalent Cycle in the control flow. One of those PHIs must be in the
1728 // header of that control flow Cycle. This is independent of the choice of
1729 // Cycles reported by CycleInfo. It is sufficient to check the PHIs in
1730 // every Cycle header; if such a node is marked unknown, this will
1731 // eventually propagate through the whole net of PHIs in the recurrence.
1732 const auto *CI =
1733 A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
1734 *PHI->getFunction());
1735 if (mayBeInCycle(CI, cast<Instruction>(Usr), /* HeaderOnly */ true)) {
1736 auto BaseOI = It->getSecond();
1737 BaseOI.addToAll(Offset.getZExtValue());
1738 if (IsFirstPHIUser || BaseOI == UsrOI) {
1739 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr
1740 << " in " << *Usr << "\n");
1741 return HandlePassthroughUser(Usr, CurPtr, Follow);
1742 }
1743
1744 LLVM_DEBUG(
1745 dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch "
1746 << *CurPtr << " in " << *PHI << "\n");
1747 UsrOI.setUnknown();
1748 Follow = true;
1749 return true;
1750 }
1751
1752 UsrOI.merge(PtrOI);
1753 Follow = true;
1754 return true;
1755 }
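// An illustration of the recurrence case above (hand-written IR sketch,
// not from a test):
//
//   loop:
//     %phi = phi ptr [ %base, %entry ], [ %next, %loop ]
//     %next = getelementptr inbounds i8, ptr %phi, i64 4
//
// The back-edge operand %next strips back to the cycle-header PHI itself
// with a non-zero offset, so the PHI offset is not invariant across
// iterations and is conservatively set to unknown.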
1756
1757 if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
1758 // If the access is to a pointer that may or may not be the associated
1759 // value, e.g. due to a PHI, we cannot assume it will be read.
1760 AccessKind AK = AccessKind::AK_R;
1761 if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1762 AK = AccessKind(AK | AccessKind::AK_MUST);
1763 else
1764 AK = AccessKind(AK | AccessKind::AK_MAY);
1765 if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
1766 OffsetInfoMap[CurPtr].Offsets, Changed,
1767 *LoadI->getType()))
1768 return false;
1769
1770 auto IsAssumption = [](Instruction &I) {
1771 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1772 return II->isAssumeLikeIntrinsic();
1773 return false;
1774 };
1775
1776 auto IsImpactedInRange = [&](Instruction *FromI, Instruction *ToI) {
1777 // Check if the assumption and the load are executed together without
1778 // memory modification.
1779 do {
1780 if (FromI->mayWriteToMemory() && !IsAssumption(*FromI))
1781 return true;
1782 FromI = FromI->getNextNonDebugInstruction();
1783 } while (FromI && FromI != ToI);
1784 return false;
1785 };
1786
1787 BasicBlock *BB = LoadI->getParent();
1788 auto IsValidAssume = [&](IntrinsicInst &IntrI) {
1789 if (IntrI.getIntrinsicID() != Intrinsic::assume)
1790 return false;
1791 BasicBlock *IntrBB = IntrI.getParent();
1792 if (IntrI.getParent() == BB) {
1793 if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI))
1794 return false;
1795 } else {
1796 auto PredIt = pred_begin(IntrBB);
1797 if (PredIt == pred_end(IntrBB))
1798 return false;
1799 if ((*PredIt) != BB)
1800 return false;
1801 if (++PredIt != pred_end(IntrBB))
1802 return false;
1803 for (auto *SuccBB : successors(BB)) {
1804 if (SuccBB == IntrBB)
1805 continue;
1806 if (isa<UnreachableInst>(SuccBB->getTerminator()))
1807 continue;
1808 return false;
1809 }
1810 if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(),
1811 BB->getTerminator()))
1812 return false;
1813 if (IsImpactedInRange(&IntrBB->front(), &IntrI))
1814 return false;
1815 }
1816 return true;
1817 };
1818
1819 std::pair<Value *, IntrinsicInst *> Assumption;
1820 for (const Use &LoadU : LoadI->uses()) {
1821 if (auto *CmpI = dyn_cast<CmpInst>(LoadU.getUser())) {
1822 if (!CmpI->isEquality() || !CmpI->isTrueWhenEqual())
1823 continue;
1824 for (const Use &CmpU : CmpI->uses()) {
1825 if (auto *IntrI = dyn_cast<IntrinsicInst>(CmpU.getUser())) {
1826 if (!IsValidAssume(*IntrI))
1827 continue;
1828 int Idx = CmpI->getOperandUse(0) == LoadU;
1829 Assumption = {CmpI->getOperand(Idx), IntrI};
1830 break;
1831 }
1832 }
1833 }
1834 if (Assumption.first)
1835 break;
1836 }
1837
1838 // Check if we found an assumption associated with this load.
1839 if (!Assumption.first || !Assumption.second)
1840 return true;
1841
1842 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Assumption found "
1843 << *Assumption.second << ": " << *LoadI
1844 << " == " << *Assumption.first << "\n");
1845 bool UsedAssumedInformation = false;
1846 std::optional<Value *> Content = nullptr;
1847 if (Assumption.first)
1848 Content =
1849 A.getAssumedSimplified(*Assumption.first, *this,
1850 UsedAssumedInformation, AA::Interprocedural);
1851 return handleAccess(
1852 A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
1853 OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
1854 }
1855
1856 auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
1857 ArrayRef<Value *> OtherOps, AccessKind AK) {
1858 for (auto *OtherOp : OtherOps) {
1859 if (OtherOp == CurPtr) {
1860 LLVM_DEBUG(
1861 dbgs()
1862 << "[AAPointerInfo] Escaping use in store like instruction " << I
1863 << "\n");
1864 return false;
1865 }
1866 }
1867
1868 // If the access is to a pointer that may or may not be the associated
1869 // value, e.g. due to a PHI, we cannot assume it will be written.
1870 if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1871 AK = AccessKind(AK | AccessKind::AK_MUST);
1872 else
1873 AK = AccessKind(AK | AccessKind::AK_MAY);
1874 bool UsedAssumedInformation = false;
1875 std::optional<Value *> Content = nullptr;
1876 if (ValueOp)
1877 Content = A.getAssumedSimplified(
1878 *ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
1879 return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
1880 Changed, ValueTy);
1881 };
1882
1883 if (auto *StoreI = dyn_cast<StoreInst>(Usr))
1884 return HandleStoreLike(*StoreI, StoreI->getValueOperand(),
1885 *StoreI->getValueOperand()->getType(),
1886 {StoreI->getValueOperand()}, AccessKind::AK_W);
1887 if (auto *RMWI = dyn_cast<AtomicRMWInst>(Usr))
1888 return HandleStoreLike(*RMWI, nullptr, *RMWI->getValOperand()->getType(),
1889 {RMWI->getValOperand()}, AccessKind::AK_RW);
1890 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(Usr))
1891 return HandleStoreLike(
1892 *CXI, nullptr, *CXI->getNewValOperand()->getType(),
1893 {CXI->getCompareOperand(), CXI->getNewValOperand()},
1894 AccessKind::AK_RW);
1895
1896 if (auto *CB = dyn_cast<CallBase>(Usr)) {
1897 if (CB->isLifetimeStartOrEnd())
1898 return true;
1899 const auto *TLI =
1900 A.getInfoCache().getTargetLibraryInfoForFunction(*CB->getFunction());
1901 if (getFreedOperand(CB, TLI) == U)
1902 return true;
1903 if (CB->isArgOperand(&U)) {
1904 unsigned ArgNo = CB->getArgOperandNo(&U);
1905 const auto *CSArgPI = A.getAAFor<AAPointerInfo>(
1906 *this, IRPosition::callsite_argument(*CB, ArgNo),
1907 DepClassTy::REQUIRED);
1908 if (!CSArgPI)
1909 return false;
1910 bool IsArgMustAcc = (getUnderlyingObject(CurPtr) == &AssociatedValue);
1911 Changed = translateAndAddState(A, *CSArgPI, OffsetInfoMap[CurPtr], *CB,
1912 IsArgMustAcc) |
1913 Changed;
1914 if (!CSArgPI->reachesReturn())
1915 return isValidState();
1916
1917 Function *Callee = CB->getCalledFunction();
1918 if (!Callee || Callee->arg_size() <= ArgNo)
1919 return false;
1920 bool UsedAssumedInformation = false;
1921 auto ReturnedValue = A.getAssumedSimplified(
1922 IRPosition::returned(*Callee), *this, UsedAssumedInformation,
1923 AA::Intraprocedural);
1924 auto *ReturnedArg =
1925 dyn_cast_or_null<Argument>(ReturnedValue.value_or(nullptr));
1926 auto *Arg = Callee->getArg(ArgNo);
1927 if (ReturnedArg && Arg != ReturnedArg)
1928 return true;
1929 bool IsRetMustAcc = IsArgMustAcc && (ReturnedArg == Arg);
1930 const auto *CSRetPI = A.getAAFor<AAPointerInfo>(
1931 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
1932 if (!CSRetPI)
1933 return false;
1934 OffsetInfo OI = OffsetInfoMap[CurPtr];
1935 CSArgPI->addReturnedOffsetsTo(OI);
1936 Changed =
1937 translateAndAddState(A, *CSRetPI, OI, *CB, IsRetMustAcc) | Changed;
1938 return isValidState();
1939 }
1940 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1941 << "\n");
1942 return false;
1943 }
1944
1945 LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1946 return false;
1947 };
1948 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1949 assert(OffsetInfoMap.count(OldU) && "Old use should be known already!");
1950 assert(!OffsetInfoMap[OldU].isUnassigned() && "Old use should be assigned");
1951 if (OffsetInfoMap.count(NewU)) {
1952 LLVM_DEBUG({
1953 if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1954 dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1955 << OffsetInfoMap[NewU] << " vs " << OffsetInfoMap[OldU]
1956 << "\n";
1957 }
1958 });
1959 return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1960 }
1961 bool Unused;
1962 return HandlePassthroughUser(NewU.get(), OldU.get(), Unused);
1963 };
1964 if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1965 /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1966 /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1967 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1968 return indicatePessimisticFixpoint();
1969 }
1970
1971 LLVM_DEBUG({
1972 dbgs() << "Accesses by bin after update:\n";
1973 dumpState(dbgs());
1974 });
1975
1976 return Changed;
1977}
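// The assumption handling in UsePred above matches IR of roughly this
// shape (a hand-written sketch):
//
//   %v = load i32, ptr %p
//   %c = icmp eq i32 %v, 42
//   call void @llvm.assume(i1 %c)
//
// If nothing between the load and the assume may write memory, the load
// is recorded as an AK_ASSUMPTION access whose content simplifies to 42.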
1978
1979struct AAPointerInfoReturned final : AAPointerInfoImpl {
1980 AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1981 : AAPointerInfoImpl(IRP, A) {}
1982
1983 /// See AbstractAttribute::updateImpl(...).
1984 ChangeStatus updateImpl(Attributor &A) override {
1985 return indicatePessimisticFixpoint();
1986 }
1987
1988 /// See AbstractAttribute::trackStatistics()
1989 void trackStatistics() const override {
1990 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1991 }
1992};
1993
1994struct AAPointerInfoArgument final : AAPointerInfoFloating {
1995 AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1996 : AAPointerInfoFloating(IRP, A) {}
1997
1998 /// See AbstractAttribute::trackStatistics()
1999 void trackStatistics() const override {
2000 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2001 }
2002};
2003
2004struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
2005 AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
2006 : AAPointerInfoFloating(IRP, A) {}
2007
2008 /// See AbstractAttribute::updateImpl(...).
2009 ChangeStatus updateImpl(Attributor &A) override {
2010 using namespace AA::PointerInfo;
2011 // We handle memory intrinsics explicitly, at least the first (=
2012 // destination) and second (=source) arguments as we know how they are
2013 // accessed.
2014 if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
2015 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
2016 int64_t LengthVal = AA::RangeTy::Unknown;
2017 if (Length)
2018 LengthVal = Length->getSExtValue();
2019 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
2020 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2021 if (ArgNo > 1) {
2022 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
2023 << *MI << "\n");
2024 return indicatePessimisticFixpoint();
2025 } else {
2026 auto Kind =
2027 ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
2028 Changed =
2029 Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
2030 }
2031 LLVM_DEBUG({
2032 dbgs() << "Accesses by bin after update:\n";
2033 dumpState(dbgs());
2034 });
2035
2036 return Changed;
2037 }
2038
2039 // TODO: Once we have call site specific value information we can provide
2040 // call site specific liveness information and then it makes
2041 // sense to specialize attributes for call site arguments instead of
2042 // redirecting requests to the callee argument.
2043 Argument *Arg = getAssociatedArgument();
2044 if (Arg) {
2045 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2046 auto *ArgAA =
2047 A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
2048 if (ArgAA && ArgAA->getState().isValidState())
2049 return translateAndAddStateFromCallee(A, *ArgAA,
2050 *cast<CallBase>(getCtxI()));
2051 if (!Arg->getParent()->isDeclaration())
2052 return indicatePessimisticFixpoint();
2053 }
2054
2055 bool IsKnownNoCapture;
2056 if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
2057 A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoCapture))
2058 return indicatePessimisticFixpoint();
2059
2060 bool IsKnown = false;
2061 if (AA::isAssumedReadNone(A, getIRPosition(), *this, IsKnown))
2062 return ChangeStatus::UNCHANGED;
2063 bool ReadOnly = AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown);
2064 auto Kind =
2065 ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
2066 return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
2067 nullptr);
2068 }
2069
2070 /// See AbstractAttribute::trackStatistics()
2071 void trackStatistics() const override {
2072 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2073 }
2074};
2075
2076struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
2077 AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
2078 : AAPointerInfoFloating(IRP, A) {}
2079
2080 /// See AbstractAttribute::trackStatistics()
2081 void trackStatistics() const override {
2082 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2083 }
2084};
2085} // namespace
2086
2087/// -----------------------NoUnwind Function Attribute--------------------------
2088
2089namespace {
2090struct AANoUnwindImpl : AANoUnwind {
2091 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
2092
2093 /// See AbstractAttribute::initialize(...).
2094 void initialize(Attributor &A) override {
2095 bool IsKnown;
2096 assert(!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2097 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2098 (void)IsKnown;
2099 }
2100
2101 const std::string getAsStr(Attributor *A) const override {
2102 return getAssumed() ? "nounwind" : "may-unwind";
2103 }
2104
2105 /// See AbstractAttribute::updateImpl(...).
2106 ChangeStatus updateImpl(Attributor &A) override {
2107 auto Opcodes = {
2108 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
2109 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
2110 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
2111
2112 auto CheckForNoUnwind = [&](Instruction &I) {
2113 if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
2114 return true;
2115
2116 if (const auto *CB = dyn_cast<CallBase>(&I)) {
2117 bool IsKnownNoUnwind;
2118 return AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2119 A, this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED,
2120 IsKnownNoUnwind);
2121 }
2122 return false;
2123 };
2124
2125 bool UsedAssumedInformation = false;
2126 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
2127 UsedAssumedInformation))
2128 return indicatePessimisticFixpoint();
2129
2130 return ChangeStatus::UNCHANGED;
2131 }
2132};
2133
2134struct AANoUnwindFunction final : public AANoUnwindImpl {
2135 AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
2136 : AANoUnwindImpl(IRP, A) {}
2137
2138 /// See AbstractAttribute::trackStatistics()
2139 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
2140};
2141
2142/// NoUnwind attribute deduction for a call site.
2143struct AANoUnwindCallSite final
2144 : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl> {
2145 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
2146 : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl>(IRP, A) {}
2147
2148 /// See AbstractAttribute::trackStatistics()
2149 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
2150};
2151} // namespace
2152
2153/// ------------------------ NoSync Function Attribute -------------------------
2154
2155bool AANoSync::isAlignedBarrier(const CallBase &CB, bool ExecutedAligned) {
2156 switch (CB.getIntrinsicID()) {
2157 case Intrinsic::nvvm_barrier0:
2158 case Intrinsic::nvvm_barrier0_and:
2159 case Intrinsic::nvvm_barrier0_or:
2160 case Intrinsic::nvvm_barrier0_popc:
2161 return true;
2162 case Intrinsic::amdgcn_s_barrier:
2163 if (ExecutedAligned)
2164 return true;
2165 break;
2166 default:
2167 break;
2168 }
2169 return hasAssumption(CB, KnownAssumptionString("ompx_aligned_barrier"));
2170}
2171
2172bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
2173 if (!I->isAtomic())
2174 return false;
2175
2176 if (auto *FI = dyn_cast<FenceInst>(I))
2177 // All legal orderings for fence are stronger than monotonic.
2178 return FI->getSyncScopeID() != SyncScope::SingleThread;
2179 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
2180 // Unordered is not a legal ordering for cmpxchg.
2181 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2182 AI->getFailureOrdering() != AtomicOrdering::Monotonic);
2183 }
2184
2185 AtomicOrdering Ordering;
2186 switch (I->getOpcode()) {
2187 case Instruction::AtomicRMW:
2188 Ordering = cast<AtomicRMWInst>(I)->getOrdering();
2189 break;
2190 case Instruction::Store:
2191 Ordering = cast<StoreInst>(I)->getOrdering();
2192 break;
2193 case Instruction::Load:
2194 Ordering = cast<LoadInst>(I)->getOrdering();
2195 break;
2196 default:
2197 llvm_unreachable(
2198 "New atomic operations need to be known in the attributor.");
2199 }
2200
2201 return (Ordering != AtomicOrdering::Unordered &&
2202 Ordering != AtomicOrdering::Monotonic);
2203}
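// For example (hypothetical IR): "store atomic i32 0, ptr %p seq_cst" is
// non-relaxed and thus potentially synchronizing, while "load atomic i32,
// ptr %p monotonic" is relaxed and does not inhibit nosync by itself.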
2204
2205/// Return true if this intrinsic is nosync. This is only used for intrinsics
2206/// which would be nosync except that they have a volatile flag. All other
2207/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2208bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2209 if (auto *MI = dyn_cast<MemIntrinsic>(I))
2210 return !MI->isVolatile();
2211 return false;
2212}
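// For instance, a non-volatile @llvm.memcpy is treated as nosync here,
// while a volatile one is not.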
2213
2214namespace {
2215struct AANoSyncImpl : AANoSync {
2216 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2217
2218 /// See AbstractAttribute::initialize(...).
2219 void initialize(Attributor &A) override {
2220 bool IsKnown;
2221 assert(!AA::hasAssumedIRAttr<Attribute::NoSync>(A, nullptr, getIRPosition(),
2222 DepClassTy::NONE, IsKnown));
2223 (void)IsKnown;
2224 }
2225
2226 const std::string getAsStr(Attributor *A) const override {
2227 return getAssumed() ? "nosync" : "may-sync";
2228 }
2229
2230 /// See AbstractAttribute::updateImpl(...).
2231 ChangeStatus updateImpl(Attributor &A) override;
2232};
2233
2234ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2235
2236 auto CheckRWInstForNoSync = [&](Instruction &I) {
2237 return AA::isNoSyncInst(A, I, *this);
2238 };
2239
2240 auto CheckForNoSync = [&](Instruction &I) {
2241 // At this point we handled all read/write effects and they are all
2242 // nosync, so they can be skipped.
2243 if (I.mayReadOrWriteMemory())
2244 return true;
2245
2246 bool IsKnown;
2247 CallBase &CB = cast<CallBase>(I);
2248 if (AA::hasAssumedIRAttr<Attribute::NoSync>(
2249 A, this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL,
2250 IsKnown))
2251 return true;
2252
2253 // non-convergent and readnone imply nosync.
2254 return !CB.isConvergent();
2255 };
2256
2257 bool UsedAssumedInformation = false;
2258 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2259 UsedAssumedInformation) ||
2260 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2261 UsedAssumedInformation))
2262 return indicatePessimisticFixpoint();
2263
2264 return ChangeStatus::UNCHANGED;
2265}
2266
2267struct AANoSyncFunction final : public AANoSyncImpl {
2268 AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2269 : AANoSyncImpl(IRP, A) {}
2270
2271 /// See AbstractAttribute::trackStatistics()
2272 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2273};
2274
2275/// NoSync attribute deduction for a call site.
2276struct AANoSyncCallSite final : AACalleeToCallSite<AANoSync, AANoSyncImpl> {
2277 AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2278 : AACalleeToCallSite<AANoSync, AANoSyncImpl>(IRP, A) {}
2279
2280 /// See AbstractAttribute::trackStatistics()
2281 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2282};
2283} // namespace
2284
2285/// ------------------------ No-Free Attributes ----------------------------
2286
2287namespace {
2288struct AANoFreeImpl : public AANoFree {
2289 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2290
2291 /// See AbstractAttribute::initialize(...).
2292 void initialize(Attributor &A) override {
2293 bool IsKnown;
2294 assert(!AA::hasAssumedIRAttr<Attribute::NoFree>(A, nullptr, getIRPosition(),
2295 DepClassTy::NONE, IsKnown));
2296 (void)IsKnown;
2297 }
2298
2299 /// See AbstractAttribute::updateImpl(...).
2300 ChangeStatus updateImpl(Attributor &A) override {
2301 auto CheckForNoFree = [&](Instruction &I) {
2302 bool IsKnown;
2303 return AA::hasAssumedIRAttr<Attribute::NoFree>(
2304 A, this, IRPosition::callsite_function(cast<CallBase>(I)),
2305 DepClassTy::REQUIRED, IsKnown);
2306 };
2307
2308 bool UsedAssumedInformation = false;
2309 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2310 UsedAssumedInformation))
2311 return indicatePessimisticFixpoint();
2312 return ChangeStatus::UNCHANGED;
2313 }
2314
2315 /// See AbstractAttribute::getAsStr().
2316 const std::string getAsStr(Attributor *A) const override {
2317 return getAssumed() ? "nofree" : "may-free";
2318 }
2319};
2320
2321struct AANoFreeFunction final : public AANoFreeImpl {
2322 AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2323 : AANoFreeImpl(IRP, A) {}
2324
2325 /// See AbstractAttribute::trackStatistics()
2326 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2327};
2328
2329/// NoFree attribute deduction for a call site.
2330struct AANoFreeCallSite final : AACalleeToCallSite<AANoFree, AANoFreeImpl> {
2331 AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2332 : AACalleeToCallSite<AANoFree, AANoFreeImpl>(IRP, A) {}
2333
2334 /// See AbstractAttribute::trackStatistics()
2335 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2336};
2337
2338/// NoFree attribute for floating values.
2339struct AANoFreeFloating : AANoFreeImpl {
2340 AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2341 : AANoFreeImpl(IRP, A) {}
2342
2343 /// See AbstractAttribute::trackStatistics()
2344 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
2345
2346 /// See AbstractAttribute::updateImpl(...).
2347 ChangeStatus updateImpl(Attributor &A) override {
2348 const IRPosition &IRP = getIRPosition();
2349
2350 bool IsKnown;
2351 if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this,
2352 IRPosition::function_scope(IRP),
2353 DepClassTy::OPTIONAL, IsKnown))
2354 return ChangeStatus::UNCHANGED;
2355
2356 Value &AssociatedValue = getIRPosition().getAssociatedValue();
2357 auto Pred = [&](const Use &U, bool &Follow) -> bool {
2358 Instruction *UserI = cast<Instruction>(U.getUser());
2359 if (auto *CB = dyn_cast<CallBase>(UserI)) {
2360 if (CB->isBundleOperand(&U))
2361 return false;
2362 if (!CB->isArgOperand(&U))
2363 return true;
2364 unsigned ArgNo = CB->getArgOperandNo(&U);
2365
2366 bool IsKnown;
2367 return AA::hasAssumedIRAttr<Attribute::NoFree>(
2368 A, this, IRPosition::callsite_argument(*CB, ArgNo),
2369 DepClassTy::REQUIRED, IsKnown);
2370 }
2371
2372 if (isa<GetElementPtrInst>(UserI) || isa<PHINode>(UserI) ||
2373 isa<SelectInst>(UserI)) {
2374 Follow = true;
2375 return true;
2376 }
2377 if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI))
2378 return true;
2379
2380 if (isa<ReturnInst>(UserI) && getIRPosition().isArgumentPosition())
2381 return true;
2382
2383 // Unknown user.
2384 return false;
2385 };
2386 if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2387 return indicatePessimisticFixpoint();
2388
2389 return ChangeStatus::UNCHANGED;
2390 }
2391};
2392
2393/// NoFree attribute for a call site argument.
2394struct AANoFreeArgument final : AANoFreeFloating {
2395 AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2396 : AANoFreeFloating(IRP, A) {}
2397
2398 /// See AbstractAttribute::trackStatistics()
2399 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2400};
2401
2402/// NoFree attribute for call site arguments.
2403struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2404 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2405 : AANoFreeFloating(IRP, A) {}
2406
2407 /// See AbstractAttribute::updateImpl(...).
2408 ChangeStatus updateImpl(Attributor &A) override {
2409 // TODO: Once we have call site specific value information we can provide
2410 // call site specific liveness information and then it makes
2411 // sense to specialize attributes for call site arguments instead of
2412 // redirecting requests to the callee argument.
2413 Argument *Arg = getAssociatedArgument();
2414 if (!Arg)
2415 return indicatePessimisticFixpoint();
2416 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2417 bool IsKnown;
2418 if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this, ArgPos,
2419 DepClassTy::REQUIRED, IsKnown))
2420 return ChangeStatus::UNCHANGED;
2421 return indicatePessimisticFixpoint();
2422 }
2423
2424 /// See AbstractAttribute::trackStatistics()
2425 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2426};
2427
2428/// NoFree attribute for function return value.
2429struct AANoFreeReturned final : AANoFreeFloating {
2430 AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2431 : AANoFreeFloating(IRP, A) {
2432 llvm_unreachable("NoFree is not applicable to function returns!");
2433 }
2434
2435 /// See AbstractAttribute::initialize(...).
2436 void initialize(Attributor &A) override {
2437 llvm_unreachable("NoFree is not applicable to function returns!");
2438 }
2439
2440 /// See AbstractAttribute::updateImpl(...).
2441 ChangeStatus updateImpl(Attributor &A) override {
2442 llvm_unreachable("NoFree is not applicable to function returns!");
2443 }
2444
2445 /// See AbstractAttribute::trackStatistics()
2446 void trackStatistics() const override {}
2447};
2448
2449/// NoFree attribute deduction for a call site return value.
2450struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2451 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2452 : AANoFreeFloating(IRP, A) {}
2453
2454 ChangeStatus manifest(Attributor &A) override {
2455 return ChangeStatus::UNCHANGED;
2456 }
2457 /// See AbstractAttribute::trackStatistics()
2458 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2459};
2460} // namespace
2461
2462/// ------------------------ NonNull Argument Attribute ------------------------
2463
2464bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP,
2465 Attribute::AttrKind ImpliedAttributeKind,
2466 bool IgnoreSubsumingPositions) {
2467 SmallVector<Attribute::AttrKind, 2> AttrKinds;
2468 AttrKinds.push_back(Attribute::NonNull);
2469 if (!NullPointerIsDefined(IRP.getAnchorScope(),
2470 IRP.getAssociatedType()->getPointerAddressSpace()))
2471 AttrKinds.push_back(Attribute::Dereferenceable);
2472 if (A.hasAttr(IRP, AttrKinds, IgnoreSubsumingPositions, Attribute::NonNull))
2473 return true;
2474
2475 DominatorTree *DT = nullptr;
2476 AssumptionCache *AC = nullptr;
2477 InformationCache &InfoCache = A.getInfoCache();
2478 if (const Function *Fn = IRP.getAnchorScope()) {
2479 if (!Fn->isDeclaration()) {
2480 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2481 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2482 }
2483 }
2484
2485 SmallVector<AA::ValueAndContext> Worklist;
2486 if (IRP.getPositionKind() != IRP_RETURNED) {
2487 Worklist.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
2488 } else {
2489 bool UsedAssumedInformation = false;
2490 if (!A.checkForAllInstructions(
2491 [&](Instruction &I) {
2492 Worklist.push_back({*cast<ReturnInst>(I).getReturnValue(), &I});
2493 return true;
2494 },
2495 IRP.getAssociatedFunction(), nullptr, {Instruction::Ret},
2496 UsedAssumedInformation, false, /*CheckPotentiallyDead=*/true))
2497 return false;
2498 }
2499
2500 if (llvm::any_of(Worklist, [&](AA::ValueAndContext VAC) {
2501 return !isKnownNonZero(
2502 VAC.getValue(),
2503 SimplifyQuery(A.getDataLayout(), DT, AC, VAC.getCtxI()));
2504 }))
2505 return false;
2506
2507 A.manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
2508 Attribute::NonNull)});
2509 return true;
2510}
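// A sketch of the deduction: for a returned position where every "ret"
// returns the address of a (non-extern_weak) global, isKnownNonZero holds
// for each returned value and the nonnull attribute is manifested
// directly (assuming null is not defined in that address space).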
2511
2512namespace {
2513static int64_t getKnownNonNullAndDerefBytesForUse(
2514 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2515 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2516 TrackUse = false;
2517
2518 const Value *UseV = U->get();
2519 if (!UseV->getType()->isPointerTy())
2520 return 0;
2521
2522 // We need to follow common pointer manipulation uses to the accesses they
2523 // feed into. We could try to be smarter and avoid looking through things
2524 // we do not like, e.g., non-inbounds GEPs, but for now we follow them.
2525 if (isa<CastInst>(I)) {
2526 TrackUse = true;
2527 return 0;
2528 }
2529
2530 if (isa<GetElementPtrInst>(I)) {
2531 TrackUse = true;
2532 return 0;
2533 }
2534
2535 Type *PtrTy = UseV->getType();
2536 const Function *F = I->getFunction();
2537 bool NullPointerIsDefined =
2538 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2539 const DataLayout &DL = A.getInfoCache().getDL();
2540 if (const auto *CB = dyn_cast<CallBase>(I)) {
2541 if (CB->isBundleOperand(U)) {
2542 if (RetainedKnowledge RK = getKnowledgeFromUse(
2543 U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2544 IsNonNull |=
2545 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2546 return RK.ArgValue;
2547 }
2548 return 0;
2549 }
2550
2551 if (CB->isCallee(U)) {
2552 IsNonNull |= !NullPointerIsDefined;
2553 return 0;
2554 }
2555
2556 unsigned ArgNo = CB->getArgOperandNo(U);
2557 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2558 // As long as we only use known information there is no need to track
2559 // dependences here.
2560 bool IsKnownNonNull;
2561 AA::hasAssumedIRAttr<Attribute::NonNull>(A, &QueryingAA, IRP,
2562 DepClassTy::NONE, IsKnownNonNull);
2563 IsNonNull |= IsKnownNonNull;
2564 auto *DerefAA =
2565 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2566 return DerefAA ? DerefAA->getKnownDereferenceableBytes() : 0;
2567 }
2568
2569 std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2570 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() ||
2571 Loc->Size.isScalable() || I->isVolatile())
2572 return 0;
2573
2574 int64_t Offset;
2575 const Value *Base =
2576 getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2577 if (Base && Base == &AssociatedValue) {
2578 int64_t DerefBytes = Loc->Size.getValue() + Offset;
2579 IsNonNull |= !NullPointerIsDefined;
2580 return std::max(int64_t(0), DerefBytes);
2581 }
2582
2583 // Corner case when the offset is 0.
2584 Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2585 /*AllowNonInbounds*/ true);
2586 if (Base && Base == &AssociatedValue && Offset == 0) {
2587 int64_t DerefBytes = Loc->Size.getValue();
2588 IsNonNull |= !NullPointerIsDefined;
2589 return std::max(int64_t(0), DerefBytes);
2590 }
2591
2592 return 0;
2593}
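// Worked example (hypothetical IR): for "store i64 0, ptr %q" with
// "%q = getelementptr inbounds i8, ptr %p, i64 8" and %p the associated
// value, the base is %p at Offset 8, so DerefBytes = 8 (store size) + 8
// = 16 bytes known dereferenceable from %p.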
2594
2595struct AANonNullImpl : AANonNull {
2596 AANonNullImpl(const IRPosition &IRP, Attributor &A) : AANonNull(IRP, A) {}
2597
2598 /// See AbstractAttribute::initialize(...).
2599 void initialize(Attributor &A) override {
2600 Value &V = *getAssociatedValue().stripPointerCasts();
2601 if (isa<ConstantPointerNull>(V)) {
2602 indicatePessimisticFixpoint();
2603 return;
2604 }
2605
2606 if (Instruction *CtxI = getCtxI())
2607 followUsesInMBEC(*this, A, getState(), *CtxI);
2608 }
2609
2610 /// See followUsesInMBEC
2611 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2612 AANonNull::StateType &State) {
2613 bool IsNonNull = false;
2614 bool TrackUse = false;
2615 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2616 IsNonNull, TrackUse);
2617 State.setKnown(IsNonNull);
2618 return TrackUse;
2619 }
2620
2621 /// See AbstractAttribute::getAsStr().
2622 const std::string getAsStr(Attributor *A) const override {
2623 return getAssumed() ? "nonnull" : "may-null";
2624 }
2625};
2626
2627/// NonNull attribute for a floating value.
2628struct AANonNullFloating : public AANonNullImpl {
2629 AANonNullFloating(const IRPosition &IRP, Attributor &A)
2630 : AANonNullImpl(IRP, A) {}
2631
2632 /// See AbstractAttribute::updateImpl(...).
2633 ChangeStatus updateImpl(Attributor &A) override {
2634 auto CheckIRP = [&](const IRPosition &IRP) {
2635 bool IsKnownNonNull;
2636 return AA::hasAssumedIRAttr<Attribute::NonNull>(
2637 A, *this, IRP, DepClassTy::OPTIONAL, IsKnownNonNull);
2638 };
2639
2640 bool Stripped;
2641 bool UsedAssumedInformation = false;
2642 Value *AssociatedValue = &getAssociatedValue();
2643 SmallVector<AA::ValueAndContext> Values;
2644 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2645 AA::AnyScope, UsedAssumedInformation))
2646 Stripped = false;
2647 else
2648 Stripped =
2649 Values.size() != 1 || Values.front().getValue() != AssociatedValue;
2650
2651 if (!Stripped) {
2652 bool IsKnown;
2653 if (auto *PHI = dyn_cast<PHINode>(AssociatedValue))
2654 if (llvm::all_of(PHI->incoming_values(), [&](Value *Op) {
2655 return AA::hasAssumedIRAttr<Attribute::NonNull>(
2656 A, this, IRPosition::value(*Op), DepClassTy::OPTIONAL,
2657 IsKnown);
2658 }))
2659 return ChangeStatus::UNCHANGED;
2660 if (auto *Select = dyn_cast<SelectInst>(AssociatedValue))
2661 if (AA::hasAssumedIRAttr<Attribute::NonNull>(
2662 A, this, IRPosition::value(*Select->getFalseValue()),
2663 DepClassTy::OPTIONAL, IsKnown) &&
2664 AA::hasAssumedIRAttr<Attribute::NonNull>(
2665 A, this, IRPosition::value(*Select->getTrueValue()),
2666 DepClassTy::OPTIONAL, IsKnown))
2667 return ChangeStatus::UNCHANGED;
2668
2669 // If we haven't stripped anything we might still be able to use a
2670 // different AA, but only if the IRP changes, effectively when we
2671 // interpret this not as a call site value but as a floating/argument
2672 // value.
2673 const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
2674 if (AVIRP == getIRPosition() || !CheckIRP(AVIRP))
2675 return indicatePessimisticFixpoint();
2676 return ChangeStatus::UNCHANGED;
2677 }
2678
2679 for (const auto &VAC : Values)
2680 if (!CheckIRP(IRPosition::value(*VAC.getValue())))
2681 return indicatePessimisticFixpoint();
2682
2683 return ChangeStatus::UNCHANGED;
2684 }
2685
2686 /// See AbstractAttribute::trackStatistics()
2687 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2688};
2689
2690/// NonNull attribute for function return value.
2691struct AANonNullReturned final
2692 : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2693 false, AANonNull::IRAttributeKind, false> {
2694 AANonNullReturned(const IRPosition &IRP, Attributor &A)
2695 : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2696 false, Attribute::NonNull, false>(IRP, A) {
2697 }
2698
2699 /// See AbstractAttribute::getAsStr().
2700 const std::string getAsStr(Attributor *A) const override {
2701 return getAssumed() ? "nonnull" : "may-null";
2702 }
2703
2704 /// See AbstractAttribute::trackStatistics()
2705 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2706};
2707
2708/// NonNull attribute for function argument.
2709struct AANonNullArgument final
2710 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2711 AANonNullArgument(const IRPosition &IRP, Attributor &A)
2712 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2713
2714 /// See AbstractAttribute::trackStatistics()
2715 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2716};
2717
2718struct AANonNullCallSiteArgument final : AANonNullFloating {
2719 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2720 : AANonNullFloating(IRP, A) {}
2721
2722 /// See AbstractAttribute::trackStatistics()
2723 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2724};
2725
2726/// NonNull attribute for a call site return position.
2727struct AANonNullCallSiteReturned final
2728 : AACalleeToCallSite<AANonNull, AANonNullImpl> {
2729 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2730 : AACalleeToCallSite<AANonNull, AANonNullImpl>(IRP, A) {}
2731
2732 /// See AbstractAttribute::trackStatistics()
2733 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2734};
2735} // namespace
2736
2737/// ------------------------ Must-Progress Attributes --------------------------
2738namespace {
2739struct AAMustProgressImpl : public AAMustProgress {
2740 AAMustProgressImpl(const IRPosition &IRP, Attributor &A)
2741 : AAMustProgress(IRP, A) {}
2742
2743 /// See AbstractAttribute::initialize(...).
2744 void initialize(Attributor &A) override {
2745 bool IsKnown;
2746 assert(!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2747 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2748 (void)IsKnown;
2749 }
2750
2751 /// See AbstractAttribute::getAsStr()
2752 const std::string getAsStr(Attributor *A) const override {
2753 return getAssumed() ? "mustprogress" : "may-not-progress";
2754 }
2755};
2756
2757struct AAMustProgressFunction final : AAMustProgressImpl {
2758 AAMustProgressFunction(const IRPosition &IRP, Attributor &A)
2759 : AAMustProgressImpl(IRP, A) {}
2760
2761 /// See AbstractAttribute::updateImpl(...).
2762 ChangeStatus updateImpl(Attributor &A) override {
2763 bool IsKnown;
2764 if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
2765 A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnown)) {
2766 if (IsKnown)
2767 return indicateOptimisticFixpoint();
2768 return ChangeStatus::UNCHANGED;
2769 }
2770
2771 auto CheckForMustProgress = [&](AbstractCallSite ACS) {
2772 IRPosition IPos = IRPosition::callsite_function(*ACS.getInstruction());
2773 bool IsKnownMustProgress;
2774 return AA::hasAssumedIRAttr<Attribute::MustProgress>(
2775 A, this, IPos, DepClassTy::REQUIRED, IsKnownMustProgress,
2776 /* IgnoreSubsumingPositions */ true);
2777 };
2778
2779 bool AllCallSitesKnown = true;
2780 if (!A.checkForAllCallSites(CheckForMustProgress, *this,
2781 /* RequireAllCallSites */ true,
2782 AllCallSitesKnown))
2783 return indicatePessimisticFixpoint();
2784
2785 return ChangeStatus::UNCHANGED;
2786 }
2787
2788 /// See AbstractAttribute::trackStatistics()
2789 void trackStatistics() const override {
2790 STATS_DECLTRACK_FN_ATTR(mustprogress)
2791 }
2792};
2793
2794/// MustProgress attribute deduction for a call site.
2795struct AAMustProgressCallSite final : AAMustProgressImpl {
2796 AAMustProgressCallSite(const IRPosition &IRP, Attributor &A)
2797 : AAMustProgressImpl(IRP, A) {}
2798
2799 /// See AbstractAttribute::updateImpl(...).
2800 ChangeStatus updateImpl(Attributor &A) override {
2801 // TODO: Once we have call site specific value information we can provide
2802 // call site specific liveness information and then it makes
2803 // sense to specialize attributes for call site arguments instead of
2804 // redirecting requests to the callee argument.
2805 const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
2806 bool IsKnownMustProgress;
2807 if (!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2808 A, this, FnPos, DepClassTy::REQUIRED, IsKnownMustProgress))
2809 return indicatePessimisticFixpoint();
2810 return ChangeStatus::UNCHANGED;
2811 }
2812
2813 /// See AbstractAttribute::trackStatistics()
2814 void trackStatistics() const override {
2815 STATS_DECLTRACK_CS_ATTR(mustprogress);
2816 }
2817};
2818} // namespace
2819
2820/// ------------------------ No-Recurse Attributes ----------------------------
2821
2822namespace {
2823struct AANoRecurseImpl : public AANoRecurse {
2824 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2825
2826 /// See AbstractAttribute::initialize(...).
2827 void initialize(Attributor &A) override {
2828 bool IsKnown;
2829 assert(!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2830 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2831 (void)IsKnown;
2832 }
2833
2834 /// See AbstractAttribute::getAsStr()
2835 const std::string getAsStr(Attributor *A) const override {
2836 return getAssumed() ? "norecurse" : "may-recurse";
2837 }
2838};
2839
2840struct AANoRecurseFunction final : AANoRecurseImpl {
2841 AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2842 : AANoRecurseImpl(IRP, A) {}
2843
2844 /// See AbstractAttribute::updateImpl(...).
2845 ChangeStatus updateImpl(Attributor &A) override {
2846
2847 // If all live call sites are known to be no-recurse, we are as well.
2848 auto CallSitePred = [&](AbstractCallSite ACS) {
2849 bool IsKnownNoRecurse;
2850 if (!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2851 A, this,
2852 IRPosition::function(*ACS.getInstruction()->getFunction()),
2853 DepClassTy::NONE, IsKnownNoRecurse))
2854 return false;
2855 return IsKnownNoRecurse;
2856 };
2857 bool UsedAssumedInformation = false;
2858 if (A.checkForAllCallSites(CallSitePred, *this, true,
2859 UsedAssumedInformation)) {
2860 // If we know all call sites and all are known no-recurse, we are done.
2861 // If all known call sites, which might not be all that exist, are known
2862 // to be no-recurse, we are not done but we can continue to assume
2863 // no-recurse. If one of the call sites we have not visited will become
2864 // live, another update is triggered.
2865 if (!UsedAssumedInformation)
2866 indicateOptimisticFixpoint();
2867 return ChangeStatus::UNCHANGED;
2868 }
2869
2870 const AAInterFnReachability *EdgeReachability =
2871 A.getAAFor<AAInterFnReachability>(*this, getIRPosition(),
2872 DepClassTy::REQUIRED);
2873 if (EdgeReachability && EdgeReachability->canReach(A, *getAnchorScope()))
2874 return indicatePessimisticFixpoint();
2875 return ChangeStatus::UNCHANGED;
2876 }
2877
2878 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2879};
2880
2881/// NoRecurse attribute deduction for a call site.
2882struct AANoRecurseCallSite final
2883 : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl> {
2884 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2885 : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl>(IRP, A) {}
2886
2887 /// See AbstractAttribute::trackStatistics()
2888 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2889};
2890} // namespace
2891
2892/// ------------------------ No-Convergent Attribute --------------------------
2893
2894namespace {
2895struct AANonConvergentImpl : public AANonConvergent {
2896 AANonConvergentImpl(const IRPosition &IRP, Attributor &A)
2897 : AANonConvergent(IRP, A) {}
2898
2899 /// See AbstractAttribute::getAsStr()
2900 const std::string getAsStr(Attributor *A) const override {
2901 return getAssumed() ? "non-convergent" : "may-be-convergent";
2902 }
2903};
2904
2905struct AANonConvergentFunction final : AANonConvergentImpl {
2906 AANonConvergentFunction(const IRPosition &IRP, Attributor &A)
2907 : AANonConvergentImpl(IRP, A) {}
2908
2909 /// See AbstractAttribute::updateImpl(...).
2910 ChangeStatus updateImpl(Attributor &A) override {
2911 // If all function calls are known to not be convergent, we are not
2912 // convergent.
2913 auto CalleeIsNotConvergent = [&](Instruction &Inst) {
2914 CallBase &CB = cast<CallBase>(Inst);
2915 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
2916 if (!Callee || Callee->isIntrinsic()) {
2917 return false;
2918 }
2919 if (Callee->isDeclaration()) {
2920 return !Callee->hasFnAttribute(Attribute::Convergent);
2921 }
2922 const auto *ConvergentAA = A.getAAFor<AANonConvergent>(
2923 *this, IRPosition::function(*Callee), DepClassTy::REQUIRED);
2924 return ConvergentAA && ConvergentAA->isAssumedNotConvergent();
2925 };
2926
2927 bool UsedAssumedInformation = false;
2928 if (!A.checkForAllCallLikeInstructions(CalleeIsNotConvergent, *this,
2929 UsedAssumedInformation)) {
2930 return indicatePessimisticFixpoint();
2931 }
2932 return ChangeStatus::UNCHANGED;
2933 }
2934
2935 ChangeStatus manifest(Attributor &A) override {
2936 if (isKnownNotConvergent() &&
2937 A.hasAttr(getIRPosition(), Attribute::Convergent)) {
2938 A.removeAttrs(getIRPosition(), {Attribute::Convergent});
2939 return ChangeStatus::CHANGED;
2940 }
2941 return ChangeStatus::UNCHANGED;
2942 }
2943
2944 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(convergent) }
2945};
2946} // namespace
2947
2948/// -------------------- Undefined-Behavior Attributes ------------------------
2949
2950namespace {
2951struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2952 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2953 : AAUndefinedBehavior(IRP, A) {}
2954
2955 /// See AbstractAttribute::updateImpl(...).
2956 // (checks memory accesses through a pointer, branches, calls, and returns)
2957 ChangeStatus updateImpl(Attributor &A) override {
2958 const size_t UBPrevSize = KnownUBInsts.size();
2959 const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2960
2961 auto InspectMemAccessInstForUB = [&](Instruction &I) {
2962 // The LangRef now states volatile stores are not UB, so let's skip them.
2963 if (I.isVolatile() && I.mayWriteToMemory())
2964 return true;
2965
2966 // Skip instructions that are already saved.
2967 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2968 return true;
2969
2970 // If we reach here, we know we have an instruction
2971 // that accesses memory through a pointer operand;
2972 // getPointerOperand() should give us that operand.
2973 Value *PtrOp =
2974 const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2975 assert(PtrOp &&
2976 "Expected pointer operand of memory accessing instruction");
2977
2978 // Either we stopped and the appropriate action was taken,
2979 // or we got back a simplified value to continue.
2980 std::optional<Value *> SimplifiedPtrOp =
2981 stopOnUndefOrAssumed(A, PtrOp, &I);
2982 if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
2983 return true;
2984 const Value *PtrOpVal = *SimplifiedPtrOp;
2985
2986 // A memory access through a pointer is considered UB
2987 // only if the pointer has constant null value.
2988 // TODO: Expand it to not only check constant values.
2989 if (!isa<ConstantPointerNull>(PtrOpVal)) {
2990 AssumedNoUBInsts.insert(&I);
2991 return true;
2992 }
2993 const Type *PtrTy = PtrOpVal->getType();
2994
2995 // Because we only consider instructions inside functions,
2996 // assume that a parent function exists.
2997 const Function *F = I.getFunction();
2998
2999 // A memory access using constant null pointer is only considered UB
3000 // if null pointer is _not_ defined for the target platform.
3001 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
3002 AssumedNoUBInsts.insert(&I);
3003 else
3004 KnownUBInsts.insert(&I);
3005 return true;
3006 };
3007
3008 auto InspectBrInstForUB = [&](Instruction &I) {
3009 // A conditional branch instruction is considered UB if it has `undef`
3010 // condition.
3011
3012 // Skip instructions that are already saved.
3013 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
3014 return true;
3015
3016 // We know we have a branch instruction.
3017 auto *BrInst = cast<BranchInst>(&I);
3018
3019 // Unconditional branches are never considered UB.
3020 if (BrInst->isUnconditional())
3021 return true;
3022
3023 // Either we stopped and the appropriate action was taken,
3024 // or we got back a simplified value to continue.
3025 std::optional<Value *> SimplifiedCond =
3026 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
3027 if (!SimplifiedCond || !*SimplifiedCond)
3028 return true;
3029 AssumedNoUBInsts.insert(&I);
3030 return true;
3031 };
3032
3033 auto InspectCallSiteForUB = [&](Instruction &I) {
3034 // Check whether a callsite always causes UB or not.
3035
3036 // Skip instructions that are already saved.
3037 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
3038 return true;
3039
3040 // Check nonnull and noundef argument attribute violation for each
3041 // callsite.
3042 CallBase &CB = cast<CallBase>(I);
3043 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3044 if (!Callee)
3045 return true;
3046 for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
3047 // If the current argument is known to be simplified to a null pointer
3048 // and the corresponding argument position is known to have the nonnull
3049 // attribute, the argument is poison. Furthermore, if the argument is
3050 // poison and the position is known to have the noundef attribute, this
3051 // callsite is considered UB.
3052 if (idx >= Callee->arg_size())
3053 break;
3054 Value *ArgVal = CB.getArgOperand(idx);
3055 if (!ArgVal)
3056 continue;
3057 // Here, we handle three cases.
3058 // (1) Not having a value means it is dead (we can replace the value
3059 // with undef).
3060 // (2) Simplified to undef: the argument violates the noundef attribute.
3061 // (3) Simplified to a null pointer where it is known to be nonnull:
3062 // the argument is a poison value and violates the noundef attribute.
3063 IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
3064 bool IsKnownNoUndef;
3065 AA::hasAssumedIRAttr<Attribute::NoUndef>(
3066 A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNoUndef);
3067 if (!IsKnownNoUndef)
3068 continue;
3069 bool UsedAssumedInformation = false;
3070 std::optional<Value *> SimplifiedVal =
3071 A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
3072 UsedAssumedInformation, AA::Interprocedural);
3073 if (UsedAssumedInformation)
3074 continue;
3075 if (SimplifiedVal && !*SimplifiedVal)
3076 return true;
3077 if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
3078 KnownUBInsts.insert(&I);
3079 continue;
3080 }
3081 if (!ArgVal->getType()->isPointerTy() ||
3082 !isa<ConstantPointerNull>(**SimplifiedVal))
3083 continue;
3084 bool IsKnownNonNull;
3085 AA::hasAssumedIRAttr<Attribute::NonNull>(
3086 A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNonNull);
3087 if (IsKnownNonNull)
3088 KnownUBInsts.insert(&I);
3089 }
3090 return true;
3091 };
3092
3093 auto InspectReturnInstForUB = [&](Instruction &I) {
3094 auto &RI = cast<ReturnInst>(I);
3095 // Either we stopped and the appropriate action was taken,
3096 // or we got back a simplified return value to continue.
3097 std::optional<Value *> SimplifiedRetValue =
3098 stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
3099 if (!SimplifiedRetValue || !*SimplifiedRetValue)
3100 return true;
3101
3102 // Check if a return instruction always causes UB or not.
3103 // Note: It is guaranteed that the returned position of the anchor
3104 // scope has noundef attribute when this is called.
3105 // We also ensure the return position is not "assumed dead"
3106 // because the returned value was then potentially simplified to
3107 // `undef` in AAReturnedValues without removing the `noundef`
3108 // attribute yet.
3109
3110 // When the returned position has the noundef attribute, UB occurs in the
3111 // following cases.
3112 // (1) Returned value is known to be undef.
3113 // (2) The value is known to be a null pointer and the returned
3114 // position has nonnull attribute (because the returned value is
3115 // poison).
3116 if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
3117 bool IsKnownNonNull;
3118 AA::hasAssumedIRAttr<Attribute::NonNull>(
3119 A, this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE,
3120 IsKnownNonNull);
3121 if (IsKnownNonNull)
3122 KnownUBInsts.insert(&I);
3123 }
3124
3125 return true;
3126 };
3127
3128 bool UsedAssumedInformation = false;
3129 A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
3130 {Instruction::Load, Instruction::Store,
3131 Instruction::AtomicCmpXchg,
3132 Instruction::AtomicRMW},
3133 UsedAssumedInformation,
3134 /* CheckBBLivenessOnly */ true);
3135 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
3136 UsedAssumedInformation,
3137 /* CheckBBLivenessOnly */ true);
3138 A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
3139 UsedAssumedInformation);
3140
3141 // If the returned position of the anchor scope has the noundef attribute,
3142 // check all return instructions.
3143 if (!getAnchorScope()->getReturnType()->isVoidTy()) {
3144 const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
3145 if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
3146 bool IsKnownNoUndef;
3147 AA::hasAssumedIRAttr<Attribute::NoUndef>(
3148 A, this, ReturnIRP, DepClassTy::NONE, IsKnownNoUndef);
3149 if (IsKnownNoUndef)
3150 A.checkForAllInstructions(InspectReturnInstForUB, *this,
3151 {Instruction::Ret}, UsedAssumedInformation,
3152 /* CheckBBLivenessOnly */ true);
3153 }
3154 }
3155
3156 if (NoUBPrevSize != AssumedNoUBInsts.size() ||
3157 UBPrevSize != KnownUBInsts.size())
3158 return ChangeStatus::CHANGED;
3159 return ChangeStatus::UNCHANGED;
3160 }
3161
3162 bool isKnownToCauseUB(Instruction *I) const override {
3163 return KnownUBInsts.count(I);
3164 }
3165
3166 bool isAssumedToCauseUB(Instruction *I) const override {
3167 // In simple words, if an instruction is not in the set of instructions
3168 // assumed to _not_ cause UB, then it is assumed to cause UB (that
3169 // includes those in the KnownUBInsts set). The rest of the
3170 // boilerplate ensures that it is one of the instructions we test
3171 // for UB.
3172
3173 switch (I->getOpcode()) {
3174 case Instruction::Load:
3175 case Instruction::Store:
3176 case Instruction::AtomicCmpXchg:
3177 case Instruction::AtomicRMW:
3178 return !AssumedNoUBInsts.count(I);
3179 case Instruction::Br: {
3180 auto *BrInst = cast<BranchInst>(I);
3181 if (BrInst->isUnconditional())
3182 return false;
3183 return !AssumedNoUBInsts.count(I);
3184 } break;
3185 default:
3186 return false;
3187 }
3188 return false;
3189 }
3190
3191 ChangeStatus manifest(Attributor &A) override {
3192 if (KnownUBInsts.empty())
3193 return ChangeStatus::UNCHANGED;
3194 for (Instruction *I : KnownUBInsts)
3195 A.changeToUnreachableAfterManifest(I);
3196 return ChangeStatus::CHANGED;
3197 }
3198
3199 /// See AbstractAttribute::getAsStr()
3200 const std::string getAsStr(Attributor *A) const override {
3201 return getAssumed() ? "undefined-behavior" : "no-ub";
3202 }
3203
3204 /// Note: The correctness of this analysis depends on the fact that the
3205 /// following 2 sets will stop changing after some point.
3206 /// "Change" here means that their size changes.
3207 /// The size of each set is monotonically increasing
3208 /// (we only add items to them) and it is upper bounded by the number of
3209 /// instructions in the processed function (we can never save more
3210 /// elements in either set than this number). Hence, at some point,
3211 /// they will stop increasing.
3212 /// Consequently, at some point, both sets will have stopped
3213 /// changing, effectively making the analysis reach a fixpoint.
3214
3215 /// Note: These 2 sets are disjoint and an instruction can be considered
3216 /// one of 3 things:
3217 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
3218 /// the KnownUBInsts set.
3219 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
3220 /// has a reason to assume it).
3221 /// 3) Assumed to not cause UB: every other instruction. AAUndefinedBehavior
3222 /// could not find a reason to assume or prove that it can cause UB,
3223 /// hence it assumes it doesn't. We have a set for these instructions
3224 /// so that we don't reprocess them in every update.
3225 /// Note however that instructions in this set may cause UB.
3226
3227protected:
3228 /// A set of all live instructions _known_ to cause UB.
3229 SmallPtrSet<Instruction *, 8> KnownUBInsts;
3230
3231private:
3232 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
3233 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
3234
3235 // Should be called on updates in which if we're processing an instruction
3236 // \p I that depends on a value \p V, one of the following has to happen:
3237 // - If the value is assumed, then stop.
3238 // - If the value is known but undef, then consider it UB.
3239 // - Otherwise, do specific processing with the simplified value.
3240 // We return std::nullopt in the first 2 cases to signify that an appropriate
3241 // action was taken and the caller should stop.
3242 // Otherwise, we return the simplified value that the caller should
3243 // use for specific processing.
3244 std::optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
3245 Instruction *I) {
3246 bool UsedAssumedInformation = false;
3247 std::optional<Value *> SimplifiedV =
3248 A.getAssumedSimplified(IRPosition::value(*V), *this,
3249 UsedAssumedInformation, AA::Interprocedural);
3250 if (!UsedAssumedInformation) {
3251 // Don't depend on assumed values.
3252 if (!SimplifiedV) {
3253 // If it is known (which we tested above) but it doesn't have a value,
3254 // then we can assume `undef` and hence the instruction is UB.
3255 KnownUBInsts.insert(I);
3256 return std::nullopt;
3257 }
3258 if (!*SimplifiedV)
3259 return nullptr;
3260 V = *SimplifiedV;
3261 }
3262 if (isa<UndefValue>(V)) {
3263 KnownUBInsts.insert(I);
3264 return std::nullopt;
3265 }
3266 return V;
3267 }
3268};
3269
3270struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
3271 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
3272 : AAUndefinedBehaviorImpl(IRP, A) {}
3273
3274 /// See AbstractAttribute::trackStatistics()
3275 void trackStatistics() const override {
3276 STATS_DECL(UndefinedBehaviorInstruction, Instruction,
3277 "Number of instructions known to have UB");
3278 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
3279 KnownUBInsts.size();
3280 }
3281};
3282} // namespace
3283
3284/// ------------------------ Will-Return Attributes ----------------------------
3285
3286namespace {
3287// Helper function that checks whether a function has any cycle that is not
3288// known to be bounded.
3289// Loops with a known max trip count are considered bounded; any other cycle is not.
3290static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
3291 ScalarEvolution *SE =
3292 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
3293 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
3294 // If either SCEV or LoopInfo is not available for the function, we assume
3295 // any cycle to be unbounded.
3296 // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
3297 // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
3298 if (!SE || !LI) {
3299 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
3300 if (SCCI.hasCycle())
3301 return true;
3302 return false;
3303 }
3304
3305 // If there's irreducible control, the function may contain non-loop cycles.
3306 if (mayContainIrreducibleControl(F, LI))
3307 return true;
3308
3309 // Any loop that does not have a max trip count is considered an unbounded cycle.
3310 for (auto *L : LI->getLoopsInPreorder()) {
3311 if (!SE->getSmallConstantMaxTripCount(L))
3312 return true;
3313 }
3314 return false;
3315}
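// For illustration (hypothetical source, assuming SCEV can bound the first
// loop but not the second):
//   for (int i = 0; i != 128; ++i) { ... }  // max trip count known: bounded
//   while (keep_going()) { ... }            // no max trip count: unbounded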
3316
3317struct AAWillReturnImpl : public AAWillReturn {
3318 AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
3319 : AAWillReturn(IRP, A) {}
3320
3321 /// See AbstractAttribute::initialize(...).
3322 void initialize(Attributor &A) override {
3323 bool IsKnown;
3324 assert(!AA::hasAssumedIRAttr<Attribute::WillReturn>(
3325 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
3326 (void)IsKnown;
3327 }
3328
3329 /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
3330 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
3331 if (!A.hasAttr(getIRPosition(), {Attribute::MustProgress}))
3332 return false;
3333
3334 bool IsKnown;
3335 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3336 return IsKnown || !KnownOnly;
3337 return false;
3338 }
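 // For illustration (hypothetical IR): for a definition such as
 //   define i32 @f(ptr %p) mustprogress memory(read) { ... }
 // mustprogress rules out running forever without observable progress, and
 // readonly (memory(read)) rules out observable progress via writes, so
 // `willreturn` can be deduced from the two together.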
3339
3340 /// See AbstractAttribute::updateImpl(...).
3341 ChangeStatus updateImpl(Attributor &A) override {
3342 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3343 return ChangeStatus::UNCHANGED;
3344
3345 auto CheckForWillReturn = [&](Instruction &I) {
3346 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
3347 bool IsKnown;
3348 if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
3349 A, this, IPos, DepClassTy::REQUIRED, IsKnown)) {
3350 if (IsKnown)
3351 return true;
3352 } else {
3353 return false;
3354 }
3355 bool IsKnownNoRecurse;
3356 return AA::hasAssumedIRAttr<Attribute::NoRecurse>(
3357 A, this, IPos, DepClassTy::REQUIRED, IsKnownNoRecurse);
3358 };
3359
3360 bool UsedAssumedInformation = false;
3361 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3362 UsedAssumedInformation))
3363 return indicatePessimisticFixpoint();
3364
3365 return ChangeStatus::UNCHANGED;
3366 }
3367
3368 /// See AbstractAttribute::getAsStr()
3369 const std::string getAsStr(Attributor *A) const override {
3370 return getAssumed() ? "willreturn" : "may-noreturn";
3371 }
3372};
3373
3374struct AAWillReturnFunction final : AAWillReturnImpl {
3375 AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3376 : AAWillReturnImpl(IRP, A) {}
3377
3378 /// See AbstractAttribute::initialize(...).
3379 void initialize(Attributor &A) override {
3380 AAWillReturnImpl::initialize(A);
3381
3382 Function *F = getAnchorScope();
3383 assert(F && "Did expect an anchor function");
3384 if (F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3385 indicatePessimisticFixpoint();
3386 }
3387
3388 /// See AbstractAttribute::trackStatistics()
3389 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3390};
3391
3392/// WillReturn attribute deduction for a call site.
3393struct AAWillReturnCallSite final
3394 : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl> {
3395 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3396 : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl>(IRP, A) {}
3397
3398 /// See AbstractAttribute::updateImpl(...).
3399 ChangeStatus updateImpl(Attributor &A) override {
3400 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3401 return ChangeStatus::UNCHANGED;
3402
3403 return AACalleeToCallSite::updateImpl(A);
3404 }
3405
3406 /// See AbstractAttribute::trackStatistics()
3407 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3408};
3409} // namespace
3410
3411/// -------------------AAIntraFnReachability Attribute--------------------------
3412
3413/// All information associated with a reachability query. This boilerplate code
3414/// is used by both AAIntraFnReachability and AAInterFnReachability, with
3415/// different \p ToTy values.
3416template <typename ToTy> struct ReachabilityQueryInfo {
3417 enum class Reachable {
3418 No,
3419 Yes,
3420 };
3421
3422 /// Start here,
3423 const Instruction *From = nullptr;
3424 /// reach this place,
3425 const ToTy *To = nullptr;
3426 /// without going through any of these instructions,
3427 const AA::InstExclusionSetTy *ExclusionSet = nullptr;
3428 /// and remember if it worked:
3429 Reachable Result = Reachable::No;
3430
3431 /// Precomputed hash for this RQI.
3432 unsigned Hash = 0;
3433
3434 unsigned computeHashValue() const {
3435 assert(Hash == 0 && "Computed hash twice!");
3436 using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3437 using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3438 return const_cast<ReachabilityQueryInfo<ToTy> *>(this)->Hash =
3439 detail::combineHashValue(PairDMI::getHashValue({From, To}),
3440 InstSetDMI::getHashValue(ExclusionSet));
3441 }
3442
3443 ReachabilityQueryInfo(const Instruction *From, const ToTy *To)
3444 : From(From), To(To) {}
3445
3446 /// Constructor replacement to ensure unique and stable sets are used for the
3447 /// cache.
3448 ReachabilityQueryInfo(Attributor &A, const Instruction &From, const ToTy &To,
3449 const AA::InstExclusionSetTy *ES, bool MakeUnique)
3450 : From(&From), To(&To), ExclusionSet(ES) {
3451
3452 if (!ES || ES->empty()) {
3453 ExclusionSet = nullptr;
3454 } else if (MakeUnique) {
3455 ExclusionSet = A.getInfoCache().getOrCreateUniqueBlockExecutionSet(ES);
3456 }
3457 }
3458
3459 ReachabilityQueryInfo(const ReachabilityQueryInfo &RQI)
3460 : From(RQI.From), To(RQI.To), ExclusionSet(RQI.ExclusionSet) {}
3461};
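// For illustration (hypothetical use): the query "can instruction I2 be
// reached from instruction I1 without passing through any instruction in the
// set ES?" is encoded roughly as
//   ReachabilityQueryInfo<Instruction> RQI(A, I1, I2, ES, /*MakeUnique=*/true);
// and RQI.Result records the answer once it is known.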
3462
3463namespace llvm {
3464template <typename ToTy> struct DenseMapInfo<ReachabilityQueryInfo<ToTy> *> {
3465 using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3466 using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3467
3468 static ReachabilityQueryInfo<ToTy> EmptyKey;
3469 static ReachabilityQueryInfo<ToTy> TombstoneKey;
3470
3471 static inline ReachabilityQueryInfo<ToTy> *getEmptyKey() { return &EmptyKey; }
3472 static inline ReachabilityQueryInfo<ToTy> *getTombstoneKey() {
3473 return &TombstoneKey;
3474 }
3475 static unsigned getHashValue(const ReachabilityQueryInfo<ToTy> *RQI) {
3476 return RQI->Hash ? RQI->Hash : RQI->computeHashValue();
3477 }
3478 static bool isEqual(const ReachabilityQueryInfo<ToTy> *LHS,
3479 const ReachabilityQueryInfo<ToTy> *RHS) {
3480 if (!PairDMI::isEqual({LHS->From, LHS->To}, {RHS->From, RHS->To}))
3481 return false;
3482 return InstSetDMI::isEqual(LHS->ExclusionSet, RHS->ExclusionSet);
3483 }
3484};
3485
3486#define DefineKeys(ToTy) \
3487 template <> \
3488 ReachabilityQueryInfo<ToTy> \
3489 DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::EmptyKey = \
3490 ReachabilityQueryInfo<ToTy>( \
3491 DenseMapInfo<const Instruction *>::getEmptyKey(), \
3492 DenseMapInfo<const ToTy *>::getEmptyKey()); \
3493 template <> \
3494 ReachabilityQueryInfo<ToTy> \
3495 DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::TombstoneKey = \
3496 ReachabilityQueryInfo<ToTy>( \
3497 DenseMapInfo<const Instruction *>::getTombstoneKey(), \
3498 DenseMapInfo<const ToTy *>::getTombstoneKey());
3499
3500DefineKeys(Instruction) DefineKeys(Function)
3501 #undef DefineKeys
3502
3503} // namespace llvm
3504
3505namespace {
3506
3507template <typename BaseTy, typename ToTy>
3508struct CachedReachabilityAA : public BaseTy {
3509 using RQITy = ReachabilityQueryInfo<ToTy>;
3510
3511 CachedReachabilityAA(const IRPosition &IRP, Attributor &A) : BaseTy(IRP, A) {}
3512
3513 /// See AbstractAttribute::isQueryAA.
3514 bool isQueryAA() const override { return true; }
3515
3516 /// See AbstractAttribute::updateImpl(...).
3517 ChangeStatus updateImpl(Attributor &A) override {
3518 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3519 for (unsigned u = 0, e = QueryVector.size(); u < e; ++u) {
3520 RQITy *RQI = QueryVector[u];
3521 if (RQI->Result == RQITy::Reachable::No &&
3522 isReachableImpl(A, *RQI, /*IsTemporaryRQI=*/false))
3523 Changed = ChangeStatus::CHANGED;
3524 }
3525 return Changed;
3526 }
3527
3528 virtual bool isReachableImpl(Attributor &A, RQITy &RQI,
3529 bool IsTemporaryRQI) = 0;
3530
3531 bool rememberResult(Attributor &A, typename RQITy::Reachable Result,
3532 RQITy &RQI, bool UsedExclusionSet, bool IsTemporaryRQI) {
3533 RQI.Result = Result;
3534
3535 // Remove the temporary RQI from the cache.
3536 if (IsTemporaryRQI)
3537 QueryCache.erase(&RQI);
3538
3539 // Insert a plain RQI (w/o exclusion set) if that makes sense. Two options:
3540 // 1) If it is reachable, it doesn't matter if we have an exclusion set for
3541 // this query. 2) We did not use the exclusion set, potentially because
3542 // there is none.
3543 if (Result == RQITy::Reachable::Yes || !UsedExclusionSet) {
3544 RQITy PlainRQI(RQI.From, RQI.To);
3545 if (!QueryCache.count(&PlainRQI)) {
3546 RQITy *RQIPtr = new (A.Allocator) RQITy(RQI.From, RQI.To);
3547 RQIPtr->Result = Result;
3548 QueryVector.push_back(RQIPtr);
3549 QueryCache.insert(RQIPtr);
3550 }
3551 }
3552
3553 // Check if we need to insert a new permanent RQI with the exclusion set.
3554 if (IsTemporaryRQI && Result != RQITy::Reachable::Yes && UsedExclusionSet) {
3555 assert((!RQI.ExclusionSet || !RQI.ExclusionSet->empty()) &&
3556 "Did not expect empty set!");
3557 RQITy *RQIPtr = new (A.Allocator)
3558 RQITy(A, *RQI.From, *RQI.To, RQI.ExclusionSet, true);
3559 assert(RQIPtr->Result == RQITy::Reachable::No && "Already reachable?");
3560 RQIPtr->Result = Result;
3561 assert(!QueryCache.count(RQIPtr));
3562 QueryVector.push_back(RQIPtr);
3563 QueryCache.insert(RQIPtr);
3564 }
3565
3566 if (Result == RQITy::Reachable::No && IsTemporaryRQI)
3567 A.registerForUpdate(*this);
3568 return Result == RQITy::Reachable::Yes;
3569 }
3570
3571 const std::string getAsStr(Attributor *A) const override {
3572 // TODO: Return the number of reachable queries.
3573 return "#queries(" + std::to_string(QueryVector.size()) + ")";
3574 }
3575
3576 bool checkQueryCache(Attributor &A, RQITy &StackRQI,
3577 typename RQITy::Reachable &Result) {
3578 if (!this->getState().isValidState()) {
3579 Result = RQITy::Reachable::Yes;
3580 return true;
3581 }
3582
3583 // If we have an exclusion set we might be able to find our answer by
3584 // ignoring it first.
3585 if (StackRQI.ExclusionSet) {
3586 RQITy PlainRQI(StackRQI.From, StackRQI.To);
3587 auto It = QueryCache.find(&PlainRQI);
3588 if (It != QueryCache.end() && (*It)->Result == RQITy::Reachable::No) {
3589 Result = RQITy::Reachable::No;
3590 return true;
3591 }
3592 }
3593
3594 auto It = QueryCache.find(&StackRQI);
3595 if (It != QueryCache.end()) {
3596 Result = (*It)->Result;
3597 return true;
3598 }
3599
3600 // Insert a temporary for recursive queries. We will replace it with a
3601 // permanent entry later.
3602 QueryCache.insert(&StackRQI);
3603 return false;
3604 }
3605
3606private:
3607 SmallVector<RQITy *> QueryVector;
3608 DenseSet<RQITy *> QueryCache;
3609};
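// Usage sketch (simplified from isAssumedReachable in the derived AAs below):
//   RQITy StackRQI(A, From, To, ExclusionSet, false);
//   typename RQITy::Reachable Result;
//   if (!checkQueryCache(A, StackRQI, Result))
//     return isReachableImpl(A, StackRQI, /*IsTemporaryRQI=*/true);
//   return Result == RQITy::Reachable::Yes;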
3610
3611struct AAIntraFnReachabilityFunction final
3612 : public CachedReachabilityAA<AAIntraFnReachability, Instruction> {
3613 using Base = CachedReachabilityAA<AAIntraFnReachability, Instruction>;
3614 AAIntraFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
3615 : Base(IRP, A) {
3616 DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
3617 *IRP.getAssociatedFunction());
3618 }
3619
3620 bool isAssumedReachable(
3621 Attributor &A, const Instruction &From, const Instruction &To,
3622 const AA::InstExclusionSetTy *ExclusionSet) const override {
3623 auto *NonConstThis = const_cast<AAIntraFnReachabilityFunction *>(this);
3624 if (&From == &To)
3625 return true;
3626
3627 RQITy StackRQI(A, From, To, ExclusionSet, false);
3628 typename RQITy::Reachable Result;
3629 if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
3630 return NonConstThis->isReachableImpl(A, StackRQI,
3631 /*IsTemporaryRQI=*/true);
3632 return Result == RQITy::Reachable::Yes;
3633 }
3634
3635 ChangeStatus updateImpl(Attributor &A) override {
3636 // We only depend on liveness. DeadEdges is all we care about; check if any
3637 // of them changed.
3638 auto *LivenessAA =
3639 A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3640 if (LivenessAA &&
3641 llvm::all_of(DeadEdges,
3642 [&](const auto &DeadEdge) {
3643 return LivenessAA->isEdgeDead(DeadEdge.first,
3644 DeadEdge.second);
3645 }) &&
3646 llvm::all_of(DeadBlocks, [&](const BasicBlock *BB) {
3647 return LivenessAA->isAssumedDead(BB);
3648 })) {
3649 return ChangeStatus::UNCHANGED;
3650 }
3651 DeadEdges.clear();
3652 DeadBlocks.clear();
3653 return Base::updateImpl(A);
3654 }
3655
3656 bool isReachableImpl(Attributor &A, RQITy &RQI,
3657 bool IsTemporaryRQI) override {
3658 const Instruction *Origin = RQI.From;
3659 bool UsedExclusionSet = false;
3660
3661 auto WillReachInBlock = [&](const Instruction &From, const Instruction &To,
3662 const AA::InstExclusionSetTy *ExclusionSet) {
3663 const Instruction *IP = &From;
3664 while (IP && IP != &To) {
3665 if (ExclusionSet && IP != Origin && ExclusionSet->count(IP)) {
3666 UsedExclusionSet = true;
3667 break;
3668 }
3669 IP = IP->getNextNode();
3670 }
3671 return IP == &To;
3672 };
3673
3674 const BasicBlock *FromBB = RQI.From->getParent();
3675 const BasicBlock *ToBB = RQI.To->getParent();
3676 assert(FromBB->getParent() == ToBB->getParent() &&
3677 "Not an intra-procedural query!");
3678
3679 // Check intra-block reachability; however, other reaching paths are still
3680 // possible.
3681 if (FromBB == ToBB &&
3682 WillReachInBlock(*RQI.From, *RQI.To, RQI.ExclusionSet))
3683 return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3684 IsTemporaryRQI);
3685
3686 // Check if reaching the ToBB block is sufficient or if even that would not
3687 // ensure reaching the target. In the latter case we are done.
3688 if (!WillReachInBlock(ToBB->front(), *RQI.To, RQI.ExclusionSet))
3689 return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3690 IsTemporaryRQI);
3691
3692 const Function *Fn = FromBB->getParent();
3693 SmallPtrSet<const BasicBlock *, 16> ExclusionBlocks;
3694 if (RQI.ExclusionSet)
3695 for (auto *I : *RQI.ExclusionSet)
3696 if (I->getFunction() == Fn)
3697 ExclusionBlocks.insert(I->getParent());
3698
3699 // Check if we make it out of the FromBB block at all.
3700 if (ExclusionBlocks.count(FromBB) &&
3701 !WillReachInBlock(*RQI.From, *FromBB->getTerminator(),
3702 RQI.ExclusionSet))
3703 return rememberResult(A, RQITy::Reachable::No, RQI, true, IsTemporaryRQI);
3704
3705 auto *LivenessAA =
3706 A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3707 if (LivenessAA && LivenessAA->isAssumedDead(ToBB)) {
3708 DeadBlocks.insert(ToBB);
3709 return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3710 IsTemporaryRQI);
3711 }
3712
3713 SmallPtrSet<const BasicBlock *, 16> Visited;
3714 SmallVector<const BasicBlock *, 16> Worklist;
3715 Worklist.push_back(FromBB);
3716
3717 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> LocalDeadEdges;
3718 while (!Worklist.empty()) {
3719 const BasicBlock *BB = Worklist.pop_back_val();
3720 if (!Visited.insert(BB).second)
3721 continue;
3722 for (const BasicBlock *SuccBB : successors(BB)) {
3723 if (LivenessAA && LivenessAA->isEdgeDead(BB, SuccBB)) {
3724 LocalDeadEdges.insert({BB, SuccBB});
3725 continue;
3726 }
3727 // We checked before if we just need to reach the ToBB block.
3728 if (SuccBB == ToBB)
3729 return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3730 IsTemporaryRQI);
3731 if (DT && ExclusionBlocks.empty() && DT->dominates(BB, ToBB))
3732 return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3733 IsTemporaryRQI);
3734
3735 if (ExclusionBlocks.count(SuccBB)) {
3736 UsedExclusionSet = true;
3737 continue;
3738 }
3739 Worklist.push_back(SuccBB);
3740 }
3741 }
3742
3743 DeadEdges.insert(LocalDeadEdges.begin(), LocalDeadEdges.end());
3744 return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3745 IsTemporaryRQI);
3746 }
3747
3748 /// See AbstractAttribute::trackStatistics()
3749 void trackStatistics() const override {}
3750
3751private:
3752 // Set of assumed dead blocks we used in the last query. If any of them
3753 // changes, we update the state.
3754 DenseSet<const BasicBlock *> DeadBlocks;
3755
3756 // Set of assumed dead edges we used in the last query. If any of them
3757 // changes, we update the state.
3758 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> DeadEdges;
3759
3760 /// The dominator tree of the function to short-circuit reasoning.
3761 const DominatorTree *DT = nullptr;
3762};
3763} // namespace
3764
3765/// ------------------------ NoAlias Argument Attribute ------------------------
3766
3767bool AANoAlias::isImpliedByIR(Attributor &A, const IRPosition &IRP,
3768 Attribute::AttrKind ImpliedAttributeKind,
3769 bool IgnoreSubsumingPositions) {
3770 assert(ImpliedAttributeKind == Attribute::NoAlias &&
3771 "Unexpected attribute kind");
3772 Value *Val = &IRP.getAssociatedValue();
3773 if (IRP.getPositionKind() != IRP_CALL_SITE_ARGUMENT) {
3774 if (isa<AllocaInst>(Val))
3775 return true;
3776 } else {
3777 IgnoreSubsumingPositions = true;
3778 }
3779
3780 if (isa<UndefValue>(Val))
3781 return true;
3782
3783 if (isa<ConstantPointerNull>(Val) &&
3784 !NullPointerIsDefined(IRP.getAnchorScope(),
3785 Val->getType()->getPointerAddressSpace()))
3786 return true;
3787
3788 if (A.hasAttr(IRP, {Attribute::ByVal, Attribute::NoAlias},
3789 IgnoreSubsumingPositions, Attribute::NoAlias))
3790 return true;
3791
3792 return false;
3793}
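// For illustration (hypothetical IR), values for which the above implies
// `noalias` without further analysis:
//   %a = alloca i32   ; a fresh stack object
//   ptr undef         ; undef trivially satisfies noalias
//   ptr null          ; when null is not a defined pointer in its space
// as well as any position already carrying `byval` or `noalias`.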
3794
3795namespace {
3796struct AANoAliasImpl : AANoAlias {
3797 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3798 assert(getAssociatedType()->isPointerTy() &&
3799 "Noalias is a pointer attribute");
3800 }
3801
3802 const std::string getAsStr(Attributor *A) const override {
3803 return getAssumed() ? "noalias" : "may-alias";
3804 }
3805};
3806
3807/// NoAlias attribute for a floating value.
3808struct AANoAliasFloating final : AANoAliasImpl {
3809 AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3810 : AANoAliasImpl(IRP, A) {}
3811
3812 /// See AbstractAttribute::updateImpl(...).
3813 ChangeStatus updateImpl(Attributor &A) override {
3814 // TODO: Implement this.
3815 return indicatePessimisticFixpoint();
3816 }
3817
3818 /// See AbstractAttribute::trackStatistics()
3819 void trackStatistics() const override {
3820 STATS_DECLTRACK_FLOATING_ATTR(noalias)
3821 }
3822};
3823
3824/// NoAlias attribute for an argument.
3825struct AANoAliasArgument final
3826 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3827 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3828 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3829
3830 /// See AbstractAttribute::update(...).
3831 ChangeStatus updateImpl(Attributor &A) override {
3832 // We have to make sure no-alias on the argument does not break
3833 // synchronization when this is a callback argument, see also [1] below.
3834 // If synchronization cannot be affected, we delegate to the base updateImpl
3835 // function, otherwise we give up for now.
3836
3837 // If the function is no-sync, no-alias cannot break synchronization.
3838 bool IsKnownNoSync;
3839 if (AA::hasAssumedIRAttr<Attribute::NoSync>(
3840 A, this, IRPosition::function_scope(getIRPosition()),
3841 DepClassTy::OPTIONAL, IsKnownNoSync))
3842 return Base::updateImpl(A);
3843
3844 // If the argument is read-only, no-alias cannot break synchronization.
3845 bool IsKnown;
3846 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3847 return Base::updateImpl(A);
3848
3849 // If the argument is never passed through callbacks, no-alias cannot break
3850 // synchronization.
3851 bool UsedAssumedInformation = false;
3852 if (A.checkForAllCallSites(
3853 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3854 true, UsedAssumedInformation))
3855 return Base::updateImpl(A);
3856
3857 // TODO: add no-alias but make sure it doesn't break synchronization by
3858 // introducing fake uses. See:
3859 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3860 // International Workshop on OpenMP 2018,
3861 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3862
3863 return indicatePessimisticFixpoint();
3864 }
3865
3866 /// See AbstractAttribute::trackStatistics()
3867 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3868};
3869
3870struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3871 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3872 : AANoAliasImpl(IRP, A) {}
3873
3874 /// Determine if the underlying value may alias with the call site argument
3875 /// \p OtherArgNo of \p CB (= the underlying call site).
3876 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3877 const AAMemoryBehavior &MemBehaviorAA,
3878 const CallBase &CB, unsigned OtherArgNo) {
3879 // We do not need to worry about aliasing with the underlying IRP.
3880 if (this->getCalleeArgNo() == (int)OtherArgNo)
3881 return false;
3882
3883 // If it is not a pointer or pointer vector we do not alias.
3884 const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3885 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3886 return false;
3887
3888 auto *CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3889 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3890
3891 // If the argument is readnone, there is no read-write aliasing.
3892 if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadNone()) {
3893 A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3894 return false;
3895 }
3896
3897 // If the argument is readonly and the underlying value is readonly, there
3898 // is no read-write aliasing.
3899 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3900 if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadOnly() &&
3901 IsReadOnly) {
3902 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3903 A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3904 return false;
3905 }
3906
3907 // We have to utilize actual alias analysis queries so we need the object.
3908 if (!AAR)
3909 AAR = A.getInfoCache().getAnalysisResultForFunction<AAManager>(
3910 *getAnchorScope());
3911
3912 // Try to rule it out at the call site.
3913 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3914 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3915 "callsite arguments: "
3916 << getAssociatedValue() << " " << *ArgOp << " => "
3917 << (IsAliasing ? "" : "no-") << "alias \n");
3918
3919 return IsAliasing;
3920 }
3921
3922 bool isKnownNoAliasDueToNoAliasPreservation(
3923 Attributor &A, AAResults *&AAR, const AAMemoryBehavior &MemBehaviorAA) {
3924 // We can deduce "noalias" if the following conditions hold.
3925 // (i) Associated value is assumed to be noalias in the definition.
3926 // (ii) Associated value is assumed to be no-capture in all the uses
3927 // possibly executed before this callsite.
3928 // (iii) There is no other pointer argument which could alias with the
3929 // value.
3930
3931 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3932 const auto *DerefAA = A.getAAFor<AADereferenceable>(
3933 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3934 return DerefAA ? DerefAA->getAssumedDereferenceableBytes() : 0;
3935 };
3936
3937 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3938 const Function *ScopeFn = VIRP.getAnchorScope();
3939 // Check whether the value is captured in the scope using AANoCapture.
3940 // Look at CFG and check only uses possibly executed before this
3941 // callsite.
3942 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3943 Instruction *UserI = cast<Instruction>(U.getUser());
3944
3945 // If UserI is the current instruction and there is a single potential use
3946 // of the value in UserI, we allow the use.
3947 // TODO: We should inspect the operands and allow those that cannot alias
3948 // with the value.
3949 if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3950 return true;
3951
3952 if (ScopeFn) {
3953 if (auto *CB = dyn_cast<CallBase>(UserI)) {
3954 if (CB->isArgOperand(&U)) {
3955
3956 unsigned ArgNo = CB->getArgOperandNo(&U);
3957
3958 bool IsKnownNoCapture;
3959 if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
3960 A, this, IRPosition::callsite_argument(*CB, ArgNo),
3961 DepClassTy::OPTIONAL, IsKnownNoCapture))
3962 return true;
3963 }
3964 }
3965
3966 if (!AA::isPotentiallyReachable(
3967 A, *UserI, *getCtxI(), *this, /* ExclusionSet */ nullptr,
3968 [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
3969 return true;
3970 }
3971
3972 // TODO: We should track the capturing uses in AANoCapture but the problem
3973 // is CGSCC runs. For those we would need to "allow" AANoCapture for
3974 // a value in the module slice.
3975 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3976 case UseCaptureKind::NO_CAPTURE:
3977 return true;
3978 case UseCaptureKind::MAY_CAPTURE:
3979 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3980 << "\n");
3981 return false;
3982 case UseCaptureKind::PASSTHROUGH:
3983 Follow = true;
3984 return true;
3985 }
3986 llvm_unreachable("unknown UseCaptureKind");
3987 };
3988
3989 bool IsKnownNoCapture;
3990 const AANoCapture *NoCaptureAA = nullptr;
3991 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
3992 A, this, VIRP, DepClassTy::NONE, IsKnownNoCapture, false, &NoCaptureAA);
3993 if (!IsAssumedNoCapture &&
3994 (!NoCaptureAA || !NoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
3995 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3996 LLVM_DEBUG(
3997 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3998 << " cannot be noalias as it is potentially captured\n");
3999 return false;
4000 }
4001 }
4002 if (NoCaptureAA)
4003 A.recordDependence(*NoCaptureAA, *this, DepClassTy::OPTIONAL);
4004
4005 // Check there is no other pointer argument which could alias with the
4006 // value passed at this call site.
4007 // TODO: AbstractCallSite
4008 const auto &CB = cast<CallBase>(getAnchorValue());
4009 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
4010 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
4011 return false;
4012
4013 return true;
4014 }
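 // For illustration (hypothetical C, assuming all three conditions hold):
 //   int *q = malloc(4); // (i) noalias at the definition
 //   use(q);             // (ii) q is not captured before the call and
 //                       // (iii) no other argument of the call aliases q
 // so the call-site argument can be marked noalias.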
4015
4016 /// See AbstractAttribute::updateImpl(...).
4017 ChangeStatus updateImpl(Attributor &A) override {
4018 // If the argument is readnone we are done as there are no accesses via the
4019 // argument.
4020 auto *MemBehaviorAA =
4021 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
4022 if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
4023 A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
4024 return ChangeStatus::UNCHANGED;
4025 }
4026
4027 bool IsKnownNoAlias;
4028 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
4029 if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
4030 A, this, VIRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
4031 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
4032 << " is not no-alias at the definition\n");
4033 return indicatePessimisticFixpoint();
4034 }
4035
4036 AAResults *AAR = nullptr;
4037 if (MemBehaviorAA &&
4038 isKnownNoAliasDueToNoAliasPreservation(A, AAR, *MemBehaviorAA)) {
4039 LLVM_DEBUG(
4040 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
4041 return ChangeStatus::UNCHANGED;
4042 }
4043
4044 return indicatePessimisticFixpoint();
4045 }
4046
4047 /// See AbstractAttribute::trackStatistics()
4048 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
4049};
4050
4051/// NoAlias attribute for function return value.
4052struct AANoAliasReturned final : AANoAliasImpl {
4053 AANoAliasReturned(const IRPosition &IRP, Attributor &A)
4054 : AANoAliasImpl(IRP, A) {}
4055
4056 /// See AbstractAttribute::updateImpl(...).
4057 ChangeStatus updateImpl(Attributor &A) override {
4058
4059 auto CheckReturnValue = [&](Value &RV) -> bool {
4060 if (Constant *C = dyn_cast<Constant>(&RV))
4061 if (C->isNullValue() || isa<UndefValue>(C))
4062 return true;
4063
4064 /// For now, we can only deduce noalias if we have call sites.
4065 /// FIXME: add more support.
4066 if (!isa<CallBase>(&RV))
4067 return false;
4068
4069 const IRPosition &RVPos = IRPosition::value(RV);
4070 bool IsKnownNoAlias;
4071 if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
4072 A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoAlias))
4073 return false;
4074
4075 bool IsKnownNoCapture;
4076 const AANoCapture *NoCaptureAA = nullptr;
4077 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
4078 A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
4079 &NoCaptureAA);
4080 return IsAssumedNoCapture ||
4081 (NoCaptureAA && NoCaptureAA->isAssumedNoCaptureMaybeReturned());
4082 };
4083
4084 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
4085 return indicatePessimisticFixpoint();
4086
4087 return ChangeStatus::UNCHANGED;
4088 }
4089
4090 /// See AbstractAttribute::trackStatistics()
4091 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
4092};
4093
4094/// NoAlias attribute deduction for a call site return value.
4095struct AANoAliasCallSiteReturned final
4096 : AACalleeToCallSite<AANoAlias, AANoAliasImpl> {
4097 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
4098 : AACalleeToCallSite<AANoAlias, AANoAliasImpl>(IRP, A) {}
4099
4100 /// See AbstractAttribute::trackStatistics()
4101 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
4102};
4103} // namespace
4104
4105/// -------------------AAIsDead Function Attribute-----------------------
4106
4107namespace {
4108struct AAIsDeadValueImpl : public AAIsDead {
4109 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4110
4111 /// See AAIsDead::isAssumedDead().
4112 bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
4113
4114 /// See AAIsDead::isKnownDead().
4115 bool isKnownDead() const override { return isKnown(IS_DEAD); }
4116
4117 /// See AAIsDead::isAssumedDead(BasicBlock *).
4118 bool isAssumedDead(const BasicBlock *BB) const override { return false; }
4119
4120 /// See AAIsDead::isKnownDead(BasicBlock *).
4121 bool isKnownDead(const BasicBlock *BB) const override { return false; }
4122
4123 /// See AAIsDead::isAssumedDead(Instruction *I).
4124 bool isAssumedDead(const Instruction *I) const override {
4125 return I == getCtxI() && isAssumedDead();
4126 }
4127
4128 /// See AAIsDead::isKnownDead(Instruction *I).
4129 bool isKnownDead(const Instruction *I) const override {
4130 return isAssumedDead(I) && isKnownDead();
4131 }
4132
4133 /// See AbstractAttribute::getAsStr().
4134 const std::string getAsStr(Attributor *A) const override {
4135 return isAssumedDead() ? "assumed-dead" : "assumed-live";
4136 }
4137
4138 /// Check if all uses are assumed dead.
4139 bool areAllUsesAssumedDead(Attributor &A, Value &V) {
4140 // Callers might not check the type; void has no uses.
4141 if (V.getType()->isVoidTy() || V.use_empty())
4142 return true;
4143
4144 // If we replace a value with a constant there are no uses left afterwards.
4145 if (!isa<Constant>(V)) {
4146 if (auto *I = dyn_cast<Instruction>(&V))
4147 if (!A.isRunOn(*I->getFunction()))
4148 return false;
4149 bool UsedAssumedInformation = false;
4150 std::optional<Constant *> C =
4151 A.getAssumedConstant(V, *this, UsedAssumedInformation);
4152 if (!C || *C)
4153 return true;
4154 }
4155
4156 auto UsePred = [&](const Use &U, bool &Follow) { return false; };
4157 // Explicitly set the dependence class to required because we want a long
4158 // chain of N dependent instructions to be considered live as soon as one is
4159 // without going through N update cycles. This is not required for
4160 // correctness.
4161 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
4162 DepClassTy::REQUIRED,
4163 /* IgnoreDroppableUses */ false);
4164 }
4165
4166 /// Determine if \p I is assumed to be side-effect free.
4167 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
4168 if (!I || wouldInstructionBeTriviallyDead(I))
4169 return true;
4170
4171 auto *CB = dyn_cast<CallBase>(I);
4172 if (!CB || isa<IntrinsicInst>(CB))
4173 return false;
4174
4175 const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
4176
4177 bool IsKnownNoUnwind;
4178 if (!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4179 A, this, CallIRP, DepClassTy::OPTIONAL, IsKnownNoUnwind))
4180 return false;
4181
4182 bool IsKnown;
4183 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
4184 }
4185};
4186
4187struct AAIsDeadFloating : public AAIsDeadValueImpl {
4188 AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
4189 : AAIsDeadValueImpl(IRP, A) {}
4190
4191 /// See AbstractAttribute::initialize(...).
4192 void initialize(Attributor &A) override {
4193 AAIsDeadValueImpl::initialize(A);
4194
4195 if (isa<UndefValue>(getAssociatedValue())) {
4196 indicatePessimisticFixpoint();
4197 return;
4198 }
4199
4200 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4201 if (!isAssumedSideEffectFree(A, I)) {
4202 if (!isa_and_nonnull<StoreInst>(I) && !isa_and_nonnull<FenceInst>(I))
4203 indicatePessimisticFixpoint();
4204 else
4205 removeAssumedBits(HAS_NO_EFFECT);
4206 }
4207 }
4208
4209 bool isDeadFence(Attributor &A, FenceInst &FI) {
4210 const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
4211 IRPosition::function(*FI.getFunction()), *this, DepClassTy::NONE);
4212 if (!ExecDomainAA || !ExecDomainAA->isNoOpFence(FI))
4213 return false;
4214 A.recordDependence(*ExecDomainAA, *this, DepClassTy::OPTIONAL);
4215 return true;
4216 }
4217
4218 bool isDeadStore(Attributor &A, StoreInst &SI,
4219 SmallSetVector<Instruction *, 8> *AssumeOnlyInst = nullptr) {
4220 // The lang ref now states volatile stores are not UB/dead, so let's skip them.
4221 if (SI.isVolatile())
4222 return false;
4223
4224 // If we are collecting assumes to be deleted we are in the manifest stage.
4225 // It's problematic to collect the potential copies again now so we use the
4226 // cached ones.
4227 bool UsedAssumedInformation = false;
4228 if (!AssumeOnlyInst) {
4229 PotentialCopies.clear();
4230 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
4231 UsedAssumedInformation)) {
4232 LLVM_DEBUG(
4233 dbgs()
4234 << "[AAIsDead] Could not determine potential copies of store!\n");
4235 return false;
4236 }
4237 }
4238 LLVM_DEBUG(dbgs() << "[AAIsDead] Store has " << PotentialCopies.size()
4239 << " potential copies.\n");
4240
4241 InformationCache &InfoCache = A.getInfoCache();
4242 return llvm::all_of(PotentialCopies, [&](Value *V) {
4243 if (A.isAssumedDead(IRPosition::value(*V), this, nullptr,
4244 UsedAssumedInformation))
4245 return true;
4246 if (auto *LI = dyn_cast<LoadInst>(V)) {
4247 if (llvm::all_of(LI->uses(), [&](const Use &U) {
4248 auto &UserI = cast<Instruction>(*U.getUser());
4249 if (InfoCache.isOnlyUsedByAssume(UserI)) {
4250 if (AssumeOnlyInst)
4251 AssumeOnlyInst->insert(&UserI);
4252 return true;
4253 }
4254 return A.isAssumedDead(U, this, nullptr, UsedAssumedInformation);
4255 })) {
4256 return true;
4257 }
4258 }
4259 LLVM_DEBUG(dbgs() << "[AAIsDead] Potential copy " << *V
4260 << " is assumed live!\n");
4261 return false;
4262 });
4263 }
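 // For illustration (hypothetical IR), a store whose only potential copy is a
 // load that merely feeds an assume is considered dead; the assume chain is
 // collected in \p AssumeOnlyInst for deletion during manifest:
 //   store i32 1, ptr %p
 //   %v = load i32, ptr %p
 //   %c = icmp eq i32 %v, 1
 //   call void @llvm.assume(i1 %c)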
4264
4265 /// See AbstractAttribute::getAsStr().
4266 const std::string getAsStr(Attributor *A) const override {
4267 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4268 if (isa_and_nonnull<StoreInst>(I))
4269 if (isValidState())
4270 return "assumed-dead-store";
4271 if (isa_and_nonnull<FenceInst>(I))
4272 if (isValidState())
4273 return "assumed-dead-fence";
4274 return AAIsDeadValueImpl::getAsStr(A);
4275 }
4276
4277 /// See AbstractAttribute::updateImpl(...).
4278 ChangeStatus updateImpl(Attributor &A) override {
4279 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4280 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
4281 if (!isDeadStore(A, *SI))
4282 return indicatePessimisticFixpoint();
4283 } else if (auto *FI = dyn_cast_or_null<FenceInst>(I)) {
4284 if (!isDeadFence(A, *FI))
4285 return indicatePessimisticFixpoint();
4286 } else {
4287 if (!isAssumedSideEffectFree(A, I))
4288 return indicatePessimisticFixpoint();
4289 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4290 return indicatePessimisticFixpoint();
4291 }
4292 return ChangeStatus::UNCHANGED;
4293 }
4294
4295 bool isRemovableStore() const override {
4296 return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
4297 }
4298
4299 /// See AbstractAttribute::manifest(...).
4300 ChangeStatus manifest(Attributor &A) override {
4301 Value &V = getAssociatedValue();
4302 if (auto *I = dyn_cast<Instruction>(&V)) {
4303 // If we get here we basically know the users are all dead. We check if
4304 // isAssumedSideEffectFree returns true here again because it might not be
4305 // the case: only the users may be dead while the instruction (= call) is
4306 // still needed.
4307 if (auto *SI = dyn_cast<StoreInst>(I)) {
4308 SmallSetVector<Instruction *, 8> AssumeOnlyInst;
4309 bool IsDead = isDeadStore(A, *SI, &AssumeOnlyInst);
4310 (void)IsDead;
4311 assert(IsDead && "Store was assumed to be dead!");
4312 A.deleteAfterManifest(*I);
4313 for (size_t i = 0; i < AssumeOnlyInst.size(); ++i) {
4314 Instruction *AOI = AssumeOnlyInst[i];
4315 for (auto *Usr : AOI->users())
4316 AssumeOnlyInst.insert(cast<Instruction>(Usr));
4317 A.deleteAfterManifest(*AOI);
4318 }
4319 return ChangeStatus::CHANGED;
4320 }
4321 if (auto *FI = dyn_cast<FenceInst>(I)) {
4322 assert(isDeadFence(A, *FI));
4323 A.deleteAfterManifest(*FI);
4324 return ChangeStatus::CHANGED;
4325 }
4326 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
4327 A.deleteAfterManifest(*I);
4328 return ChangeStatus::CHANGED;
4329 }
4330 }
4331 return ChangeStatus::UNCHANGED;
4332 }
4333
4334 /// See AbstractAttribute::trackStatistics()
4335 void trackStatistics() const override {
4336 STATS_DECLTRACK_FLOATING_ATTR(IsDead)
4337 }
4338
4339private:
4340 // The potential copies of a dead store, used for deletion during manifest.
4341 SmallSetVector<Value *, 4> PotentialCopies;
4342};
4343
4344struct AAIsDeadArgument : public AAIsDeadFloating {
4345 AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
4346 : AAIsDeadFloating(IRP, A) {}
4347
4348 /// See AbstractAttribute::manifest(...).
4349 ChangeStatus manifest(Attributor &A) override {
4350 Argument &Arg = *getAssociatedArgument();
4351 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
4352 if (A.registerFunctionSignatureRewrite(
4353 Arg, /* ReplacementTypes */ {},
4354 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
4355 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
4356 return ChangeStatus::CHANGED;
4357 }
4358 return ChangeStatus::UNCHANGED;
4359 }
4360
4361 /// See AbstractAttribute::trackStatistics()
4362 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
4363};
4364
4365struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
4366 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
4367 : AAIsDeadValueImpl(IRP, A) {}
4368
4369 /// See AbstractAttribute::initialize(...).
4370 void initialize(Attributor &A) override {
4371 AAIsDeadValueImpl::initialize(A);
4372 if (isa<UndefValue>(getAssociatedValue()))
4373 indicatePessimisticFixpoint();
4374 }
4375
4376 /// See AbstractAttribute::updateImpl(...).
4377 ChangeStatus updateImpl(Attributor &A) override {
4378 // TODO: Once we have call site specific value information we can provide
4379 // call site specific liveness information and then it makes
4380 // sense to specialize attributes for call site arguments instead of
4381 // redirecting requests to the callee argument.
4382 Argument *Arg = getAssociatedArgument();
4383 if (!Arg)
4384 return indicatePessimisticFixpoint();
4385 const IRPosition &ArgPos = IRPosition::argument(*Arg);
4386 auto *ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
4387 if (!ArgAA)
4388 return indicatePessimisticFixpoint();
4389 return clampStateAndIndicateChange(getState(), ArgAA->getState());
4390 }
4391
4392 /// See AbstractAttribute::manifest(...).
4393 ChangeStatus manifest(Attributor &A) override {
4394 CallBase &CB = cast<CallBase>(getAnchorValue());
4395 Use &U = CB.getArgOperandUse(getCallSiteArgNo());
4396 assert(!isa<UndefValue>(U.get()) &&
4397 "Expected undef values to be filtered out!");
4398 UndefValue &UV = *UndefValue::get(U->getType());
4399 if (A.changeUseAfterManifest(U, UV))
4400 return ChangeStatus::CHANGED;
4401 return ChangeStatus::UNCHANGED;
4402 }
4403
4404 /// See AbstractAttribute::trackStatistics()
4405 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
4406};
4407
4408struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
4409 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
4410 : AAIsDeadFloating(IRP, A) {}
4411
4412 /// See AAIsDead::isAssumedDead().
4413 bool isAssumedDead() const override {
4414 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
4415 }
4416
4417 /// See AbstractAttribute::initialize(...).
4418 void initialize(Attributor &A) override {
4419 AAIsDeadFloating::initialize(A);
4420 if (isa<UndefValue>(getAssociatedValue())) {
4421 indicatePessimisticFixpoint();
4422 return;
4423 }
4424
4425 // We track this separately as a secondary state.
4426 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
4427 }
4428
4429 /// See AbstractAttribute::updateImpl(...).
4430 ChangeStatus updateImpl(Attributor &A) override {
4431 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4432 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
4433 IsAssumedSideEffectFree = false;
4434 Changed = ChangeStatus::CHANGED;
4435 }
4436 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4437 return indicatePessimisticFixpoint();
4438 return Changed;
4439 }
4440
4441 /// See AbstractAttribute::trackStatistics()
4442 void trackStatistics() const override {
4443 if (IsAssumedSideEffectFree)
4444 STATS_DECLTRACK_CSRET_ATTR(IsDead)
4445 else
4446 STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
4447 }
4448
4449 /// See AbstractAttribute::getAsStr().
4450 const std::string getAsStr(Attributor *A) const override {
4451 return isAssumedDead()
4452 ? "assumed-dead"
4453 : (getAssumed() ? "assumed-dead-users" : "assumed-live");
4454 }
4455
4456private:
4457 bool IsAssumedSideEffectFree = true;
4458};
4459
4460struct AAIsDeadReturned : public AAIsDeadValueImpl {
4461 AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
4462 : AAIsDeadValueImpl(IRP, A) {}
4463
4464 /// See AbstractAttribute::updateImpl(...).
4465 ChangeStatus updateImpl(Attributor &A) override {
4466
4467 bool UsedAssumedInformation = false;
4468 A.checkForAllInstructions([](Instruction &) { return true; }, *this,
4469 {Instruction::Ret}, UsedAssumedInformation);
4470
4471 auto PredForCallSite = [&](AbstractCallSite ACS) {
4472 if (ACS.isCallbackCall() || !ACS.getInstruction())
4473 return false;
4474 return areAllUsesAssumedDead(A, *ACS.getInstruction());
4475 };
4476
4477 if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4478 UsedAssumedInformation))
4479 return indicatePessimisticFixpoint();
4480
4481 return ChangeStatus::UNCHANGED;
4482 }
4483
4484 /// See AbstractAttribute::manifest(...).
4485 ChangeStatus manifest(Attributor &A) override {
4486 // TODO: Rewrite the signature to return void?
4487 bool AnyChange = false;
4488 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
4489 auto RetInstPred = [&](Instruction &I) {
4490 ReturnInst &RI = cast<ReturnInst>(I);
4491 if (!isa<UndefValue>(RI.getReturnValue()))
4492 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
4493 return true;
4494 };
4495 bool UsedAssumedInformation = false;
4496 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
4497 UsedAssumedInformation);
4498 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
4499 }
4500
4501 /// See AbstractAttribute::trackStatistics()
4502 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
4503};
4504
4505struct AAIsDeadFunction : public AAIsDead {
4506 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4507
4508 /// See AbstractAttribute::initialize(...).
4509 void initialize(Attributor &A) override {
4510 Function *F = getAnchorScope();
4511 assert(F && "Did expect an anchor function");
4512 if (!isAssumedDeadInternalFunction(A)) {
4513 ToBeExploredFrom.insert(&F->getEntryBlock().front());
4514 assumeLive(A, F->getEntryBlock());
4515 }
4516 }
4517
4518 bool isAssumedDeadInternalFunction(Attributor &A) {
4519 if (!getAnchorScope()->hasLocalLinkage())
4520 return false;
4521 bool UsedAssumedInformation = false;
4522 return A.checkForAllCallSites([](AbstractCallSite) { return false; }, *this,
4523 true, UsedAssumedInformation);
4524 }
4525
4526 /// See AbstractAttribute::getAsStr().
4527 const std::string getAsStr(Attributor *A) const override {
4528 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
4529 std::to_string(getAnchorScope()->size()) + "][#TBEP " +
4530 std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
4531 std::to_string(KnownDeadEnds.size()) + "]";
4532 }
4533
4534 /// See AbstractAttribute::manifest(...).
4535 ChangeStatus manifest(Attributor &A) override {
4536 assert(getState().isValidState() &&
4537 "Attempted to manifest an invalid state!");
4538
4539 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4540 Function &F = *getAnchorScope();
4541
4542 if (AssumedLiveBlocks.empty()) {
4543 A.deleteAfterManifest(F);
4544 return ChangeStatus::CHANGED;
4545 }
4546
4547 // Flag to determine if we can change an invoke to a call assuming the
4548 // callee is nounwind. This is not possible if the personality of the
4549 // function allows catching asynchronous exceptions.
4550 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
4551
4552 KnownDeadEnds.set_union(ToBeExploredFrom);
4553 for (const Instruction *DeadEndI : KnownDeadEnds) {
4554 auto *CB = dyn_cast<CallBase>(DeadEndI);
4555 if (!CB)
4556 continue;
4557 bool IsKnownNoReturn;
4558 bool MayReturn = !AA::hasAssumedIRAttr<Attribute::NoReturn>(
4559 A, this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL,
4560 IsKnownNoReturn);
4561 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
4562 continue;
4563
4564 if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
4565 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
4566 else
4567 A.changeToUnreachableAfterManifest(
4568 const_cast<Instruction *>(DeadEndI->getNextNode()));
4569 HasChanged = ChangeStatus::CHANGED;
4570 }
4571
4572 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
4573 for (BasicBlock &BB : F)
4574 if (!AssumedLiveBlocks.count(&BB)) {
4575 A.deleteAfterManifest(BB);
4577 HasChanged = ChangeStatus::CHANGED;
4578 }
4579
4580 return HasChanged;
4581 }
4582
4583 /// See AbstractAttribute::updateImpl(...).
4584 ChangeStatus updateImpl(Attributor &A) override;
4585
4586 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
4587 assert(From->getParent() == getAnchorScope() &&
4588 To->getParent() == getAnchorScope() &&
4589 "Used AAIsDead of the wrong function");
4590 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
4591 }
4592
4593 /// See AbstractAttribute::trackStatistics()
4594 void trackStatistics() const override {}
4595
4596 /// Returns true if the function is assumed dead.
4597 bool isAssumedDead() const override { return false; }
4598
4599 /// See AAIsDead::isKnownDead().
4600 bool isKnownDead() const override { return false; }
4601
4602 /// See AAIsDead::isAssumedDead(BasicBlock *).
4603 bool isAssumedDead(const BasicBlock *BB) const override {
4604 assert(BB->getParent() == getAnchorScope() &&
4605 "BB must be in the same anchor scope function.");
4606
4607 if (!getAssumed())
4608 return false;
4609 return !AssumedLiveBlocks.count(BB);
4610 }
4611
4612 /// See AAIsDead::isKnownDead(BasicBlock *).
4613 bool isKnownDead(const BasicBlock *BB) const override {
4614 return getKnown() && isAssumedDead(BB);
4615 }
4616
4617 /// See AAIsDead::isAssumed(Instruction *I).
4618 bool isAssumedDead(const Instruction *I) const override {
4619 assert(I->getParent()->getParent() == getAnchorScope() &&
4620 "Instruction must be in the same anchor scope function.");
4621
4622 if (!getAssumed())
4623 return false;
4624
4625 // If it is not in AssumedLiveBlocks then it is for sure dead.
4626 // Otherwise, it can still be after noreturn call in a live block.
4627 if (!AssumedLiveBlocks.count(I->getParent()))
4628 return true;
4629
4630 // If it is not after a liveness barrier it is live.
4631 const Instruction *PrevI = I->getPrevNode();
4632 while (PrevI) {
4633 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
4634 return true;
4635 PrevI = PrevI->getPrevNode();
4636 }
4637 return false;
4638 }
4639
4640 /// See AAIsDead::isKnownDead(Instruction *I).
4641 bool isKnownDead(const Instruction *I) const override {
4642 return getKnown() && isAssumedDead(I);
4643 }
4644
4645 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
4646 /// that internal functions called from \p BB should now be looked at.
4647 bool assumeLive(Attributor &A, const BasicBlock &BB) {
4648 if (!AssumedLiveBlocks.insert(&BB).second)
4649 return false;
4650
4651 // We assume that all of BB is (probably) live now and if there are calls to
4652 // internal functions we will assume that those are now live as well. This
4653 // is a performance optimization for blocks with calls to a lot of internal
4654 // functions. It can however cause dead functions to be treated as live.
4655 for (const Instruction &I : BB)
4656 if (const auto *CB = dyn_cast<CallBase>(&I))
4657 if (auto *F = dyn_cast_if_present<Function>(CB->getCalledOperand()))
4658 if (F->hasLocalLinkage())
4659 A.markLiveInternalFunction(*F);
4660 return true;
4661 }
4662
4663 /// Collection of instructions that need to be explored again, e.g., we
4664 /// did assume they do not transfer control to (one of their) successors.
4665 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
4666
4667 /// Collection of instructions that are known to not transfer control.
4668 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
4669
4670 /// Collection of all assumed live edges
4671 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
4672
4673 /// Collection of all assumed live BasicBlocks.
4674 DenseSet<const BasicBlock *> AssumedLiveBlocks;
4675};
4676
4677static bool
4678identifyAliveSuccessors(Attributor &A, const CallBase &CB,
4679 AbstractAttribute &AA,
4680 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4681 const IRPosition &IPos = IRPosition::callsite_function(CB);
4682
4683 bool IsKnownNoReturn;
4684 if (AA::hasAssumedIRAttr<Attribute::NoReturn>(
4685 A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoReturn))
4686 return !IsKnownNoReturn;
4687 if (CB.isTerminator())
4688 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4689 else
4690 AliveSuccessors.push_back(CB.getNextNode());
4691 return false;
4692}
4693
4694static bool
4695identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4696 AbstractAttribute &AA,
4697 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4698 bool UsedAssumedInformation =
4699 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4700
4701 // First, determine if we can change an invoke to a call assuming the
4702 // callee is nounwind. This is not possible if the personality of the
4703 // function allows catching asynchronous exceptions.
4704 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4705 AliveSuccessors.push_back(&II.getUnwindDest()->front());
4706 } else {
4707 const IRPosition &IPos = IRPosition::callsite_function(II);
4708
4709 bool IsKnownNoUnwind;
4710 if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4711 A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
4712 UsedAssumedInformation |= !IsKnownNoUnwind;
4713 } else {
4714 AliveSuccessors.push_back(&II.getUnwindDest()->front());
4715 }
4716 }
4717 return UsedAssumedInformation;
4718}
4719
4720static bool
4721identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4722 AbstractAttribute &AA,
4723 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4724 bool UsedAssumedInformation = false;
4725 if (BI.getNumSuccessors() == 1) {
4726 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4727 } else {
4728 std::optional<Constant *> C =
4729 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4730 if (!C || isa_and_nonnull<UndefValue>(*C)) {
4731 // No value yet, assume both edges are dead.
4732 } else if (isa_and_nonnull<ConstantInt>(*C)) {
4733 const BasicBlock *SuccBB =
4734 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4735 AliveSuccessors.push_back(&SuccBB->front());
4736 } else {
4737 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4738 AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4739 UsedAssumedInformation = false;
4740 }
4741 }
4742 return UsedAssumedInformation;
4743}
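// For illustration (hypothetical IR): given
//   br i1 %cond, label %then, label %else
// if %cond simplifies to the constant true, only %then is reported alive; an
// undef or still-unknown condition keeps both edges dead for now; any other
// condition keeps both successors alive.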
4744
4745static bool
4746identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4747 AbstractAttribute &AA,
4748 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4749 bool UsedAssumedInformation = false;
4750 SmallVector<AA::ValueAndContext> Values;
4751 if (!A.getAssumedSimplifiedValues(IRPosition::value(*SI.getCondition()), &AA,
4752 Values, AA::AnyScope,
4753 UsedAssumedInformation)) {
4754 // Something went wrong, assume all successors are live.
4755 for (const BasicBlock *SuccBB : successors(SI.getParent()))
4756 AliveSuccessors.push_back(&SuccBB->front());
4757 return false;
4758 }
4759
4760 if (Values.empty() ||
4761 (Values.size() == 1 &&
4762 isa_and_nonnull<UndefValue>(Values.front().getValue()))) {
4763 // No valid value yet, assume all edges are dead.
4764 return UsedAssumedInformation;
4765 }
4766
4767 Type &Ty = *SI.getCondition()->getType();
4768 SmallPtrSet<ConstantInt *, 8> Constants;
4769 auto CheckForConstantInt = [&](Value *V) {
4770 if (auto *CI = dyn_cast_if_present<ConstantInt>(AA::getWithType(*V, Ty))) {
4771 Constants.insert(CI);
4772 return true;
4773 }
4774 return false;
4775 };
4776
4777 if (!all_of(Values, [&](AA::ValueAndContext &VAC) {
4778 return CheckForConstantInt(VAC.getValue());
4779 })) {
4780 for (const BasicBlock *SuccBB : successors(SI.getParent()))
4781 AliveSuccessors.push_back(&SuccBB->front());
4782 return UsedAssumedInformation;
4783 }
4784
4785 unsigned MatchedCases = 0;
4786 for (const auto &CaseIt : SI.cases()) {
4787 if (Constants.count(CaseIt.getCaseValue())) {
4788 ++MatchedCases;
4789 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4790 }
4791 }
4792
4793 // If all potential values have been matched, we will not visit the default
4794 // case.
4795 if (MatchedCases < Constants.size())
4796 AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4797 return UsedAssumedInformation;
4798}
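// For illustration (hypothetical IR): given
//   switch i32 %c, label %default [ i32 0, label %bb0
//                                   i32 1, label %bb1 ]
// if %c simplifies to the single constant 1, only %bb1 is reported alive; the
// default destination stays alive only if some potential value matches no
// case.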
4799
4800ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4801 ChangeStatus Change = ChangeStatus::UNCHANGED;
4802
4803 if (AssumedLiveBlocks.empty()) {
4804 if (isAssumedDeadInternalFunction(A))
4805 return ChangeStatus::UNCHANGED;
4806
4807 Function *F = getAnchorScope();
4808 ToBeExploredFrom.insert(&F->getEntryBlock().front());
4809 assumeLive(A, F->getEntryBlock());
4810 Change = ChangeStatus::CHANGED;
4811 }
4812
4813 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4814 << getAnchorScope()->size() << "] BBs and "
4815 << ToBeExploredFrom.size() << " exploration points and "
4816 << KnownDeadEnds.size() << " known dead ends\n");
4817
4818 // Copy and clear the list of instructions we need to explore from. It is
4819 // refilled with instructions the next update has to look at.
4820 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4821 ToBeExploredFrom.end());
4822 decltype(ToBeExploredFrom) NewToBeExploredFrom;
4823
4825 while (!Worklist.empty()) {
4826 const Instruction *I = Worklist.pop_back_val();
4827 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4828
4829 // Fast forward for uninteresting instructions. We could look for UB here
4830 // though.
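 // Editorial note (not in the original source): only calls (which may be
 // noreturn) and terminators can affect liveness here, so plain
 // instructions in between are safe to skip.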
4831 while (!I->isTerminator() && !isa<CallBase>(I))
4832 I = I->getNextNode();
4833
4834 AliveSuccessors.clear();
4835
4836 bool UsedAssumedInformation = false;
4837 switch (I->getOpcode()) {
4838 // TODO: look for (assumed) UB to backwards propagate "deadness".
4839 default:
4840 assert(I->isTerminator() &&
4841 "Expected non-terminators to be handled already!");
4842 for (const BasicBlock *SuccBB : successors(I->getParent()))
4843 AliveSuccessors.push_back(&SuccBB->front());
4844 break;
4845 case Instruction::Call:
4846 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4847 *this, AliveSuccessors);
4848 break;
4849 case Instruction::Invoke:
4850 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4851 *this, AliveSuccessors);
4852 break;
4853 case Instruction::Br:
4854 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4855 *this, AliveSuccessors);
4856 break;
4857 case Instruction::Switch:
4858 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4859 *this, AliveSuccessors);
4860 break;
4861 }
4862
4863 if (UsedAssumedInformation) {
4864 NewToBeExploredFrom.insert(I);
4865 } else if (AliveSuccessors.empty() ||
4866 (I->isTerminator() &&
4867 AliveSuccessors.size() < I->getNumSuccessors())) {
4868 if (KnownDeadEnds.insert(I))
4869 Change = ChangeStatus::CHANGED;
4870 }
4871
4872 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4873 << AliveSuccessors.size() << " UsedAssumedInformation: "
4874 << UsedAssumedInformation << "\n");
4875
4876 for (const Instruction *AliveSuccessor : AliveSuccessors) {
4877 if (!I->isTerminator()) {
4878 assert(AliveSuccessors.size() == 1 &&
4879 "Non-terminator expected to have a single successor!");
4880 Worklist.push_back(AliveSuccessor);
4881 } else {
4882 // record the assumed live edge
4883 auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4884 if (AssumedLiveEdges.insert(Edge).second)
4885 Change = ChangeStatus::CHANGED;
4886 if (assumeLive(A, *AliveSuccessor->getParent()))
4887 Worklist.push_back(AliveSuccessor);
4888 }
4889 }
4890 }
4891
4892 // Check if the content of ToBeExploredFrom changed, ignore the order.
4893 if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4894 llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4895 return !ToBeExploredFrom.count(I);
4896 })) {
4897 Change = ChangeStatus::CHANGED;
4898 ToBeExploredFrom = std::move(NewToBeExploredFrom);
4899 }
4900
4901 // If we know everything is live there is no need to query for liveness.
4902 // Instead, indicating a pessimistic fixpoint will cause the state to be
4903 // "invalid" and all queries to be answered conservatively without lookups.
4904 // To be in this state we have to (1) have finished the exploration, (2) not
4905 // have ruled unreachable code dead, and (3) not have discovered any
4906 // non-trivial dead end.
4907 if (ToBeExploredFrom.empty() &&
4908 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4909 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4910 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4911 }))
4912 return indicatePessimisticFixpoint();
4913 return Change;
4914}
4915
4916/// Liveness information for a call site.
4917struct AAIsDeadCallSite final : AAIsDeadFunction {
4918 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4919 : AAIsDeadFunction(IRP, A) {}
4920
4921 /// See AbstractAttribute::initialize(...).
4922 void initialize(Attributor &A) override {
4923 // TODO: Once we have call site specific value information we can provide
4924 // call site specific liveness information and then it makes
4925 // sense to specialize attributes for call sites instead of
4926 // redirecting requests to the callee.
4927 llvm_unreachable("Abstract attributes for liveness are not "
4928 "supported for call sites yet!");
4929 }
4930
4931 /// See AbstractAttribute::updateImpl(...).
4932 ChangeStatus updateImpl(Attributor &A) override {
4933 return indicatePessimisticFixpoint();
4934 }
4935
4936 /// See AbstractAttribute::trackStatistics()
4937 void trackStatistics() const override {}
4938};
4939} // namespace
4940
4941/// -------------------- Dereferenceable Argument Attribute --------------------
4942
4943namespace {
4944struct AADereferenceableImpl : AADereferenceable {
4945 AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4946 : AADereferenceable(IRP, A) {}
4947 using StateType = DerefState;
4948
4949 /// See AbstractAttribute::initialize(...).
4950 void initialize(Attributor &A) override {
4951 Value &V = *getAssociatedValue().stripPointerCasts();
4952 SmallVector<Attribute, 4> Attrs;
4953 A.getAttrs(getIRPosition(),
4954 {Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4955 Attrs, /* IgnoreSubsumingPositions */ false);
4956 for (const Attribute &Attr : Attrs)
4957 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4958
4959 // Ensure we initialize the non-null AA (if necessary).
4960 bool IsKnownNonNull;
4961 AA::hasAssumedIRAttr<Attribute::NonNull>(
4962 A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNonNull);
4963
4964 bool CanBeNull, CanBeFreed;
4965 takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4966 A.getDataLayout(), CanBeNull, CanBeFreed));
4967
4968 if (Instruction *CtxI = getCtxI())
4969 followUsesInMBEC(*this, A, getState(), *CtxI);
4970 }
4971
4972 /// See AbstractAttribute::getState()
4973 /// {
4974 StateType &getState() override { return *this; }
4975 const StateType &getState() const override { return *this; }
4976 /// }
4977
4978 /// Helper function for collecting accessed bytes in must-be-executed-context
4979 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4980 DerefState &State) {
4981 const Value *UseV = U->get();
4982 if (!UseV->getType()->isPointerTy())
4983 return;
4984
4985 std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4986 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4987 return;
4988
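 // Illustrative example (not in the original source): a 4-byte load from
 // the associated pointer at constant offset 8 records accessed bytes
 // [8, 12); together with accesses covering [0, 8) this can later justify
 // dereferenceable(12).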
4989 int64_t Offset;
4990 const Value *Base = GetPointerBaseWithConstantOffset(
4991 Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4992 if (Base && Base == &getAssociatedValue())
4993 State.addAccessedBytes(Offset, Loc->Size.getValue());
4994 }
4995
4996 /// See followUsesInMBEC
4997 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4998 AADereferenceable::StateType &State) {
4999 bool IsNonNull = false;
5000 bool TrackUse = false;
5001 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
5002 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
5003 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
5004 << " for instruction " << *I << "\n");
5005
5006 addAccessedBytesForUse(A, U, I, State);
5007 State.takeKnownDerefBytesMaximum(DerefBytes);
5008 return TrackUse;
5009 }
5010
5011 /// See AbstractAttribute::manifest(...).
5012 ChangeStatus manifest(Attributor &A) override {
5013 ChangeStatus Change = AADereferenceable::manifest(A);
5014 bool IsKnownNonNull;
5015 bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
5016 A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
5017 if (IsAssumedNonNull &&
5018 A.hasAttr(getIRPosition(), Attribute::DereferenceableOrNull)) {
5019 A.removeAttrs(getIRPosition(), {Attribute::DereferenceableOrNull});
5020 return ChangeStatus::CHANGED;
5021 }
5022 return Change;
5023 }
5024
5025 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5026 SmallVectorImpl<Attribute> &Attrs) const override {
5027 // TODO: Add *_globally support
5028 bool IsKnownNonNull;
5029 bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
5030 A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
5031 if (IsAssumedNonNull)
5032 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
5033 Ctx, getAssumedDereferenceableBytes()));
5034 else
5035 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
5036 Ctx, getAssumedDereferenceableBytes()));
5037 }
5038
5039 /// See AbstractAttribute::getAsStr().
5040 const std::string getAsStr(Attributor *A) const override {
5041 if (!getAssumedDereferenceableBytes())
5042 return "unknown-dereferenceable";
5043 bool IsKnownNonNull;
5044 bool IsAssumedNonNull = false;
5045 if (A)
5046 IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
5047 *A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
5048 return std::string("dereferenceable") +
5049 (IsAssumedNonNull ? "" : "_or_null") +
5050 (isAssumedGlobal() ? "_globally" : "") + "<" +
5051 std::to_string(getKnownDereferenceableBytes()) + "-" +
5052 std::to_string(getAssumedDereferenceableBytes()) + ">" +
5053 (!A ? " [non-null is unknown]" : "");
5054 }
5055};
5056
5057/// Dereferenceable attribute for a floating value.
5058struct AADereferenceableFloating : AADereferenceableImpl {
5059 AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
5060 : AADereferenceableImpl(IRP, A) {}
5061
5062 /// See AbstractAttribute::updateImpl(...).
5063 ChangeStatus updateImpl(Attributor &A) override {
5064 bool Stripped;
5065 bool UsedAssumedInformation = false;
5066 SmallVector<AA::ValueAndContext> Values;
5067 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5068 AA::AnyScope, UsedAssumedInformation)) {
5069 Values.push_back({getAssociatedValue(), getCtxI()});
5070 Stripped = false;
5071 } else {
5072 Stripped = Values.size() != 1 ||
5073 Values.front().getValue() != &getAssociatedValue();
5074 }
5075
5076 const DataLayout &DL = A.getDataLayout();
5077 DerefState T;
5078
5079 auto VisitValueCB = [&](const Value &V) -> bool {
5080 unsigned IdxWidth =
5081 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
5082 APInt Offset(IdxWidth, 0);
5083 const Value *Base = stripAndAccumulateOffsets(
5084 A, *this, &V, DL, Offset, /* GetMinOffset */ false,
5085 /* AllowNonInbounds */ true);
5086
5087 const auto *AA = A.getAAFor<AADereferenceable>(
5088 *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
5089 int64_t DerefBytes = 0;
5090 if (!AA || (!Stripped && this == AA)) {
5091 // Use IR information if we did not strip anything.
5092 // TODO: track globally.
5093 bool CanBeNull, CanBeFreed;
5094 DerefBytes =
5095 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
5096 T.GlobalState.indicatePessimisticFixpoint();
5097 } else {
5098 const DerefState &DS = AA->getState();
5099 DerefBytes = DS.DerefBytesState.getAssumed();
5100 T.GlobalState &= DS.GlobalState;
5101 }
5102
5103 // For now we do not try to "increase" dereferenceability due to negative
5104 // indices, as we first have to come up with code to deal with loops and
5105 // with overflows of the dereferenceable bytes.
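 // Illustrative example (not in the original source): for an offset of -4
 // we do not conclude DerefBytes + 4; the negative offset is clamped to 0
 // below.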
5106 int64_t OffsetSExt = Offset.getSExtValue();
5107 if (OffsetSExt < 0)
5108 OffsetSExt = 0;
5109
5110 T.takeAssumedDerefBytesMinimum(
5111 std::max(int64_t(0), DerefBytes - OffsetSExt));
5112
5113 if (this == AA) {
5114 if (!Stripped) {
5115 // If nothing was stripped IR information is all we got.
5116 T.takeKnownDerefBytesMaximum(
5117 std::max(int64_t(0), DerefBytes - OffsetSExt));
5118 T.indicatePessimisticFixpoint();
5119 } else if (OffsetSExt > 0) {
5120 // If something was stripped but there is circular reasoning, we look at
5121 // the offset. If it is positive, the circular update would merely drive
5122 // the assumed dereferenceable bytes down to the known value one step at
5123 // a time; indicating a pessimistic fixpoint here accelerates that
5124 // convergence.
5125 T.indicatePessimisticFixpoint();
5126 }
5127 }
5128
5129 return T.isValidState();
5130 };
5131
5132 for (const auto &VAC : Values)
5133 if (!VisitValueCB(*VAC.getValue()))
5134 return indicatePessimisticFixpoint();
5135
5136 return clampStateAndIndicateChange(getState(), T);
5137 }
5138
5139 /// See AbstractAttribute::trackStatistics()
5140 void trackStatistics() const override {
5141 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
5142 }
5143};
5144
5145/// Dereferenceable attribute for a return value.
5146struct AADereferenceableReturned final
5147 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
5148 using Base =
5149 AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>;
5150 AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
5151 : Base(IRP, A) {}
5152
5153 /// See AbstractAttribute::trackStatistics()
5154 void trackStatistics() const override {
5155 STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
5156 }
5157};
5158
5159/// Dereferenceable attribute for an argument
5160struct AADereferenceableArgument final
5161 : AAArgumentFromCallSiteArguments<AADereferenceable,
5162 AADereferenceableImpl> {
5163 using Base =
5164 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
5165 AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
5166 : Base(IRP, A) {}
5167
5168 /// See AbstractAttribute::trackStatistics()
5169 void trackStatistics() const override {
5170 STATS_DECLTRACK_ARG_ATTR(dereferenceable)
5171 }
5172};
5173
5174/// Dereferenceable attribute for a call site argument.
5175struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
5176 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
5177 : AADereferenceableFloating(IRP, A) {}
5178
5179 /// See AbstractAttribute::trackStatistics()
5180 void trackStatistics() const override {
5181 STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
5182 }
5183};
5184
5185/// Dereferenceable attribute deduction for a call site return value.
5186struct AADereferenceableCallSiteReturned final
5187 : AACalleeToCallSite<AADereferenceable, AADereferenceableImpl> {
5188 using Base = AACalleeToCallSite<AADereferenceable, AADereferenceableImpl>;
5189 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
5190 : Base(IRP, A) {}
5191
5192 /// See AbstractAttribute::trackStatistics()
5193 void trackStatistics() const override {
5194 STATS_DECLTRACK_CS_ATTR(dereferenceable);
5195 }
5196};
5197} // namespace
5198
5199// ------------------------ Align Argument Attribute ------------------------
5200
5201namespace {
5202static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
5203 Value &AssociatedValue, const Use *U,
5204 const Instruction *I, bool &TrackUse) {
5205 // We need to follow common pointer manipulation uses to the accesses they
5206 // feed into.
5207 if (isa<CastInst>(I)) {
5208 // Follow all but ptr2int casts.
5209 TrackUse = !isa<PtrToIntInst>(I);
5210 return 0;
5211 }
5212 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
5213 if (GEP->hasAllConstantIndices())
5214 TrackUse = true;
5215 return 0;
5216 }
5217
5218 MaybeAlign MA;
5219 if (const auto *CB = dyn_cast<CallBase>(I)) {
5220 if (CB->isBundleOperand(U) || CB->isCallee(U))
5221 return 0;
5222
5223 unsigned ArgNo = CB->getArgOperandNo(U);
5224 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
5225 // As long as we only use known information there is no need to track
5226 // dependences here.
5227 auto *AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
5228 if (AlignAA)
5229 MA = MaybeAlign(AlignAA->getKnownAlign());
5230 }
5231
5232 const DataLayout &DL = A.getDataLayout();
5233 const Value *UseV = U->get();
5234 if (auto *SI = dyn_cast<StoreInst>(I)) {
5235 if (SI->getPointerOperand() == UseV)
5236 MA = SI->getAlign();
5237 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
5238 if (LI->getPointerOperand() == UseV)
5239 MA = LI->getAlign();
5240 } else if (auto *AI = dyn_cast<AtomicRMWInst>(I)) {
5241 if (AI->getPointerOperand() == UseV)
5242 MA = AI->getAlign();
5243 } else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
5244 if (AI->getPointerOperand() == UseV)
5245 MA = AI->getAlign();
5246 }
5247
5248 if (!MA || *MA <= QueryingAA.getKnownAlign())
5249 return 0;
5250
5251 unsigned Alignment = MA->value();
5252 int64_t Offset;
5253
5254 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
5255 if (Base == &AssociatedValue) {
5256 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5257 // So we can say that the maximum power of two which is a divisor of
5258 // gcd(Offset, Alignment) is an alignment.
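 // Illustrative example (not in the original source): Offset = 20 and
 // Alignment = 8 give gcd(20, 8) = 4 and bit_floor(4) = 4, so only 4-byte
 // alignment is known for the base pointer.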
5259
5260 uint32_t gcd = std::gcd(uint32_t(abs((int32_t)Offset)), Alignment);
5261 Alignment = llvm::bit_floor(gcd);
5262 }
5263 }
5264
5265 return Alignment;
5266}
5267
5268struct AAAlignImpl : AAAlign {
5269 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
5270
5271 /// See AbstractAttribute::initialize(...).
5272 void initialize(Attributor &A) override {
5273 SmallVector<Attribute, 4> Attrs;
5274 A.getAttrs(getIRPosition(), {Attribute::Alignment}, Attrs);
5275 for (const Attribute &Attr : Attrs)
5276 takeKnownMaximum(Attr.getValueAsInt());
5277
5278 Value &V = *getAssociatedValue().stripPointerCasts();
5279 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
5280
5281 if (Instruction *CtxI = getCtxI())
5282 followUsesInMBEC(*this, A, getState(), *CtxI);
5283 }
5284
5285 /// See AbstractAttribute::manifest(...).
5286 ChangeStatus manifest(Attributor &A) override {
5287 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
5288
5289 // Check for users that allow alignment annotations.
5290 Value &AssociatedValue = getAssociatedValue();
5291 for (const Use &U : AssociatedValue.uses()) {
5292 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
5293 if (SI->getPointerOperand() == &AssociatedValue)
5294 if (SI->getAlign() < getAssumedAlign()) {
5295 STATS_DECLTRACK(AAAlign, Store,
5296 "Number of times alignment added to a store");
5297 SI->setAlignment(getAssumedAlign());
5298 LoadStoreChanged = ChangeStatus::CHANGED;
5299 }
5300 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
5301 if (LI->getPointerOperand() == &AssociatedValue)
5302 if (LI->getAlign() < getAssumedAlign()) {
5303 LI->setAlignment(getAssumedAlign());
5305 "Number of times alignment added to a load");
5306 LoadStoreChanged = ChangeStatus::CHANGED;
5307 }
5308 }
5309 }
5310
5311 ChangeStatus Changed = AAAlign::manifest(A);
5312
5313 Align InheritAlign =
5314 getAssociatedValue().getPointerAlignment(A.getDataLayout());
5315 if (InheritAlign >= getAssumedAlign())
5316 return LoadStoreChanged;
5317 return Changed | LoadStoreChanged;
5318 }
5319
5320 // TODO: Provide a helper to determine the implied ABI alignment and check it
5321 // in the existing manifest method and a new one for AAAlignImpl, to avoid
5322 // making the alignment explicit if it did not improve.
5323
5324 /// See AbstractAttribute::getDeducedAttributes
5325 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5326 SmallVectorImpl<Attribute> &Attrs) const override {
5327 if (getAssumedAlign() > 1)
5328 Attrs.emplace_back(
5329 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
5330 }
5331
5332 /// See followUsesInMBEC
5333 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
5334 AAAlign::StateType &State) {
5335 bool TrackUse = false;
5336
5337 unsigned int KnownAlign =
5338 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
5339 State.takeKnownMaximum(KnownAlign);
5340
5341 return TrackUse;
5342 }
5343
5344 /// See AbstractAttribute::getAsStr().
5345 const std::string getAsStr(Attributor *A) const override {
5346 return "align<" + std::to_string(getKnownAlign().value()) + "-" +
5347 std::to_string(getAssumedAlign().value()) + ">";
5348 }
5349};
5350
5351/// Align attribute for a floating value.
5352struct AAAlignFloating : AAAlignImpl {
5353 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
5354
5355 /// See AbstractAttribute::updateImpl(...).
5356 ChangeStatus updateImpl(Attributor &A) override {
5357 const DataLayout &DL = A.getDataLayout();
5358
5359 bool Stripped;
5360 bool UsedAssumedInformation = false;
5361 SmallVector<AA::ValueAndContext> Values;
5362 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5363 AA::AnyScope, UsedAssumedInformation)) {
5364 Values.push_back({getAssociatedValue(), getCtxI()});
5365 Stripped = false;
5366 } else {
5367 Stripped = Values.size() != 1 ||
5368 Values.front().getValue() != &getAssociatedValue();
5369 }
5370
5371 StateType T;
5372 auto VisitValueCB = [&](Value &V) -> bool {
5373 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
5374 return true;
5375 const auto *AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
5376 DepClassTy::REQUIRED);
5377 if (!AA || (!Stripped && this == AA)) {
5378 int64_t Offset;
5379 unsigned Alignment = 1;
5380 if (const Value *Base =
5381 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
5382 // TODO: Use AAAlign for the base too.
5383 Align PA = Base->getPointerAlignment(DL);
5384 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5385 // So we can say that the maximum power of two which is a divisor of
5386 // gcd(Offset, Alignment) is an alignment.
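 // Illustrative example (not in the original source): a base alignment of
 // 16 with Offset = 6 gives gcd(6, 16) = 2, so the value is only known to
 // be 2-byte aligned.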
5387
5388 uint32_t gcd =
5389 std::gcd(uint32_t(abs((int32_t)Offset)), uint32_t(PA.value()));
5390 Alignment = llvm::bit_floor(gcd);
5391 } else {
5392 Alignment = V.getPointerAlignment(DL).value();
5393 }
5394 // Use only IR information if we did not strip anything.
5395 T.takeKnownMaximum(Alignment);
5396 T.indicatePessimisticFixpoint();
5397 } else {
5398 // Use abstract attribute information.
5399 const AAAlign::StateType &DS = AA->getState();
5400 T ^= DS;
5401 }
5402 return T.isValidState();
5403 };
5404
5405 for (const auto &VAC : Values) {
5406 if (!VisitValueCB(*VAC.getValue()))
5407 return indicatePessimisticFixpoint();
5408 }
5409
5410 // TODO: If we know we visited all incoming values, thus none are assumed
5411 // dead, we can take the known information from the state T.
5412 return clampStateAndIndicateChange(getState(), T);
5413 }
5414
5415 /// See AbstractAttribute::trackStatistics()
5416 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
5417};
5418
5419/// Align attribute for function return value.
5420struct AAAlignReturned final
5421 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
5422 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
5423 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5424
5425 /// See AbstractAttribute::trackStatistics()
5426 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
5427};
5428
5429/// Align attribute for function argument.
5430struct AAAlignArgument final
5431 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
5432 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
5433 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5434
5435 /// See AbstractAttribute::manifest(...).
5436 ChangeStatus manifest(Attributor &A) override {
5437 // If the associated argument is involved in a must-tail call we give up
5438 // because we would need to keep the argument alignments of caller and
5439 // callee in-sync. Just does not seem worth the trouble right now.
5440 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
5441 return ChangeStatus::UNCHANGED;
5442 return Base::manifest(A);
5443 }
5444
5445 /// See AbstractAttribute::trackStatistics()
5446 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
5447};
5448
5449struct AAAlignCallSiteArgument final : AAAlignFloating {
5450 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
5451 : AAAlignFloating(IRP, A) {}
5452
5453 /// See AbstractAttribute::manifest(...).
5454 ChangeStatus manifest(Attributor &A) override {
5455 // If the associated argument is involved in a must-tail call we give up
5456 // because we would need to keep the argument alignments of caller and
5457 // callee in-sync. Just does not seem worth the trouble right now.
5458 if (Argument *Arg = getAssociatedArgument())
5459 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
5460 return ChangeStatus::UNCHANGED;
5461 ChangeStatus Changed = AAAlignImpl::manifest(A);
5462 Align InheritAlign =
5463 getAssociatedValue().getPointerAlignment(A.getDataLayout());
5464 if (InheritAlign >= getAssumedAlign())
5465 Changed = ChangeStatus::UNCHANGED;
5466 return Changed;
5467 }
5468
5469 /// See AbstractAttribute::updateImpl(Attributor &A).
5470 ChangeStatus updateImpl(Attributor &A) override {
5471 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
5472 if (Argument *Arg = getAssociatedArgument()) {
5473 // We only take known information from the argument
5474 // so we do not need to track a dependence.
5475 const auto *ArgAlignAA = A.getAAFor<AAAlign>(
5476 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
5477 if (ArgAlignAA)
5478 takeKnownMaximum(ArgAlignAA->getKnownAlign().value());
5479 }
5480 return Changed;
5481 }
5482
5483 /// See AbstractAttribute::trackStatistics()
5484 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
5485};
5486
5487/// Align attribute deduction for a call site return value.
5488struct AAAlignCallSiteReturned final
5489 : AACalleeToCallSite<AAAlign, AAAlignImpl> {
5490 using Base = AACalleeToCallSite<AAAlign, AAAlignImpl>;
5491 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
5492 : Base(IRP, A) {}
5493
5494 /// See AbstractAttribute::trackStatistics()
5495 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
5496};
5497} // namespace
5498
5499/// ------------------ Function No-Return Attribute ----------------------------
5500namespace {
5501struct AANoReturnImpl : public AANoReturn {
5502 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
5503
5504 /// See AbstractAttribute::initialize(...).
5505 void initialize(Attributor &A) override {
5506 bool IsKnown;
5507 assert(!AA::hasAssumedIRAttr<Attribute::NoReturn>(
5508 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5509 (void)IsKnown;
5510 }
5511
5512 /// See AbstractAttribute::getAsStr().
5513 const std::string getAsStr(Attributor *A) const override {
5514 return getAssumed() ? "noreturn" : "may-return";
5515 }
5516
5517 /// See AbstractAttribute::updateImpl(Attributor &A).
5518 ChangeStatus updateImpl(Attributor &A) override {
5519 auto CheckForNoReturn = [](Instruction &) { return false; };
5520 bool UsedAssumedInformation = false;
5521 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
5522 {(unsigned)Instruction::Ret},
5523 UsedAssumedInformation))
5524 return indicatePessimisticFixpoint();
5525 return ChangeStatus::UNCHANGED;
5526 }
5527};
5528
5529struct AANoReturnFunction final : AANoReturnImpl {
5530 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
5531 : AANoReturnImpl(IRP, A) {}
5532
5533 /// See AbstractAttribute::trackStatistics()
5534 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
5535};
5536
5537/// NoReturn attribute deduction for a call site.
5538struct AANoReturnCallSite final
5539 : AACalleeToCallSite<AANoReturn, AANoReturnImpl> {
5540 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
5541 : AACalleeToCallSite<AANoReturn, AANoReturnImpl>(IRP, A) {}
5542
5543 /// See AbstractAttribute::trackStatistics()
5544 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
5545};
5546} // namespace
5547
5548/// ----------------------- Instance Info ---------------------------------
5549
5550namespace {
5551/// A class to hold the state of the instance-info attribute.
5552struct AAInstanceInfoImpl : public AAInstanceInfo {
5553 AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
5554 : AAInstanceInfo(IRP, A) {}
5555
5556 /// See AbstractAttribute::initialize(...).
5557 void initialize(Attributor &A) override {
5558 Value &V = getAssociatedValue();
5559 if (auto *C = dyn_cast<Constant>(&V)) {
5560 if (C->isThreadDependent())
5561 indicatePessimisticFixpoint();
5562 else
5563 indicateOptimisticFixpoint();
5564 return;
5565 }
5566 if (auto *CB = dyn_cast<CallBase>(&V))
5567 if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
5568 !CB->mayReadFromMemory()) {
5569 indicateOptimisticFixpoint();
5570 return;
5571 }
5572 if (auto *I = dyn_cast<Instruction>(&V)) {
5573 const auto *CI =
5574 A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
5575 *I->getFunction());
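 // Editorial note (not in the original source): an instruction inside a
 // cycle (e.g., an alloca or a call in a loop body) may produce a
 // different object per iteration, so its value is not a unique instance.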
5576 if (mayBeInCycle(CI, I, /* HeaderOnly */ false)) {
5577 indicatePessimisticFixpoint();
5578 return;
5579 }
5580 }
5581 }
5582
5583 /// See AbstractAttribute::updateImpl(...).
5584 ChangeStatus updateImpl(Attributor &A) override {
5585 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5586
5587 Value &V = getAssociatedValue();
5588 const Function *Scope = nullptr;
5589 if (auto *I = dyn_cast<Instruction>(&V))
5590 Scope = I->getFunction();
5591 if (auto *A = dyn_cast<Argument>(&V)) {
5592 Scope = A->getParent();
5593 if (!Scope->hasLocalLinkage())
5594 return Changed;
5595 }
5596 if (!Scope)
5597 return indicateOptimisticFixpoint();
5598
5599 bool IsKnownNoRecurse;
5600 if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
5601 A, this, IRPosition::function(*Scope), DepClassTy::OPTIONAL,
5602 IsKnownNoRecurse))
5603 return Changed;
5604
5605 auto UsePred = [&](const Use &U, bool &Follow) {
5606 const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
5607 if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
5608 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5609 Follow = true;
5610 return true;
5611 }
5612 if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
5613 (isa<StoreInst>(UserI) &&
5614 cast<StoreInst>(UserI)->getValueOperand() != U.get()))
5615 return true;
5616 if (auto *CB = dyn_cast<CallBase>(UserI)) {
5617 // This check does not guarantee uniqueness, but for now it ensures that
5618 // we cannot end up with two versions of \p U treated as one.
5619 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
5620 if (!Callee || !Callee->hasLocalLinkage())
5621 return true;
5622 if (!CB->isArgOperand(&U))
5623 return false;
5624 const auto *ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
5625 *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
5626 DepClassTy::OPTIONAL);
5627 if (!ArgInstanceInfoAA ||
5628 !ArgInstanceInfoAA->isAssumedUniqueForAnalysis())
5629 return false;
5630 // If this call base might reach the scope again we might forward the
5631 // argument back here. This is very conservative.
5632 if (AA::isPotentiallyReachable(
5633 A, *CB, *Scope, *this, /* ExclusionSet */ nullptr,
5634 [Scope](const Function &Fn) { return &Fn != Scope; }))
5635 return false;
5636 return true;
5637 }
5638 return false;
5639 };
5640
5641 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
5642 if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
5643 auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
5644 if ((isa<AllocaInst>(Ptr) || isNoAliasCall(Ptr)) &&
5645 AA::isDynamicallyUnique(A, *this, *Ptr))
5646 return true;
5647 }
5648 return false;
5649 };
5650
5651 if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
5652 DepClassTy::OPTIONAL,
5653 /* IgnoreDroppableUses */ true, EquivalentUseCB))
5654 return indicatePessimisticFixpoint();
5655
5656 return Changed;
5657 }
5658
5659 /// See AbstractState::getAsStr().
5660 const std::string getAsStr(Attributor *A) const override {
5661 return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
5662 }
5663
5664 /// See AbstractAttribute::trackStatistics()
5665 void trackStatistics() const override {}
5666};
5667
5668/// InstanceInfo attribute for floating values.
5669struct AAInstanceInfoFloating : AAInstanceInfoImpl {
5670 AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
5671 : AAInstanceInfoImpl(IRP, A) {}
5672};
5673
5674/// InstanceInfo attribute for function arguments.
5675struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
5676 AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
5677 : AAInstanceInfoFloating(IRP, A) {}
5678};
5679
5680/// InstanceInfo attribute for call site arguments.
5681struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
5682 AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
5683 : AAInstanceInfoImpl(IRP, A) {}
5684
5685 /// See AbstractAttribute::updateImpl(...).
5686 ChangeStatus updateImpl(Attributor &A) override {
5687 // TODO: Once we have call site specific value information we can provide
5688 // call site specific liveness information and then it makes
5689 // sense to specialize attributes for call sites arguments instead of
5690 // redirecting requests to the callee argument.
5691 Argument *Arg = getAssociatedArgument();
5692 if (!Arg)
5693 return indicatePessimisticFixpoint();
5694 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5695 auto *ArgAA =
5696 A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
5697 if (!ArgAA)
5698 return indicatePessimisticFixpoint();
5699 return clampStateAndIndicateChange(getState(), ArgAA->getState());
5700 }
5701};
5702
5703/// InstanceInfo attribute for function return value.
5704struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
5705 AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
5706 : AAInstanceInfoImpl(IRP, A) {
5707 llvm_unreachable("InstanceInfo is not applicable to function returns!");
5708 }
5709
5710 /// See AbstractAttribute::initialize(...).
5711 void initialize(Attributor &A) override {
5712 llvm_unreachable("InstanceInfo is not applicable to function returns!");
5713 }
5714
5715 /// See AbstractAttribute::updateImpl(...).
5716 ChangeStatus updateImpl(Attributor &A) override {
5717 llvm_unreachable("InstanceInfo is not applicable to function returns!");
5718 }
5719};
5720
5721/// InstanceInfo attribute deduction for a call site return value.
5722struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
5723 AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
5724 : AAInstanceInfoFloating(IRP, A) {}
5725};
5726} // namespace
5727
5728/// ----------------------- Variable Capturing ---------------------------------
5729bool AANoCapture::isImpliedByIR(Attributor &A, const IRPosition &IRP,
5730 Attribute::AttrKind ImpliedAttributeKind,
5731 bool IgnoreSubsumingPositions) {
5732 assert(ImpliedAttributeKind == Attribute::NoCapture &&
5733 "Unexpected attribute kind");
5734 Value &V = IRP.getAssociatedValue();
5735 if (!IRP.isArgumentPosition())
5736 return V.use_empty();
5737
5738 // You cannot "capture" null in the default address space.
5739 //
5740 // FIXME: This should use NullPointerIsDefined to account for the function
5741 // attribute.
5742 if (isa<UndefValue>(V) || (isa<ConstantPointerNull>(V) &&
5743 V.getType()->getPointerAddressSpace() == 0)) {
5744 return true;
5745 }
5746
5747 if (A.hasAttr(IRP, {Attribute::NoCapture},
5748 /* IgnoreSubsumingPositions */ true, Attribute::NoCapture))
5749 return true;
5750
5751 if (IRP.getPositionKind() == IRP_CALL_SITE_ARGUMENT)
5752 if (Argument *Arg = IRP.getAssociatedArgument())
5753 if (A.hasAttr(IRPosition::argument(*Arg),
5754 {Attribute::NoCapture, Attribute::ByVal},
5755 /* IgnoreSubsumingPositions */ true)) {
5756 A.manifestAttrs(IRP,
5757 Attribute::get(V.getContext(), Attribute::NoCapture));
5758 return true;
5759 }
5760
5761 if (const Function *F = IRP.getAssociatedFunction()) {
5762 // Check what state the associated function can actually capture.
5763 BitIntegerState State;
5764 determineFunctionCaptureCapabilities(IRP, *F, State);
5765 if (State.isKnown(NO_CAPTURE)) {
5766 A.manifestAttrs(IRP,
5767 Attribute::get(V.getContext(), Attribute::NoCapture));
5768 return true;
5769 }
5770 }
5771
5772 return false;
5773}
5774
5775/// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5776/// depending on the ability of the function associated with \p IRP to capture
5777/// state in memory and through "returning/throwing", respectively.
5778static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5779 const Function &F,
5780 BitIntegerState &State) {
5781 // TODO: Once we have memory behavior attributes we should use them here.
5782
5783 // If we know we cannot communicate or write to memory, we do not care about
5784 // ptr2int anymore.
5785 bool ReadOnly = F.onlyReadsMemory();
5786 bool NoThrow = F.doesNotThrow();
5787 bool IsVoidReturn = F.getReturnType()->isVoidTy();
5788 if (ReadOnly && NoThrow && IsVoidReturn) {
5789 State.addKnownBits(NO_CAPTURE);
5790 return;
5791 }
5792
5793 // A function cannot capture state in memory if it only reads memory; it can,
5794 // however, return/throw state, and that state might be influenced by the
5795 // pointer value, e.g., loading from a returned pointer might reveal a bit.
5796 if (ReadOnly)
5797 State.addKnownBits(NOT_CAPTURED_IN_MEM);
5798
5799 // A function cannot communicate state back if it does not throw
5800 // exceptions and does not return values.
5801 if (NoThrow && IsVoidReturn)
5802 State.addKnownBits(NOT_CAPTURED_IN_RET);
5803
5804 // Check existing "returned" attributes.
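 // Illustrative example (not in the original source): in a readonly,
 // nothrow function where a *different* argument carries `returned`, the
 // queried argument cannot escape via memory, exceptions, or the return
 // value, hence NO_CAPTURE below.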
5805 int ArgNo = IRP.getCalleeArgNo();
5806 if (!NoThrow || ArgNo < 0 ||
5807 !F.getAttributes().hasAttrSomewhere(Attribute::Returned))
5808 return;
5809
5810 for (unsigned U = 0, E = F.arg_size(); U < E; ++U)
5811 if (F.hasParamAttribute(U, Attribute::Returned)) {
5812 if (U == unsigned(ArgNo))
5813 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5814 else if (ReadOnly)
5815 State.addKnownBits(NO_CAPTURE);
5816 else
5817 State.addKnownBits(NOT_CAPTURED_IN_RET);
5818 break;
5819 }
5820}
5821
5822namespace {
5823/// A class to hold the state of the no-capture attribute.
5824struct AANoCaptureImpl : public AANoCapture {
5825 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5826
5827 /// See AbstractAttribute::initialize(...).
5828 void initialize(Attributor &A) override {
5829 bool IsKnown;
5830 assert(!AA::hasAssumedIRAttr<Attribute::NoCapture>(
5831 A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5832 (void)IsKnown;
5833 }
5834
5835 /// See AbstractAttribute::updateImpl(...).
5836 ChangeStatus updateImpl(Attributor &A) override;
5837
5838 /// See AbstractAttribute::getDeducedAttributes(...).
5839 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5840 SmallVectorImpl<Attribute> &Attrs) const override {
5841 if (!isAssumedNoCaptureMaybeReturned())
5842 return;
5843
5844 if (isArgumentPosition()) {
5845 if (isAssumedNoCapture())
5846 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5847 else if (ManifestInternal)
5848 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5849 }
5850 }
5851
5852 /// See AbstractState::getAsStr().
5853 const std::string getAsStr(Attributor *A) const override {
5854 if (isKnownNoCapture())
5855 return "known not-captured";
5856 if (isAssumedNoCapture())
5857 return "assumed not-captured";
5858 if (isKnownNoCaptureMaybeReturned())
5859 return "known not-captured-maybe-returned";
5860 if (isAssumedNoCaptureMaybeReturned())
5861 return "assumed not-captured-maybe-returned";
5862 return "assumed-captured";
5863 }
5864
5865 /// Check the use \p U and update \p State accordingly. Return true if we
5866 /// should continue to update the state.
5867 bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5868 bool &Follow) {
5869 Instruction *UInst = cast<Instruction>(U.getUser());
5870 LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5871 << *UInst << "\n");
5872
5873 // We cannot follow a ptr2int result, so assume the worst.
5874 if (isa<PtrToIntInst>(UInst)) {
5875 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5876 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5877 /* Return */ true);
5878 }
5879
5880 // For stores we already checked if we can follow them, if they make it
5881 // here we give up.
5882 if (isa<StoreInst>(UInst))
5883 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5884 /* Return */ true);
5885
5886 // Explicitly catch return instructions.
5887 if (isa<ReturnInst>(UInst)) {
5888 if (UInst->getFunction() == getAnchorScope())
5889 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5890 /* Return */ true);
5891 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5892 /* Return */ true);
5893 }
5894
5895 // For now we only use special logic for call sites. However, the tracker
5896 // itself knows about a lot of other non-capturing cases already.
5897 auto *CB = dyn_cast<CallBase>(UInst);
5898 if (!CB || !CB->isArgOperand(&U))
5899 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5900 /* Return */ true);
5901
5902 unsigned ArgNo = CB->getArgOperandNo(&U);
5903 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5904 // If we have an abstract no-capture attribute for the argument we can use
5905 // it to justify a non-capture attribute here. This allows recursion!
5906 bool IsKnownNoCapture;
5907 const AANoCapture *ArgNoCaptureAA = nullptr;
5908 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
5909 A, this, CSArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
5910 &ArgNoCaptureAA);
5911 if (IsAssumedNoCapture)
5912 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5913 /* Return */ false);
5914 if (ArgNoCaptureAA && ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned()) {
5915 Follow = true;
5916 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5917 /* Return */ false);
5918 }
5919
5920 // Lastly, we could not find a reason to assume no-capture, so we don't.
5921 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5922 /* Return */ true);
5923 }
5924
5925 /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5926 /// \p CapturedInRet, then return true if we should continue updating the
5927 /// state.
5928 static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5929 bool CapturedInInt, bool CapturedInRet) {
5930 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5931 << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5932 if (CapturedInMem)
5933 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5934 if (CapturedInInt)
5935 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5936 if (CapturedInRet)
5937 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5938 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5939 }
5940};
5941
5942ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5943 const IRPosition &IRP = getIRPosition();
5944 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5945 : &IRP.getAssociatedValue();
5946 if (!V)
5947 return indicatePessimisticFixpoint();
5948
5949 const Function *F =
5950 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5951
5952 // TODO: Is the checkForAllUses below useful for constants?
5953 if (!F)
5954 return indicatePessimisticFixpoint();
5955
5956 AANoCapture::StateType T;
5957 const IRPosition &FnPos = IRPosition::function(*F);
5958
5959 // Readonly means we cannot capture through memory.
5960 bool IsKnown;
5961 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5962 T.addKnownBits(NOT_CAPTURED_IN_MEM);
5963 if (IsKnown)
5964 addKnownBits(NOT_CAPTURED_IN_MEM);
5965 }
5966
5967 // Make sure all returned values are different than the underlying value.
5968 // TODO: we could do this in a more sophisticated way inside
5969 // AAReturnedValues, e.g., track all values that escape through returns
5970 // directly somehow.
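 // Illustrative example (not in the original source): for `ret ptr %arg`
 // the argument escapes through the return value, so the associated
 // argument must not appear among the simplified returned values.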
5971 auto CheckReturnedArgs = [&](bool &UsedAssumedInformation) {
5972 SmallVector<AA::ValueAndContext> Values;
5973 if (!A.getAssumedSimplifiedValues(IRPosition::returned(*F), this, Values,
5974 AA::ValueScope::Intraprocedural,
5975 UsedAssumedInformation))
5976 return false;
5977 bool SeenConstant = false;
5978 for (const AA::ValueAndContext &VAC : Values) {
5979 if (isa<Constant>(VAC.getValue())) {
5980 if (SeenConstant)
5981 return false;
5982 SeenConstant = true;
5983 } else if (!isa<Argument>(VAC.getValue()) ||
5984 VAC.getValue() == getAssociatedArgument())
5985 return false;
5986 }
5987 return true;
5988 };
5989
5990 bool IsKnownNoUnwind;
5991 if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
5992 A, this, FnPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
5993 bool IsVoidTy = F->getReturnType()->isVoidTy();
5994 bool UsedAssumedInformation = false;
5995 if (IsVoidTy || CheckReturnedArgs(UsedAssumedInformation)) {
5996 T.addKnownBits(NOT_CAPTURED_IN_RET);
5997 if (T.isKnown(NOT_CAPTURED_IN_MEM))
5998 return ChangeStatus::UNCHANGED;
5999 if (IsKnownNoUnwind && (IsVoidTy || !UsedAssumedInformation)) {
6000 addKnownBits(NOT_CAPTURED_IN_RET);
6001 if (isKnown(NOT_CAPTURED_IN_MEM))
6002 return indicateOptimisticFixpoint();
6003 }
6004 }
6005 }
6006
6007 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
6008 const auto *DerefAA = A.getAAFor<AADereferenceable>(
6009 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
6010 return DerefAA && DerefAA->getAssumedDereferenceableBytes();
6011 };
6012
6013 auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
6014 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
6015 case UseCaptureKind::NO_CAPTURE:
6016 return true;
6017 case UseCaptureKind::MAY_CAPTURE:
6018 return checkUse(A, T, U, Follow);
6019 case UseCaptureKind::PASSTHROUGH:
6020 Follow = true;
6021 return true;
6022 }
6023 llvm_unreachable("Unexpected use capture kind!");
6024 };
6025
6026 if (!A.checkForAllUses(UseCheck, *this, *V))
6027 return indicatePessimisticFixpoint();
6028
6029 AANoCapture::StateType &S = getState();
6030 auto Assumed = S.getAssumed();
6031 S.intersectAssumedBits(T.getAssumed());
6032 if (!isAssumedNoCaptureMaybeReturned())
6033 return indicatePessimisticFixpoint();
6034 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
6035 : ChangeStatus::CHANGED;
6036}
6037
6038/// NoCapture attribute for function arguments.
6039struct AANoCaptureArgument final : AANoCaptureImpl {
6040 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
6041 : AANoCaptureImpl(IRP, A) {}
6042
6043 /// See AbstractAttribute::trackStatistics()
6044 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
6045};
6046
6047/// NoCapture attribute for call site arguments.
6048struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
6049 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
6050 : AANoCaptureImpl(IRP, A) {}
6051
6052 /// See AbstractAttribute::updateImpl(...).
6053 ChangeStatus updateImpl(Attributor &A) override {
6054 // TODO: Once we have call site specific value information we can provide
6055 // call site specific liveness information and then it makes
6056 // sense to specialize attributes for call sites arguments instead of
6057 // redirecting requests to the callee argument.
6058 Argument *Arg = getAssociatedArgument();
6059 if (!Arg)
6060 return indicatePessimisticFixpoint();
6061 const IRPosition &ArgPos = IRPosition::argument(*Arg);
6062 bool IsKnownNoCapture;
6063 const AANoCapture *ArgAA = nullptr;
6064 if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
6065 A, this, ArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
6066 &ArgAA))
6067 return ChangeStatus::UNCHANGED;
6068 if (!ArgAA || !ArgAA->isAssumedNoCaptureMaybeReturned())
6069 return indicatePessimisticFixpoint();
6070 return clampStateAndIndicateChange(getState(), ArgAA->getState());
6071 }
6072
6073 /// See AbstractAttribute::trackStatistics()
6074 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
6075};
6076
6077/// NoCapture attribute for floating values.
6078struct AANoCaptureFloating final : AANoCaptureImpl {
6079 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
6080 : AANoCaptureImpl(IRP, A) {}
6081
6082 /// See AbstractAttribute::trackStatistics()
6083 void trackStatistics() const override {
6084 STATS_DECLTRACK_FLOATING_ATTR(nocapture)
6085 }
6086};
6087
6088/// NoCapture attribute for function return value.
6089struct AANoCaptureReturned final : AANoCaptureImpl {
6090 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
6091 : AANoCaptureImpl(IRP, A) {
6092 llvm_unreachable("NoCapture is not applicable to function returns!");
6093 }
6094
6095 /// See AbstractAttribute::initialize(...).
6096 void initialize(Attributor &A) override {
6097 llvm_unreachable("NoCapture is not applicable to function returns!");
6098 }
6099
6100 /// See AbstractAttribute::updateImpl(...).
6101 ChangeStatus updateImpl(Attributor &A) override {
6102 llvm_unreachable("NoCapture is not applicable to function returns!");
6103 }
6104
6105 /// See AbstractAttribute::trackStatistics()
6106 void trackStatistics() const override {}
6107};
6108
6109/// NoCapture attribute deduction for a call site return value.
6110struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
6111 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
6112 : AANoCaptureImpl(IRP, A) {}
6113
6114 /// See AbstractAttribute::initialize(...).
6115 void initialize(Attributor &A) override {
6116 const Function *F = getAnchorScope();
6117 // Check what state the associated function can actually capture.
6118 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
6119 }
6120
6121 /// See AbstractAttribute::trackStatistics()
6122 void trackStatistics() const override {
6123 STATS_DECLTRACK_CSRET_ATTR(nocapture)
6124 }
6125};
6126} // namespace
6127
6128/// ------------------ Value Simplify Attribute ----------------------------
6129
6130bool ValueSimplifyStateType::unionAssumed(std::optional<Value *> Other) {
6131 // FIXME: Add typecast support.
6132 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6133 SimplifiedAssociatedValue, Other, Ty);
6134 if (SimplifiedAssociatedValue == std::optional<Value *>(nullptr))
6135 return false;
6136
6137 LLVM_DEBUG({
6138 if (SimplifiedAssociatedValue)
6139 dbgs() << "[ValueSimplify] is assumed to be "
6140 << **SimplifiedAssociatedValue << "\n";
6141 else
6142 dbgs() << "[ValueSimplify] is assumed to be <none>\n";
6143 });
6144 return true;
6145}
6146
6147namespace {
6148struct AAValueSimplifyImpl : AAValueSimplify {
6149 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
6150 : AAValueSimplify(IRP, A) {}
6151
6152 /// See AbstractAttribute::initialize(...).
6153 void initialize(Attributor &A) override {
6154 if (getAssociatedValue().getType()->isVoidTy())
6155 indicatePessimisticFixpoint();
6156 if (A.hasSimplificationCallback(getIRPosition()))
6157 indicatePessimisticFixpoint();
6158 }
6159
6160 /// See AbstractAttribute::getAsStr().
6161 const std::string getAsStr(Attributor *A) const override {
6162 LLVM_DEBUG({
6163 dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
6164 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
6165 dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
6166 });
6167 return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
6168 : "not-simple";
6169 }
6170
6171 /// See AbstractAttribute::trackStatistics()
6172 void trackStatistics() const override {}
6173
6174 /// See AAValueSimplify::getAssumedSimplifiedValue()
6175 std::optional<Value *>
6176 getAssumedSimplifiedValue(Attributor &A) const override {
6177 return SimplifiedAssociatedValue;
6178 }
6179
6180 /// Ensure the return value is \p V with type \p Ty; if not possible, return
6181 /// nullptr. If \p Check is true we will only verify such an operation would
6182 /// succeed and return a non-nullptr value if that is the case. No IR is
6183 /// generated or modified.
6184 static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
6185 bool Check) {
6186 if (auto *TypedV = AA::getWithType(V, Ty))
6187 return TypedV;
6188 if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
6189 return Check ? &V
6190 : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6191 &V, &Ty, "", CtxI->getIterator());
6192 return nullptr;
6193 }
6194
6195 /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
6196 /// If \p Check is true we will only verify such an operation would succeed and
6197 /// return a non-nullptr value if that is the case. No IR is generated or
6198 /// modified.
6199 static Value *reproduceInst(Attributor &A,
6200 const AbstractAttribute &QueryingAA,
6201 Instruction &I, Type &Ty, Instruction *CtxI,
6202 bool Check, ValueToValueMapTy &VMap) {
6203 assert(CtxI && "Cannot reproduce an instruction without context!");
6204 if (Check && (I.mayReadFromMemory() ||
6205 !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
6206 /* TLI */ nullptr)))
6207 return nullptr;
6208 for (Value *Op : I.operands()) {
6209 Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
6210 if (!NewOp) {
6211 assert(Check && "Manifest of new value unexpectedly failed!");
6212 return nullptr;
6213 }
6214 if (!Check)
6215 VMap[Op] = NewOp;
6216 }
6217 if (Check)
6218 return &I;
6219
6220 Instruction *CloneI = I.clone();
6221 // TODO: Try to salvage debug information here.
6222 CloneI->setDebugLoc(DebugLoc());
6223 VMap[&I] = CloneI;
6224 CloneI->insertBefore(CtxI);
6225 RemapInstruction(CloneI, VMap);
6226 return CloneI;
6227 }
6228
6229 /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
6230 /// If \p Check is true we will only verify such an operation would succeed and
6231 /// return a non-nullptr value if that is the case. No IR is generated or
6232 /// modified.
6233 static Value *reproduceValue(Attributor &A,
6234 const AbstractAttribute &QueryingAA, Value &V,
6235 Type &Ty, Instruction *CtxI, bool Check,
6236 ValueToValueMapTy &VMap) {
6237 if (const auto &NewV = VMap.lookup(&V))
6238 return NewV;
6239 bool UsedAssumedInformation = false;
6240 std::optional<Value *> SimpleV = A.getAssumedSimplified(
6241 V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6242 if (!SimpleV.has_value())
6243 return PoisonValue::get(&Ty);
6244 Value *EffectiveV = &V;
6245 if (*SimpleV)
6246 EffectiveV = *SimpleV;
6247 if (auto *C = dyn_cast<Constant>(EffectiveV))
6248 return C;
6249 if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
6250 A.getInfoCache()))
6251 return ensureType(A, *EffectiveV, Ty, CtxI, Check);
6252 if (auto *I = dyn_cast<Instruction>(EffectiveV))
6253 if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
6254 return ensureType(A, *NewV, Ty, CtxI, Check);
6255 return nullptr;
6256 }
6257
6258 /// Return a value we can use as replacement for the associated one, or
6259 /// nullptr if we don't have one that makes sense.
6260 Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
6261 Value *NewV = SimplifiedAssociatedValue
6262 ? *SimplifiedAssociatedValue
6263 : UndefValue::get(getAssociatedType());
6264 if (NewV && NewV != &getAssociatedValue()) {
6265 ValueToValueMapTy VMap;
6266 // First verify we can reproduce the value with the required type at the
6267 // context location before we actually start modifying the IR.
6268 if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6269 /* CheckOnly */ true, VMap))
6270 return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6271 /* CheckOnly */ false, VMap);
6272 }
6273 return nullptr;
6274 }
6275
6276 /// Helper function for querying AAValueSimplify and updating candidate.
6277 /// \param IRP The value position we are trying to unify with SimplifiedValue
6278 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
6279 const IRPosition &IRP, bool Simplify = true) {
6280 bool UsedAssumedInformation = false;
6281 std::optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
6282 if (Simplify)
6283 QueryingValueSimplified = A.getAssumedSimplified(
6284 IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6285 return unionAssumed(QueryingValueSimplified);
6286 }
6287
6288 /// Return true if a simplification candidate was found, false otherwise.
6289 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
6290 if (!getAssociatedValue().getType()->isIntegerTy())
6291 return false;
6292
6293 // This will also pass the call base context.
6294 const auto *AA =
6295 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
6296 if (!AA)
6297 return false;
6298
6299 std::optional<Constant *> COpt = AA->getAssumedConstant(A);
6300
6301 if (!COpt) {
6302 SimplifiedAssociatedValue = std::nullopt;
6303 A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6304 return true;
6305 }
6306 if (auto *C = *COpt) {
6307 SimplifiedAssociatedValue = C;
6308 A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6309 return true;
6310 }
6311 return false;
6312 }
6313
6314 bool askSimplifiedValueForOtherAAs(Attributor &A) {
6315 if (askSimplifiedValueFor<AAValueConstantRange>(A))
6316 return true;
6317 if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
6318 return true;
6319 return false;
6320 }
6321
6322 /// See AbstractAttribute::manifest(...).
6323 ChangeStatus manifest(Attributor &A) override {
6324 ChangeStatus Changed = ChangeStatus::UNCHANGED;
6325 for (auto &U : getAssociatedValue().uses()) {
6326 // Check if we need to adjust the insertion point to make sure the IR is
6327 // valid.
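 // Editorial note (not in the original source): for a PHI operand the
 // replacement must be materialized on the incoming edge, hence the
 // insertion point is the predecessor's terminator, not the PHI itself.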
6328 Instruction *IP = dyn_cast<Instruction>(U.getUser());
6329 if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
6330 IP = PHI->getIncomingBlock(U)->getTerminator();
6331 if (auto *NewV = manifestReplacementValue(A, IP)) {
6332 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
6333 << " -> " << *NewV << " :: " << *this << "\n");
6334 if (A.changeUseAfterManifest(U, *NewV))
6335 Changed = ChangeStatus::CHANGED;
6336 }
6337 }
6338
6339 return Changed | AAValueSimplify::manifest(A);
6340 }
6341
6342 /// See AbstractState::indicatePessimisticFixpoint(...).
6343 ChangeStatus indicatePessimisticFixpoint() override {
6344 SimplifiedAssociatedValue = &getAssociatedValue();
6345 return AAValueSimplify::indicatePessimisticFixpoint();
6346 }
6347};
6348
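// What the argument specialization below deduces, on a sketch of hypothetical
// IR: if every call site of an internal function passes the same constant,
//
//   define internal i32 @f(i32 %x) { ... }   ; all uses see %x == 7
//   call i32 @f(i32 7)
//   call i32 @f(i32 7)
//
// then %x simplifies to 7 inside @f and its uses are rewritten when the
// attribute manifests.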
6349struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
6350 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
6351 : AAValueSimplifyImpl(IRP, A) {}
6352
6353 void initialize(Attributor &A) override {
6354 AAValueSimplifyImpl::initialize(A);
6355 if (A.hasAttr(getIRPosition(),
6356 {Attribute::InAlloca, Attribute::Preallocated,
6357 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
6358 /* IgnoreSubsumingPositions */ true))
6359 indicatePessimisticFixpoint();
6360 }
6361
6362 /// See AbstractAttribute::updateImpl(...).
6363 ChangeStatus updateImpl(Attributor &A) override {
6364 // Byval is only replaceable if it is readonly, otherwise we would write
6365 // into the replaced value and not the copy that byval creates implicitly.
6366 Argument *Arg = getAssociatedArgument();
6367 if (Arg->hasByValAttr()) {
6368 // TODO: We probably need to verify synchronization is not an issue, e.g.,
6369 // there is no race by not copying a constant byval.
6370 bool IsKnown;
6371 if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
6372 return indicatePessimisticFixpoint();
6373 }
6374
6375 auto Before = SimplifiedAssociatedValue;
6376
6377 auto PredForCallSite = [&](AbstractCallSite ACS) {
6378 const IRPosition &ACSArgPos =
6379 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
6380 // Check if a corresponding argument was found or if it is not
6381 // associated (which can happen for callback calls).
6382 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6383 return false;
6384
6385 // Simplify the argument operand explicitly and check if the result is
6386 // valid in the current scope. This avoids referring to simplified values
6387 // in other functions, e.g., we don't want to say an argument in a
6388 // static function is actually an argument in a different function.
6389 bool UsedAssumedInformation = false;
6390 std::optional<Constant *> SimpleArgOp =
6391 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
6392 if (!SimpleArgOp)
6393 return true;
6394 if (!*SimpleArgOp)
6395 return false;
6396 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
6397 return false;
6398 return unionAssumed(*SimpleArgOp);
6399 };
6400
6401 // Generate an answer specific to the call site context.
6402 bool Success;
6403 bool UsedAssumedInformation = false;
6404 if (hasCallBaseContext() &&
6405 getCallBaseContext()->getCalledOperand() == Arg->getParent())
6406 Success = PredForCallSite(
6407 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
6408 else
6409 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
6410 UsedAssumedInformation);
6411
6412 if (!Success)
6413 if (!askSimplifiedValueForOtherAAs(A))
6414 return indicatePessimisticFixpoint();
6415
6416 // If a candidate was found in this update, return CHANGED.
6417 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6418 : ChangeStatus::CHANGED;
6419 }
6420
6421 /// See AbstractAttribute::trackStatistics()
6422 void trackStatistics() const override {
6423 STATS_DECLTRACK_ARG_ATTR(value_simplify)
6424 }
6425};
6426
6427struct AAValueSimplifyReturned : AAValueSimplifyImpl {
6428 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
6429 : AAValueSimplifyImpl(IRP, A) {}
6430
6431 /// See AAValueSimplify::getAssumedSimplifiedValue()
6432 std::optional<Value *>
6433 getAssumedSimplifiedValue(Attributor &A) const override {
6434 if (!isValidState())
6435 return nullptr;
6436 return SimplifiedAssociatedValue;
6437 }
6438
6439 /// See AbstractAttribute::updateImpl(...).
6440 ChangeStatus updateImpl(Attributor &A) override {
6441 auto Before = SimplifiedAssociatedValue;
6442
6443 auto ReturnInstCB = [&](Instruction &I) {
6444 auto &RI = cast<ReturnInst>(I);
6445 return checkAndUpdate(
6446 A, *this,
6447 IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
6448 };
6449
6450 bool UsedAssumedInformation = false;
6451 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
6452 UsedAssumedInformation))
6453 if (!askSimplifiedValueForOtherAAs(A))
6454 return indicatePessimisticFixpoint();
6455
6456 // If a candidate was found in this update, return CHANGED.
6457 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6458 : ChangeStatus::CHANGED;
6459 }
6460
6461 ChangeStatus manifest(Attributor &A) override {
6462 // We queried AAValueSimplify for the returned values so they will be
6463 // replaced if a simplified form was found. Nothing to do here.
6464 return ChangeStatus::UNCHANGED;
6465 }
6466
6467 /// See AbstractAttribute::trackStatistics()
6468 void trackStatistics() const override {
6469 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
6470 }
6471};
6472
6473struct AAValueSimplifyFloating : AAValueSimplifyImpl {
6474 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
6475 : AAValueSimplifyImpl(IRP, A) {}
6476
6477 /// See AbstractAttribute::initialize(...).
6478 void initialize(Attributor &A) override {
6479 AAValueSimplifyImpl::initialize(A);
6480 Value &V = getAnchorValue();
6481
6482 // TODO: Handle other kinds of values.
6483 if (isa<Constant>(V))
6484 indicatePessimisticFixpoint();
6485 }
6486
6487 /// See AbstractAttribute::updateImpl(...).
6488 ChangeStatus updateImpl(Attributor &A) override {
6489 auto Before = SimplifiedAssociatedValue;
6490 if (!askSimplifiedValueForOtherAAs(A))
6491 return indicatePessimisticFixpoint();
6492
6493 // If a candidate was found in this update, return CHANGED.
6494 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6495 : ChangeStatus::CHANGED;
6496 }
6497
6498 /// See AbstractAttribute::trackStatistics()
6499 void trackStatistics() const override {
6500 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
6501 }
6502};
6503
6504struct AAValueSimplifyFunction : AAValueSimplifyImpl {
6505 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
6506 : AAValueSimplifyImpl(IRP, A) {}
6507
6508 /// See AbstractAttribute::initialize(...).
6509 void initialize(Attributor &A) override {
6510 SimplifiedAssociatedValue = nullptr;
6511 indicateOptimisticFixpoint();
6512 }
6513 /// See AbstractAttribute::updateImpl(...).
6514 ChangeStatus updateImpl(Attributor &A) override {
6516 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
6517 }
6518 /// See AbstractAttribute::trackStatistics()
6519 void trackStatistics() const override {
6520 STATS_DECLTRACK_FN_ATTR(value_simplify)
6521 }
6522};
6523
6524struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
6525 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
6526 : AAValueSimplifyFunction(IRP, A) {}
6527 /// See AbstractAttribute::trackStatistics()
6528 void trackStatistics() const override {
6529 STATS_DECLTRACK_CS_ATTR(value_simplify)
6530 }
6531};
6532
6533struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
6534 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
6535 : AAValueSimplifyImpl(IRP, A) {}
6536
6537 void initialize(Attributor &A) override {
6538 AAValueSimplifyImpl::initialize(A);
6539 Function *Fn = getAssociatedFunction();
6540 assert(Fn && "Expected an associated function");
6541 for (Argument &Arg : Fn->args()) {
6542 if (Arg.hasReturnedAttr()) {
6543 auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
6544 Arg.getArgNo());
6545 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
6546 checkAndUpdate(A, *this, IRP))
6547 indicateOptimisticFixpoint();
6548 else
6549 indicatePessimisticFixpoint();
6550 return;
6551 }
6552 }
6553 }
6554
6555 /// See AbstractAttribute::updateImpl(...).
6556 ChangeStatus updateImpl(Attributor &A) override {
6557 return indicatePessimisticFixpoint();
6558 }
6559
6560 void trackStatistics() const override {
6561 STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6562 }
6563};
6564
6565struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6566 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6567 : AAValueSimplifyFloating(IRP, A) {}
6568
6569 /// See AbstractAttribute::manifest(...).
6570 ChangeStatus manifest(Attributor &A) override {
6571 ChangeStatus Changed = ChangeStatus::UNCHANGED;
6572 // TODO: We should avoid simplification duplication to begin with.
6573 auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6574 IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6575 if (FloatAA && FloatAA->getState().isValidState())
6576 return Changed;
6577
6578 if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6579 Use &U = cast<CallBase>(&getAnchorValue())
6580 ->getArgOperandUse(getCallSiteArgNo());
6581 if (A.changeUseAfterManifest(U, *NewV))
6582 Changed = ChangeStatus::CHANGED;
6583 }
6584
6585 return Changed | AAValueSimplify::manifest(A);
6586 }
6587
6588 void trackStatistics() const override {
6589 STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6590 }
6591};
6592} // namespace
6593
6594/// ----------------------- Heap-To-Stack Conversion ---------------------------
6595namespace {
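// The transformation implemented here, on hypothetical IR: a small,
// non-escaping heap allocation with a matching free
//
//   %p = call noalias ptr @malloc(i64 8)
//   ...                                   ; benign uses of %p only
//   call void @free(ptr %p)
//
// is rewritten to a stack allocation and the free call is deleted:
//
//   %p = alloca i8, i64 8
//   ...
//
// The analysis below establishes when this is sound: bounded size, no
// capturing or unknown freeing uses, and a unique free call that is executed
// whenever the allocation is.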
6596struct AAHeapToStackFunction final : public AAHeapToStack {
6597
6598 struct AllocationInfo {
6599 /// The call that allocates the memory.
6600 CallBase *const CB;
6601
6602 /// The library function id for the allocation.
6603 LibFunc LibraryFunctionId = NotLibFunc;
6604
6605 /// The status wrt. a rewrite.
6606 enum {
6607 STACK_DUE_TO_USE,
6608 STACK_DUE_TO_FREE,
6609 INVALID,
6610 } Status = STACK_DUE_TO_USE;
6611
6612 /// Flag to indicate if we encountered a use that might free this allocation
6613 /// but which is not in the deallocation infos.
6614 bool HasPotentiallyFreeingUnknownUses = false;
6615
6616 /// Flag to indicate that we should place the new alloca in the function
6617 /// entry block rather than where the call site (CB) is.
6618 bool MoveAllocaIntoEntry = true;
6619
6620 /// The set of free calls that use this allocation.
6621 SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6622 };
6623
6624 struct DeallocationInfo {
6625 /// The call that deallocates the memory.
6626 CallBase *const CB;
6627 /// The value freed by the call.
6628 Value *FreedOp;
6629
6630 /// Flag to indicate if we don't know all objects this deallocation might
6631 /// free.
6632 bool MightFreeUnknownObjects = false;
6633
6634 /// The set of allocation calls that are potentially freed.
6635 SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6636 };
6637
6638 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6639 : AAHeapToStack(IRP, A) {}
6640
6641 ~AAHeapToStackFunction() {
6642 // Ensure we call the destructor so we release any memory allocated in the
6643 // sets.
6644 for (auto &It : AllocationInfos)
6645 It.second->~AllocationInfo();
6646 for (auto &It : DeallocationInfos)
6647 It.second->~DeallocationInfo();
6648 }
6649
6650 void initialize(Attributor &A) override {
6651 AAHeapToStack::initialize(A);
6652
6653 const Function *F = getAnchorScope();
6654 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6655
6656 auto AllocationIdentifierCB = [&](Instruction &I) {
6657 CallBase *CB = dyn_cast<CallBase>(&I);
6658 if (!CB)
6659 return true;
6660 if (Value *FreedOp = getFreedOperand(CB, TLI)) {
6661 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
6662 return true;
6663 }
6664 // To do heap to stack, we need to know that the allocation itself is
6665 // removable once uses are rewritten, and that we can initialize the
6666 // alloca to the same pattern as the original allocation result.
6667 if (isRemovableAlloc(CB, TLI)) {
6668 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6669 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6670 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6671 AllocationInfos[CB] = AI;
6672 if (TLI)
6673 TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6674 }
6675 }
6676 return true;
6677 };
6678
6679 bool UsedAssumedInformation = false;
6680 bool Success = A.checkForAllCallLikeInstructions(
6681 AllocationIdentifierCB, *this, UsedAssumedInformation,
6682 /* CheckBBLivenessOnly */ false,
6683 /* CheckPotentiallyDead */ true);
6684 (void)Success;
6685 assert(Success && "Did not expect the call base visit callback to fail!");
6686
6687 Attributor::SimplifictionCallbackTy SCB =
6688 [](const IRPosition &, const AbstractAttribute *,
6689 bool &) -> std::optional<Value *> { return nullptr; };
6690 for (const auto &It : AllocationInfos)
6691 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6692 SCB);
6693 for (const auto &It : DeallocationInfos)
6694 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6695 SCB);
6696 }
6697
6698 const std::string getAsStr(Attributor *A) const override {
6699 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6700 for (const auto &It : AllocationInfos) {
6701 if (It.second->Status == AllocationInfo::INVALID)
6702 ++NumInvalidMallocs;
6703 else
6704 ++NumH2SMallocs;
6705 }
6706 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6707 std::to_string(NumInvalidMallocs);
6708 }
6709
6710 /// See AbstractAttribute::trackStatistics().
6711 void trackStatistics() const override {
6712 STATS_DECL(
6713 MallocCalls, Function,
6714 "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6715 for (const auto &It : AllocationInfos)
6716 if (It.second->Status != AllocationInfo::INVALID)
6717 ++BUILD_STAT_NAME(MallocCalls, Function);
6718 }
6719
6720 bool isAssumedHeapToStack(const CallBase &CB) const override {
6721 if (isValidState())
6722 if (AllocationInfo *AI =
6723 AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6724 return AI->Status != AllocationInfo::INVALID;
6725 return false;
6726 }
6727
6728 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6729 if (!isValidState())
6730 return false;
6731
6732 for (const auto &It : AllocationInfos) {
6733 AllocationInfo &AI = *It.second;
6734 if (AI.Status == AllocationInfo::INVALID)
6735 continue;
6736
6737 if (AI.PotentialFreeCalls.count(&CB))
6738 return true;
6739 }
6740
6741 return false;
6742 }
6743
6744 ChangeStatus manifest(Attributor &A) override {
6745 assert(getState().isValidState() &&
6746 "Attempted to manifest an invalid state!");
6747
6748 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6749 Function *F = getAnchorScope();
6750 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6751
6752 for (auto &It : AllocationInfos) {
6753 AllocationInfo &AI = *It.second;
6754 if (AI.Status == AllocationInfo::INVALID)
6755 continue;
6756
6757 for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6758 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6759 A.deleteAfterManifest(*FreeCall);
6760 HasChanged = ChangeStatus::CHANGED;
6761 }
6762
6763 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6764 << "\n");
6765
6766 auto Remark = [&](OptimizationRemark OR) {
6767 LibFunc IsAllocShared;
6768 if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6769 if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6770 return OR << "Moving globalized variable to the stack.";
6771 return OR << "Moving memory allocation from the heap to the stack.";
6772 };
6773 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6774 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6775 else
6776 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6777
6778 const DataLayout &DL = A.getInfoCache().getDL();
6779 Value *Size;
6780 std::optional<APInt> SizeAPI = getSize(A, *this, AI);
6781 if (SizeAPI) {
6782 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6783 } else {
6784 LLVMContext &Ctx = AI.CB->getContext();
6785 ObjectSizeOpts Opts;
6786 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6787 SizeOffsetValue SizeOffsetPair = Eval.compute(AI.CB);
6788 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6789 cast<ConstantInt>(SizeOffsetPair.Offset)->isZero());
6790 Size = SizeOffsetPair.Size;
6791 }
6792
6793 BasicBlock::iterator IP = AI.MoveAllocaIntoEntry
6794 ? F->getEntryBlock().begin()
6795 : AI.CB->getIterator();
6796
6797 Align Alignment(1);
6798 if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6799 Alignment = std::max(Alignment, *RetAlign);
6800 if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6801 std::optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6802 assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 &&
6803 "Expected an alignment during manifest!");
6804 Alignment =
6805 std::max(Alignment, assumeAligned(AlignmentAPI->getZExtValue()));
6806 }
6807
6808 // TODO: Hoist the alloca towards the function entry.
6809 unsigned AS = DL.getAllocaAddrSpace();
6810 Instruction *Alloca =
6811 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
6812 AI.CB->getName() + ".h2s", IP);
6813
6814 if (Alloca->getType() != AI.CB->getType())
6815 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6816 Alloca, AI.CB->getType(), "malloc_cast", AI.CB->getIterator());
6817
6818 auto *I8Ty = Type::getInt8Ty(F->getContext());
6819 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6820 assert(InitVal &&
6821 "Must be able to materialize initial memory state of allocation");
6822
6823 A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6824
6825 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6826 auto *NBB = II->getNormalDest();
6827 BranchInst::Create(NBB, AI.CB->getParent());
6828 A.deleteAfterManifest(*AI.CB);
6829 } else {
6830 A.deleteAfterManifest(*AI.CB);
6831 }
6832
6833 // Initialize the alloca with the same value as used by the allocation
6834 // function. We can skip undef as the initial value of an alloc is
6835 // undef, and the memset would simply end up being DSEd.
6836 if (!isa<UndefValue>(InitVal)) {
6837 IRBuilder<> Builder(Alloca->getNextNode());
6838 // TODO: Use alignment above if align!=1
6839 Builder.CreateMemSet(Alloca, InitVal, Size, std::nullopt);
6840 }
6841 HasChanged = ChangeStatus::CHANGED;
6842 }
6843
6844 return HasChanged;
6845 }
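  // For a calloc-like allocation, manifest() above emits, roughly (a sketch;
  // <Size> and <Align> stand for the values computed in the loop):
  //
  //   %h2s = alloca i8, i64 <Size>, align <Align>  ; entry block if
  //                                                ; MoveAllocaIntoEntry
  //   call void @llvm.memset.p0.i64(ptr %h2s, i8 0, i64 <Size>, i1 false)
  //
  // The memset is omitted when the allocator's initial value is undef, e.g.,
  // for plain malloc.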
6846
6847 std::optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6848 Value &V) {
6849 bool UsedAssumedInformation = false;
6850 std::optional<Constant *> SimpleV =
6851 A.getAssumedConstant(V, AA, UsedAssumedInformation);
6852 if (!SimpleV)
6853 return APInt(64, 0);
6854 if (auto *CI = dyn_cast_or_null<ConstantInt>(*SimpleV))
6855 return CI->getValue();
6856 return std::nullopt;
6857 }
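  // Note the asymmetry above: an empty optional from getAssumedConstant means
  // no value is assumed yet (e.g., the value is assumed dead), so we
  // optimistically report 0; a present simplification that is not a
  // ConstantInt means the integer cannot be determined and we report
  // std::nullopt, which callers treat as "unknown size/alignment".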
6858
6859 std::optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6860 AllocationInfo &AI) {
6861 auto Mapper = [&](const Value *V) -> const Value * {
6862 bool UsedAssumedInformation = false;
6863 if (std::optional<Constant *> SimpleV =
6864 A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6865 if (*SimpleV)
6866 return *SimpleV;
6867 return V;
6868 };
6869
6870 const Function *F = getAnchorScope();
6871 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6872 return getAllocSize(AI.CB, TLI, Mapper);
6873 }
6874
6875 /// Collection of all malloc-like calls in a function with associated
6876 /// information.
6877 MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6878
6879 /// Collection of all free-like calls in a function with associated
6880 /// information.
6881 MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6882
6883 ChangeStatus updateImpl(Attributor &A) override;
6884};
6885
6886ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6887 ChangeStatus Changed = ChangeStatus::UNCHANGED;
6888 const Function *F = getAnchorScope();
6889 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6890
6891 const auto *LivenessAA =
6892 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6893
6894 MustBeExecutedContextExplorer *Explorer =
6895 A.getInfoCache().getMustBeExecutedContextExplorer();
6896
6897 bool StackIsAccessibleByOtherThreads =
6898 A.getInfoCache().stackIsAccessibleByOtherThreads();
6899
6900 LoopInfo *LI =
6901 A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6902 std::optional<bool> MayContainIrreducibleControl;
6903 auto IsInLoop = [&](BasicBlock &BB) {
6904 if (&F->getEntryBlock() == &BB)
6905 return false;
6906 if (!MayContainIrreducibleControl.has_value())
6907 MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6908 if (*MayContainIrreducibleControl)
6909 return true;
6910 if (!LI)
6911 return true;
6912 return LI->getLoopFor(&BB) != nullptr;
6913 };
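  // IsInLoop is deliberately conservative: without LoopInfo, or if the
  // function may contain irreducible control flow, every non-entry block
  // counts as "in a loop". This feeds the MoveAllocaIntoEntry decision at the
  // end of updateImpl: an allocation executed once per loop iteration must
  // produce a distinct object each time, which a single entry-block alloca
  // would not, so such allocas are kept at the call site.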
6914
6915 // Flag to ensure we update our deallocation information at most once per
6916 // updateImpl call and only if we use the free check reasoning.
6917 bool HasUpdatedFrees = false;
6918
6919 auto UpdateFrees = [&]() {
6920 HasUpdatedFrees = true;
6921
6922 for (auto &It : DeallocationInfos) {
6923 DeallocationInfo &DI = *It.second;
6924 // For now we cannot use deallocations that have unknown inputs, skip
6925 // them.
6926 if (DI.MightFreeUnknownObjects)
6927 continue;
6928
6929 // No need to analyze dead calls, ignore them instead.
6930 bool UsedAssumedInformation = false;
6931 if (A.isAssumedDead(*DI.CB, this, LivenessAA, UsedAssumedInformation,
6932 /* CheckBBLivenessOnly */ true))
6933 continue;
6934
6935 // Use the non-optimistic version to get the freed object.
6936 Value *Obj = getUnderlyingObject(DI.FreedOp);
6937 if (!Obj) {
6938 LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6939 DI.MightFreeUnknownObjects = true;
6940 continue;
6941 }
6942
6943 // Free of null and undef can be ignored as no-ops (or UB in the latter
6944 // case).
6945 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6946 continue;
6947
6948 CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6949 if (!ObjCB) {
6950 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6951 << "\n");
6952 DI.MightFreeUnknownObjects = true;
6953 continue;
6954 }
6955
6956 AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6957 if (!AI) {
6958 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6959 << "\n");
6960 DI.MightFreeUnknownObjects = true;
6961 continue;
6962 }
6963
6964 DI.PotentialAllocationCalls.insert(ObjCB);
6965 }
6966 };
6967
6968 auto FreeCheck = [&](AllocationInfo &AI) {
6969 // If the stack is not accessible by other threads, the "must-free" logic
6970 // doesn't apply as the pointer could be shared and needs to be placed in
6971 // "shareable" memory.
6972 if (!StackIsAccessibleByOtherThreads) {
6973 bool IsKnownNoSync;
6974 if (!AA::hasAssumedIRAttr<Attribute::NoSync>(
6975 A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoSync)) {
6976 LLVM_DEBUG(
6977 dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6978 "other threads and function is not nosync:\n");
6979 return false;
6980 }
6981 }
6982 if (!HasUpdatedFrees)
6983 UpdateFrees();
6984
6985 // TODO: Allow multi exit functions that have different free calls.
6986 if (AI.PotentialFreeCalls.size() != 1) {
6987 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6988 << AI.PotentialFreeCalls.size() << "\n");
6989 return false;
6990 }
6991 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6992 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6993 if (!DI) {
6994 LLVM_DEBUG(
6995 dbgs() << "[H2S] unique free call was not known as deallocation call "
6996 << *UniqueFree << "\n");
6997 return false;
6998 }
6999 if (DI->MightFreeUnknownObjects) {
7000 LLVM_DEBUG(
7001 dbgs() << "[H2S] unique free call might free unknown allocations\n");
7002 return false;
7003 }
7004 if (DI->PotentialAllocationCalls.empty())
7005 return true;
7006 if (DI->PotentialAllocationCalls.size() > 1) {
7007 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
7008 << DI->PotentialAllocationCalls.size()
7009 << " different allocations\n");
7010 return false;
7011 }
7012 if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
7013 LLVM_DEBUG(
7014 dbgs()
7015 << "[H2S] unique free call not known to free this allocation but "
7016 << **DI->PotentialAllocationCalls.begin() << "\n");
7017 return false;
7018 }
7019
7020 // __kmpc_alloc_shared and __kmpc_free_shared are by construction matched.
7021 if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared) {
7022 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
7023 if (!Explorer || !Explorer->findInContextOf(UniqueFree, CtxI)) {
7024 LLVM_DEBUG(dbgs() << "[H2S] unique free call might not be executed "
7025 "with the allocation "
7026 << *UniqueFree << "\n");
7027 return false;
7028 }
7029 }
7030 return true;
7031 };
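  // The shape FreeCheck accepts, sketched on hypothetical IR: exactly one
  // free call, known to free only this allocation, and (outside the
  // __kmpc_alloc_shared special case) located in the must-be-executed context
  // of the allocation:
  //
  //   %p = call ptr @malloc(i64 8)
  //   store i8 0, ptr %p
  //   call void @free(ptr %p)    ; unique free, executed whenever %p is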
7032
7033 auto UsesCheck = [&](AllocationInfo &AI) {
7034 bool ValidUsesOnly = true;
7035
7036 auto Pred = [&](const Use &U, bool &Follow) -> bool {
7037 Instruction *UserI = cast<Instruction>(U.getUser());
7038 if (isa<LoadInst>(UserI))
7039 return true;
7040 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
7041 if (SI->getValueOperand() == U.get()) {
7043 << "[H2S] escaping store to memory: " << *UserI << "\n");
7044 ValidUsesOnly = false;
7045 } else {
7046 // A store into the malloc'ed memory is fine.
7047 }
7048 return true;
7049 }
7050 if (auto *CB = dyn_cast<CallBase>(UserI)) {
7051 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
7052 return true;
7053 if (DeallocationInfos.count(CB)) {
7054 AI.PotentialFreeCalls.insert(CB);
7055 return true;
7056 }
7057
7058 unsigned ArgNo = CB->getArgOperandNo(&U);
7059 auto CBIRP = IRPosition::callsite_argument(*CB, ArgNo);
7060
7061 bool IsKnownNoCapture;
7062 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
7063 A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoCapture);
7064
7065 // If a call site argument use is nofree, we are fine.
7066 bool IsKnownNoFree;
7067 bool IsAssumedNoFree = AA::hasAssumedIRAttr<Attribute::NoFree>(
7068 A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoFree);
7069
7070 if (!IsAssumedNoCapture ||
7071 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7072 !IsAssumedNoFree)) {
7073 AI.HasPotentiallyFreeingUnknownUses |= !IsAssumedNoFree;
7074
7075 // Emit a missed remark if this is missed OpenMP globalization.
7076 auto Remark = [&](OptimizationRemarkMissed ORM) {
7077 return ORM
7078 << "Could not move globalized variable to the stack. "
7079 "Variable is potentially captured in call. Mark "
7080 "parameter as `__attribute__((noescape))` to override.";
7081 };
7082
7083 if (ValidUsesOnly &&
7084 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
7085 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
7086
7087 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
7088 ValidUsesOnly = false;
7089 }
7090 return true;
7091 }
7092
7093 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
7094 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
7095 Follow = true;
7096 return true;
7097 }
7098 // Unknown user for which we can not track uses further (in a way that
7099 // makes sense).
7100 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
7101 ValidUsesOnly = false;
7102 return true;
7103 };
7104 if (!A.checkForAllUses(Pred, *this, *AI.CB, /* CheckBBLivenessOnly */ false,
7105 DepClassTy::OPTIONAL, /* IgnoreDroppableUses */ true,
7106 [&](const Use &OldU, const Use &NewU) {
7107 auto *SI = dyn_cast<StoreInst>(OldU.getUser());
7108 return !SI || StackIsAccessibleByOtherThreads ||
7109 AA::isAssumedThreadLocalObject(
7110 A, *SI->getPointerOperand(), *this);
7111 }))
7112 return false;
7113 return ValidUsesOnly;
7114 };
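  // An illustration of what UsesCheck rejects versus tolerates: storing the
  // pointer itself escapes the allocation, storing into the pointed-to memory
  // does not.
  //
  //   %p = call ptr @malloc(i64 8)
  //   store ptr %p, ptr @g     ; escaping store of %p -> not convertible
  //   store i32 1, ptr %p      ; store into the allocation -> fine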
7115
7116 // The actual update starts here. We look at all allocations and depending on
7117 // their status perform the appropriate check(s).
7118 for (auto &It : AllocationInfos) {
7119 AllocationInfo &AI = *It.second;
7120 if (AI.Status == AllocationInfo::INVALID)
7121 continue;
7122
7123 if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
7124 std::optional<APInt> APAlign = getAPInt(A, *this, *Align);
7125 if (!APAlign) {
7126 // Can't generate an alloca which respects the required alignment
7127 // on the allocation.
7128 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
7129 << "\n");
7130 AI.Status = AllocationInfo::INVALID;
7131 Changed = ChangeStatus::CHANGED;
7132 continue;
7133 }
7134 if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
7135 !APAlign->isPowerOf2()) {
7136 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
7137 << "\n");
7138 AI.Status = AllocationInfo::INVALID;
7139 Changed = ChangeStatus::CHANGED;
7140 continue;
7141 }
7142 }
7143
7144 std::optional<APInt> Size = getSize(A, *this, AI);
7145 if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7146 MaxHeapToStackSize != -1) {
7147 if (!Size || Size->ugt(MaxHeapToStackSize)) {
7148 LLVM_DEBUG({
7149 if (!Size)
7150 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
7151 else
7152 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
7153 << MaxHeapToStackSize << "\n";
7154 });
7155
7156 AI.Status = AllocationInfo::INVALID;
7157 Changed = ChangeStatus::CHANGED;
7158 continue;
7159 }
7160 }
7161
7162 switch (AI.Status) {
7163 case AllocationInfo::STACK_DUE_TO_USE:
7164 if (UsesCheck(AI))
7165 break;
7166 AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
7167 [[fallthrough]];
7168 case AllocationInfo::STACK_DUE_TO_FREE:
7169 if (FreeCheck(AI))
7170 break;
7171 AI.Status = AllocationInfo::INVALID;
7172 Changed = ChangeStatus::CHANGED;
7173 break;
7174 case AllocationInfo::INVALID:
7175 llvm_unreachable("Invalid allocations should never reach this point!");
7176 };
7177
7178 // Check if we still think we can move it into the entry block. If the
7179 // alloca comes from a converted __kmpc_alloc_shared then we can usually
7180 // ignore the potential complications associated with loops.
7181 bool IsGlobalizedLocal =
7182 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared;
7183 if (AI.MoveAllocaIntoEntry &&
7184 (!Size.has_value() ||
7185 (!IsGlobalizedLocal && IsInLoop(*AI.CB->getParent()))))
7186 AI.MoveAllocaIntoEntry = false;
7187 }
7188
7189 return Changed;
7190}
7191} // namespace
7192
7193/// ----------------------- Privatizable Pointers ------------------------------
7194namespace {
7195struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
7196 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
7197 : AAPrivatizablePtr(IRP, A), PrivatizableType(std::nullopt) {}
7198
7199 ChangeStatus indicatePessimisticFixpoint() override {
7200 AAPrivatizablePtr::indicatePessimisticFixpoint();
7201 PrivatizableType = nullptr;
7202 return ChangeStatus::CHANGED;
7203 }
7204
7205 /// Identify the type we can choose for a private copy of the underlying
7206 /// argument. std::nullopt means it is not clear yet, nullptr means there is
7207 /// none.
7208 virtual std::optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
7209
7210 /// Return a privatizable type that encloses both T0 and T1.
7211 /// TODO: This is merely a stub for now as we should manage a mapping as well.
7212 std::optional<Type *> combineTypes(std::optional<Type *> T0,
7213 std::optional<Type *> T1) {
7214 if (!T0)
7215 return T1;
7216 if (!T1)
7217 return T0;
7218 if (T0 == T1)
7219 return T0;
7220 return nullptr;
7221 }
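  // With the stub semantics above: combineTypes(i32, std::nullopt) yields
  // i32, combineTypes(i32, i32) yields i32, and combineTypes(i32, i64) yields
  // nullptr, i.e., call sites that disagree on the type make the pointer
  // non-privatizable.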
7222
7223 std::optional<Type *> getPrivatizableType() const override {
7224 return PrivatizableType;
7225 }
7226
7227 const std::string getAsStr(Attributor *A) const override {
7228 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
7229 }
7230
7231protected:
7232 std::optional<Type *> PrivatizableType;
7233};
7234
7235// TODO: Do this for call site arguments (probably also other values) as well.
7236
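// What argument privatization amounts to, sketched on hypothetical IR: a
// read-only pointer argument is replaced by its constituent values and the
// callee rebuilds a private copy. Roughly,
//
//   define internal void @f(ptr %p)   ; reads two i32s through %p
//   call void @f(ptr %s)              ; %s points to { i32, i32 }
//
// becomes
//
//   define internal void @f(i32 %v0, i32 %v1) {
//     %priv = alloca { i32, i32 }
//     ; store %v0/%v1 into %priv; old uses of %p now use %priv
//   }
//   ; the call site loads the two i32s from %s and passes them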
7237struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
7238 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
7239 : AAPrivatizablePtrImpl(IRP, A) {}
7240
7241 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7242 std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
7243 // If this is a byval argument and we know all the call sites (so we can
7244 // rewrite them), there is no need to check them explicitly.
7245 bool UsedAssumedInformation = false;
7246 SmallVector<Attribute, 1> Attrs;
7247 A.getAttrs(getIRPosition(), {Attribute::ByVal}, Attrs,
7248 /* IgnoreSubsumingPositions */ true);
7249 if (!Attrs.empty() &&
7250 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
7251 true, UsedAssumedInformation))
7252 return Attrs[0].getValueAsType();
7253
7254 std::optional<Type *> Ty;
7255 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
7256
7257 // Make sure the associated call site argument has the same type at all call
7258 // sites and it is an allocation we know is safe to privatize, for now that
7259 // means we only allow alloca instructions.
7260 // TODO: We can additionally analyze the accesses in the callee to create
7261 // the type from that information instead. That is a little more
7262 // involved and will be done in a follow up patch.
7263 auto CallSiteCheck = [&](AbstractCallSite ACS) {
7264 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
7265 // Check if a corresponding argument was found or if it is not
7266 // associated (which can happen for callback calls).
7267 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
7268 return false;
7269
7270 // Check that all call sites agree on a type.
7271 auto *PrivCSArgAA =
7272 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
7273 if (!PrivCSArgAA)
7274 return false;
7275 std::optional<Type *> CSTy = PrivCSArgAA->getPrivatizableType();
7276
7277 LLVM_DEBUG({
7278 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
7279 if (CSTy && *CSTy)
7280 (*CSTy)->print(dbgs());
7281 else if (CSTy)
7282 dbgs() << "<nullptr>";
7283 else
7284 dbgs() << "<none>";
7285 });
7286
7287 Ty = combineTypes(Ty, CSTy);
7288
7289 LLVM_DEBUG({
7290 dbgs() << " : New Type: ";
7291 if (Ty && *Ty)
7292 (*Ty)->print(dbgs());
7293 else if (Ty)
7294 dbgs() << "<nullptr>";
7295 else
7296 dbgs() << "<none>";
7297 dbgs() << "\n";
7298 });
7299
7300 return !Ty || *Ty;
7301 };
7302
7303 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
7304 UsedAssumedInformation))
7305 return nullptr;
7306 return Ty;
7307 }
7308
7309 /// See AbstractAttribute::updateImpl(...).
7310 ChangeStatus updateImpl(Attributor &A) override {
7311 PrivatizableType = identifyPrivatizableType(A);
7312 if (!PrivatizableType)
7313 return ChangeStatus::UNCHANGED;
7314 if (!*PrivatizableType)
7315 return indicatePessimisticFixpoint();
7316
7317 // The dependence is optional so we don't give up once we give up on the
7318 // alignment.
7319 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
7320 DepClassTy::OPTIONAL);
7321
7322 // Avoid arguments with padding for now.
7323 if (!A.hasAttr(getIRPosition(), Attribute::ByVal) &&
7324 !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
7325 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
7326 return indicatePessimisticFixpoint();
7327 }
7328
7329 // Collect the types that will replace the privatizable type in the function
7330 // signature.
7331 SmallVector<Type *, 16> ReplacementTypes;
7332 identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7333
7334 // Verify callee and caller agree on how the promoted argument would be
7335 // passed.
7336 Function &Fn = *getIRPosition().getAnchorScope();
7337 const auto *TTI =
7338 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
7339 if (!TTI) {
7340 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
7341 << Fn.getName() << "\n");
7342 return indicatePessimisticFixpoint();
7343 }
7344
7345 auto CallSiteCheck = [&](AbstractCallSite ACS) {
7346 CallBase *CB = ACS.getInstruction();
7347 return TTI->areTypesABICompatible(
7348 CB->getCaller(),
7349 dyn_cast_if_present<Function>(CB->getCalledOperand()),
7350 ReplacementTypes);
7351 };
7352 bool UsedAssumedInformation = false;
7353 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
7354 UsedAssumedInformation)) {
7355 LLVM_DEBUG(
7356 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
7357 << Fn.getName() << "\n");
7358 return indicatePessimisticFixpoint();
7359 }
7360
7361 // Register a rewrite of the argument.
7362 Argument *Arg = getAssociatedArgument();
7363 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
7364 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
7365 return indicatePessimisticFixpoint();
7366 }
7367
7368 unsigned ArgNo = Arg->getArgNo();
7369
7370 // Helper to check if for the given call site the associated argument is
7371 // passed to a callback where the privatization would be different.
7372 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
7373 SmallVector<const Use *, 4> CallbackUses;
7374 AbstractCallSite::getCallbackUses(CB, CallbackUses);
7375 for (const Use *U : CallbackUses) {
7376 AbstractCallSite CBACS(U);
7377 assert(CBACS && CBACS.isCallbackCall());
7378 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
7379 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
7380
7381 LLVM_DEBUG({
7382 dbgs()
7383 << "[AAPrivatizablePtr] Argument " << *Arg
7384 << "check if can be privatized in the context of its parent ("
7385 << Arg->getParent()->getName()
7386 << ")\n[AAPrivatizablePtr] because it is an argument in a "
7387 "callback ("
7388 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
7389 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
7390 << CBACS.getCallArgOperand(CBArg) << " vs "
7391 << CB.getArgOperand(ArgNo) << "\n"
7392 << "[AAPrivatizablePtr] " << CBArg << " : "
7393 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
7394 });
7395
7396 if (CBArgNo != int(ArgNo))
7397 continue;
7398 const auto *CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
7399 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
7400 if (CBArgPrivAA && CBArgPrivAA->isValidState()) {
7401 auto CBArgPrivTy = CBArgPrivAA->getPrivatizableType();
7402 if (!CBArgPrivTy)
7403 continue;
7404 if (*CBArgPrivTy == PrivatizableType)
7405 continue;
7406 }
7407
7408 LLVM_DEBUG({
7409 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7410 << " cannot be privatized in the context of its parent ("
7411 << Arg->getParent()->getName()
7412 << ")\n[AAPrivatizablePtr] because it is an argument in a "
7413 "callback ("
7414 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
7415 << ").\n[AAPrivatizablePtr] for which the argument "
7416 "privatization is not compatible.\n";
7417 });
7418 return false;
7419 }
7420 }
7421 return true;
7422 };
7423
7424 // Helper to check if for the given call site the associated argument is
7425 // passed to a direct call where the privatization would be different.
7426 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
7427 CallBase *DC = cast<CallBase>(ACS.getInstruction());
7428 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
7429 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
7430 "Expected a direct call operand for callback call operand");
7431
7432 Function *DCCallee =
7433 dyn_cast_if_present<Function>(DC->getCalledOperand());
7434 LLVM_DEBUG({
7435 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7436 << " check if be privatized in the context of its parent ("
7437 << Arg->getParent()->getName()
7438 << ")\n[AAPrivatizablePtr] because it is an argument in a "
7439 "direct call of ("
7440 << DCArgNo << "@" << DCCallee->getName() << ").\n";
7441 });
7442
7443 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
7444 const auto *DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
7445 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
7446 DepClassTy::REQUIRED);
7447 if (DCArgPrivAA && DCArgPrivAA->isValidState()) {
7448 auto DCArgPrivTy = DCArgPrivAA->getPrivatizableType();
7449 if (!DCArgPrivTy)
7450 return true;
7451 if (*DCArgPrivTy == PrivatizableType)
7452 return true;
7453 }
7454 }
7455
7456 LLVM_DEBUG({
7457 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7458 << " cannot be privatized in the context of its parent ("
7459 << Arg->getParent()->getName()
7460 << ")\n[AAPrivatizablePtr] because it is an argument in a "
7461 "direct call of ("
7463 << ").\n[AAPrivatizablePtr] for which the argument "
7464 "privatization is not compatible.\n";
7465 });
7466 return false;
7467 };
7468
7469 // Helper to check if the associated argument is used at the given abstract
7470 // call site in a way that is incompatible with the privatization assumed
7471 // here.
7472 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
7473 if (ACS.isDirectCall())
7474 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
7475 if (ACS.isCallbackCall())
7476 return IsCompatiblePrivArgOfDirectCS(ACS);
7477 return false;
7478 };
7479
7480 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
7481 UsedAssumedInformation))
7482 return indicatePessimisticFixpoint();
7483
7484 return ChangeStatus::UNCHANGED;
7485 }
7486
7487 /// Given a type to privatize \p PrivType, collect the constituents (which
7488 /// are used) in \p ReplacementTypes.
7489 static void
7490 identifyReplacementTypes(Type *PrivType,
7491 SmallVectorImpl<Type *> &ReplacementTypes) {
7492 // TODO: For now we expand the privatization type to the fullest which can
7493 // lead to dead arguments that need to be removed later.
7494 assert(PrivType && "Expected privatizable type!");
7495
7496 // Traverse the type, extract constituent types on the outermost level.
7497 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7498 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
7499 ReplacementTypes.push_back(PrivStructType->getElementType(u));
7500 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7501 ReplacementTypes.append(PrivArrayType->getNumElements(),
7502 PrivArrayType->getElementType());
7503 } else {
7504 ReplacementTypes.push_back(PrivType);
7505 }
7506 }
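  // Outermost level only: { i32, i64 } expands to i32 and i64, [4 x float]
  // expands to four float entries, and a scalar i32 stays a single i32.
  // Nested aggregates are not recursed into.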
7507
7508 /// Initialize \p Base according to the type \p PrivType at position \p IP.
7509 /// The values needed are taken from the arguments of \p F starting at
7510 /// position \p ArgNo.
7511 static void createInitialization(Type *PrivType, Value &Base, Function &F,
7512 unsigned ArgNo, BasicBlock::iterator IP) {
7513 assert(PrivType && "Expected privatizable type!");
7514
7515 IRBuilder<NoFolder> IRB(IP->getParent(), IP);
7516 const DataLayout &DL = F.getDataLayout();
7517
7518 // Traverse the type, build GEPs and stores.
7519 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7520 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7521 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7522 Value *Ptr =
7523 constructPointer(&Base, PrivStructLayout->getElementOffset(u), IRB);
7524 new StoreInst(F.getArg(ArgNo + u), Ptr, IP);
7525 }
7526 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7527 Type *PointeeTy = PrivArrayType->getElementType();
7528 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7529 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7530 Value *Ptr = constructPointer(&Base, u * PointeeTySize, IRB);
7531 new StoreInst(F.getArg(ArgNo + u), Ptr, IP);
7532 }
7533 } else {
7534 new StoreInst(F.getArg(ArgNo), &Base, IP);
7535 }
7536 }
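  // For PrivType == { i32, i64 } and ArgNo == 0 this emits, roughly (a
  // sketch; %base is the new private copy, offsets come from the DataLayout's
  // struct layout):
  //
  //   %f0 = getelementptr i8, ptr %base, i64 0
  //   store i32 %arg0, ptr %f0
  //   %f1 = getelementptr i8, ptr %base, i64 8
  //   store i64 %arg1, ptr %f1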
7537
7538 /// Extract values from \p Base according to the type \p PrivType at the
7539 /// call position \p ACS. The values are appended to \p ReplacementValues.
7540 void createReplacementValues(Align Alignment, Type *PrivType,
7541 AbstractCallSite ACS, Value *Base,
7542 SmallVectorImpl<Value *> &ReplacementValues) {
7543 assert(Base && "Expected base value!");
7544 assert(PrivType && "Expected privatizable type!");
7545 Instruction *IP = ACS.getInstruction();
7546
7547 IRBuilder<NoFolder> IRB(IP);
7548 const DataLayout &DL = IP->getDataLayout();
7549
7550 // Traverse the type, build GEPs and loads.
7551 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7552 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7553 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7554 Type *PointeeTy = PrivStructType->getElementType(u);
7555 Value *Ptr =
7556 constructPointer(Base, PrivStructLayout->getElementOffset(u), IRB);
7557 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP->getIterator());
7558 L->setAlignment(Alignment);
7559 ReplacementValues.push_back(L);
7560 }
7561 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7562 Type *PointeeTy = PrivArrayType->getElementType();
7563 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7564 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7565 Value *Ptr = constructPointer(Base, u * PointeeTySize, IRB);
7566 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP->getIterator());
7567 L->setAlignment(Alignment);
7568 ReplacementValues.push_back(L);
7569 }
7570 } else {
7571 LoadInst *L = new LoadInst(PrivType, Base, "", IP->getIterator());
7572 L->setAlignment(Alignment);
7573 ReplacementValues.push_back(L);
7574 }
7575 }
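  // This is the call-site counterpart of createInitialization above: for an
  // aggregate PrivType it emits one load per constituent from Base, at the
  // same offsets the callee-side stores use, and appends the loaded values to
  // ReplacementValues so they become the new call arguments.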
7576
7577 /// See AbstractAttribute::manifest(...)
7578 ChangeStatus manifest(Attributor &A) override {
7579 if (!PrivatizableType)
7580 return ChangeStatus::UNCHANGED;
7581 assert(*PrivatizableType && "Expected privatizable type!");
7582
7583 // Collect all tail calls in the function as we cannot allow new allocas to
7584 // escape into tail recursion.
7585 // TODO: Be smarter about new allocas escaping into tail calls.
7586 SmallVector<CallInst *, 16> TailCalls;
7587 bool UsedAssumedInformation = false;
7588 if (!A.checkForAllInstructions(
7589 [&](Instruction &I) {
7590 CallInst &CI = cast<CallInst>(I);
7591 if (CI.isTailCall())
7592 TailCalls.push_back(&CI);
7593 return true;
7594 },
7595 *this, {Instruction::Call}, UsedAssumedInformation))
7596 return ChangeStatus::UNCHANGED;
7597
7598 Argument *Arg = getAssociatedArgument();
7599 // Query AAAlign attribute for alignment of associated argument to
7600 // determine the best alignment of loads.
7601 const auto *AlignAA =
7602 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7603
7604 // Callback to repair the associated function. A new alloca is placed at the
7605 // beginning and initialized with the values passed through arguments. The
7606 // new alloca replaces the use of the old pointer argument.
7607 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7608 [=](const Attributor::ArgumentReplacementInfo &ARI,
7609 Function &ReplacementFn, Function::arg_iterator ArgIt) {
7610 BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7611 BasicBlock::iterator IP = EntryBB.getFirstInsertionPt();
7612 const DataLayout &DL = IP->getDataLayout();
7613 unsigned AS = DL.getAllocaAddrSpace();
7614 Instruction *AI = new AllocaInst(*PrivatizableType, AS,
7615 Arg->getName() + ".priv", IP);
7616 createInitialization(*PrivatizableType, *AI, ReplacementFn,
7617 ArgIt->getArgNo(), IP);
7618
7619 if (AI->getType() != Arg->getType())
7620 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7621 AI, Arg->getType(), "", IP);
7622 Arg->replaceAllUsesWith(AI);
7623
7624 for (CallInst *CI : TailCalls)
7625 CI->setTailCall(false);
7626 };
7627
7628 // Callback to repair a call site of the associated function. The elements
7629 // of the privatizable type are loaded prior to the call and passed to the
7630 // new function version.
7631 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7632 [=](const Attributor::ArgumentReplacementInfo &ARI,
7633 AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
7634 // When no alignment is specified for the load instruction,
7635 // natural alignment is assumed.
7636 createReplacementValues(
7637 AlignAA ? AlignAA->getAssumedAlign() : Align(0),
7638 *PrivatizableType, ACS,
7639 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7640 NewArgOperands);
7641 };
7642
7643 // Collect the types that will replace the privatizable type in the function
7644 // signature.
7645 SmallVector<Type *, 16> ReplacementTypes;
7646 identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7647
7648 // Register a rewrite of the argument.
7649 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7650 std::move(FnRepairCB),
7651 std::move(ACSRepairCB)))
7652 return ChangeStatus::CHANGED;
7653 return ChangeStatus::UNCHANGED;
7654 }
7655
7656 /// See AbstractAttribute::trackStatistics()
7657 void trackStatistics() const override {
7658 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7659 }
7660};
7661
7662struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7663 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7664 : AAPrivatizablePtrImpl(IRP, A) {}
7665
7666 /// See AbstractAttribute::initialize(...).
7667 void initialize(Attributor &A) override {
7668 // TODO: We can privatize more than arguments.
7669 indicatePessimisticFixpoint();
7670 }
7671
7672 ChangeStatus updateImpl(Attributor &A) override {
7673 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7674 "updateImpl will not be called");
7675 }
7676
7677 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7678 std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
7679 Value *Obj = getUnderlyingObject(&getAssociatedValue());
7680 if (!Obj) {
7681 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7682 return nullptr;
7683 }
7684
7685 if (auto *AI = dyn_cast<AllocaInst>(Obj))
7686 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7687 if (CI->isOne())
7688 return AI->getAllocatedType();
7689 if (auto *Arg = dyn_cast<Argument>(Obj)) {
7690 auto *PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7691 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7692 if (PrivArgAA && PrivArgAA->isAssumedPrivatizablePtr())
7693 return PrivArgAA->getPrivatizableType();
7694 }
7695
7696 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7697 "alloca nor privatizable argument: "
7698 << *Obj << "!\n");
7699 return nullptr;
7700 }
7701
7702 /// See AbstractAttribute::trackStatistics()
7703 void trackStatistics() const override {
7704 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7705 }
7706};
7707
7708struct AAPrivatizablePtrCallSiteArgument final
7709 : public AAPrivatizablePtrFloating {
7710 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7711 : AAPrivatizablePtrFloating(IRP, A) {}
7712
7713 /// See AbstractAttribute::initialize(...).
7714 void initialize(Attributor &A) override {
7715 if (A.hasAttr(getIRPosition(), Attribute::ByVal))
7716 indicateOptimisticFixpoint();
7717 }
7718
7719 /// See AbstractAttribute::updateImpl(...).
7720 ChangeStatus updateImpl(Attributor &A) override {
7721 PrivatizableType = identifyPrivatizableType(A);
7722 if (!PrivatizableType)
7723 return ChangeStatus::UNCHANGED;
7724 if (!*PrivatizableType)
7725 return indicatePessimisticFixpoint();
7726
7727 const IRPosition &IRP = getIRPosition();
7728 bool IsKnownNoCapture;
7729 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
7730 A, this, IRP, DepClassTy::REQUIRED, IsKnownNoCapture);
7731 if (!IsAssumedNoCapture) {
7732 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7733 return indicatePessimisticFixpoint();
7734 }
7735
7736 bool IsKnownNoAlias;
7737 if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
7738 A, this, IRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
7739 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7740 return indicatePessimisticFixpoint();
7741 }
7742
7743 bool IsKnown;
7744 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7745 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7746 return indicatePessimisticFixpoint();
7747 }
7748
7749 return ChangeStatus::UNCHANGED;
7750 }
7751
7752 /// See AbstractAttribute::trackStatistics()
7753 void trackStatistics() const override {
7754 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7755 }
7756};
7757
7758struct AAPrivatizablePtrCallSiteReturned final
7759 : public AAPrivatizablePtrFloating {
7760 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7761 : AAPrivatizablePtrFloating(IRP, A) {}
7762
7763 /// See AbstractAttribute::initialize(...).
7764 void initialize(Attributor &A) override {
7765 // TODO: We can privatize more than arguments.
7766 indicatePessimisticFixpoint();
7767 }
7768
7769 /// See AbstractAttribute::trackStatistics()
7770 void trackStatistics() const override {
7771 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7772 }
7773};
7774
7775struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7776 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7777 : AAPrivatizablePtrFloating(IRP, A) {}
7778
7779 /// See AbstractAttribute::initialize(...).
7780 void initialize(Attributor &A) override {
7781 // TODO: We can privatize more than arguments.
7782 indicatePessimisticFixpoint();
7783 }
7784
7785 /// See AbstractAttribute::trackStatistics()
7786 void trackStatistics() const override {
7787 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7788 }
7789};
7790} // namespace
7791
7792/// -------------------- Memory Behavior Attributes ----------------------------
7793/// Includes read-none, read-only, and write-only.
7794/// ----------------------------------------------------------------------------
7795namespace {
7796struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7797 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7798 : AAMemoryBehavior(IRP, A) {}
7799
7800 /// See AbstractAttribute::initialize(...).
7801 void initialize(Attributor &A) override {
7802 intersectAssumedBits(BEST_STATE);
7803 getKnownStateFromValue(A, getIRPosition(), getState());
7804 AAMemoryBehavior::initialize(A);
7805 }
7806
7807 /// Return the memory behavior information encoded in the IR for \p IRP.
7808 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7809 BitIntegerState &State,
7810 bool IgnoreSubsumingPositions = false) {
7811 SmallVector<Attribute, 2> Attrs;
7812 A.getAttrs(IRP, AttrKinds, Attrs, IgnoreSubsumingPositions);
7813 for (const Attribute &Attr : Attrs) {
7814 switch (Attr.getKindAsEnum()) {
7815 case Attribute::ReadNone:
7816 State.addKnownBits(NO_ACCESSES);
7817 break;
7818 case Attribute::ReadOnly:
7819 State.addKnownBits(NO_WRITES);
7820 break;
7821 case Attribute::WriteOnly:
7822 State.addKnownBits(NO_READS);
7823 break;
7824 default:
7825 llvm_unreachable("Unexpected attribute!");
7826 }
7827 }
7828
7829 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7830 if (!I->mayReadFromMemory())
7831 State.addKnownBits(NO_READS);
7832 if (!I->mayWriteToMemory())
7833 State.addKnownBits(NO_WRITES);
7834 }
7835 }
7836
7837 /// See AbstractAttribute::getDeducedAttributes(...).
7838 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
7839 SmallVectorImpl<Attribute> &Attrs) const override {
7840 assert(Attrs.size() == 0);
7841 if (isAssumedReadNone())
7842 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7843 else if (isAssumedReadOnly())
7844 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7845 else if (isAssumedWriteOnly())
7846 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7847 assert(Attrs.size() <= 1);
7848 }
7849
7850 /// See AbstractAttribute::manifest(...).
7851 ChangeStatus manifest(Attributor &A) override {
7852 const IRPosition &IRP = getIRPosition();
7853
7854 if (A.hasAttr(IRP, Attribute::ReadNone,
7855 /* IgnoreSubsumingPositions */ true))
7856 return ChangeStatus::UNCHANGED;
7857
7858 // Check if we would improve the existing attributes first.
7859 SmallVector<Attribute, 4> DeducedAttrs;
7860 getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
7861 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7862 return A.hasAttr(IRP, Attr.getKindAsEnum(),
7863 /* IgnoreSubsumingPositions */ true);
7864 }))
7865 return ChangeStatus::UNCHANGED;
7866
7867 // Clear existing attributes.
7868 A.removeAttrs(IRP, AttrKinds);
7869 // Clear conflicting writable attribute.
7870 if (isAssumedReadOnly())
7871 A.removeAttrs(IRP, Attribute::Writable);
7872
7873 // Use the generic manifest method.
7874 return IRAttribute::manifest(A);
7875 }
7876
7877 /// See AbstractState::getAsStr().
7878 const std::string getAsStr(Attributor *A) const override {
7879 if (isAssumedReadNone())
7880 return "readnone";
7881 if (isAssumedReadOnly())
7882 return "readonly";
7883 if (isAssumedWriteOnly())
7884 return "writeonly";
7885 return "may-read/write";
7886 }
7887
7888 /// The set of IR attributes AAMemoryBehavior deals with.
7889 static const Attribute::AttrKind AttrKinds[3];
7890};
7891
7892const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7893 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
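// The three attributes encode a small bit lattice (see AAMemoryBehavior):
// readonly corresponds to the NO_WRITES bit, writeonly to NO_READS, and
// readnone to both bits together (NO_ACCESSES). Updates only ever remove
// assumed bits, so a position weakens monotonically from readnone toward
// "may-read/write".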
7894
7895/// Memory behavior attribute for a floating value.
7896struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7897 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7898 : AAMemoryBehaviorImpl(IRP, A) {}
7899
7900 /// See AbstractAttribute::updateImpl(...).
7901 ChangeStatus updateImpl(Attributor &A) override;
7902
7903 /// See AbstractAttribute::trackStatistics()
7904 void trackStatistics() const override {
7905 if (isAssumedReadNone())
7906 STATS_DECLTRACK_FLOATING_ATTR(readnone)
7907 else if (isAssumedReadOnly())
7908 STATS_DECLTRACK_FLOATING_ATTR(readonly)
7909 else if (isAssumedWriteOnly())
7910 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7911 }
7912
7913private:
7914 /// Return true if users of \p UserI might access the underlying
7915 /// variable/location described by \p U and should therefore be analyzed.
7916 bool followUsersOfUseIn(Attributor &A, const Use &U,
7917 const Instruction *UserI);
7918
7919 /// Update the state according to the effect of use \p U in \p UserI.
7920 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7921};
7922
7923/// Memory behavior attribute for function argument.
7924struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7925 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7926 : AAMemoryBehaviorFloating(IRP, A) {}
7927
7928 /// See AbstractAttribute::initialize(...).
7929 void initialize(Attributor &A) override {
7930 intersectAssumedBits(BEST_STATE);
7931 const IRPosition &IRP = getIRPosition();
7932 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7933 // can query it when we use has/getAttr. That would allow us to reuse the
7934 // initialize of the base class here.
7935 bool HasByVal = A.hasAttr(IRP, {Attribute::ByVal},
7936 /* IgnoreSubsumingPositions */ true);
7937 getKnownStateFromValue(A, IRP, getState(),
7938 /* IgnoreSubsumingPositions */ HasByVal);
7939 }
7940
7941 ChangeStatus manifest(Attributor &A) override {
7942 // TODO: Pointer arguments are not supported on vectors of pointers yet.
7943 if (!getAssociatedValue().getType()->isPointerTy())
7944 return ChangeStatus::UNCHANGED;
7945
7946 // TODO: From readattrs.ll: "inalloca parameters are always
7947 // considered written"
7948 if (A.hasAttr(getIRPosition(),
7949 {Attribute::InAlloca, Attribute::Preallocated})) {
7950 removeKnownBits(NO_WRITES);
7951 removeAssumedBits(NO_WRITES);
7952 }
7953 A.removeAttrs(getIRPosition(), AttrKinds);
7954 return AAMemoryBehaviorFloating::manifest(A);
7955 }
7956
7957 /// See AbstractAttribute::trackStatistics()
7958 void trackStatistics() const override {
7959 if (isAssumedReadNone())
7960 STATS_DECLTRACK_ARG_ATTR(readnone)
7961 else if (isAssumedReadOnly())
7962 STATS_DECLTRACK_ARG_ATTR(readonly)
7963 else if (isAssumedWriteOnly())
7964 STATS_DECLTRACK_ARG_ATTR(writeonly)
7965 }
7966};
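// Editorial IR sketch (assumed shape, not from the source): for an argument
// that is only ever written, AAMemoryBehaviorArgument keeps NO_READS assumed
// and manifests `writeonly`:
//
//   define void @init(ptr writeonly %out) {
//     store i32 0, ptr %out
//     ret void
//   }
//
// inalloca/preallocated arguments are exempted above because they are always
// considered written on the call edge.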
7967
7968struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7969 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7970 : AAMemoryBehaviorArgument(IRP, A) {}
7971
7972 /// See AbstractAttribute::initialize(...).
7973 void initialize(Attributor &A) override {
7974 // If we don't have an associated argument this is either a variadic call
7975 // or an indirect call; either way, nothing to do here.
7976 Argument *Arg = getAssociatedArgument();
7977 if (!Arg) {
7978 indicatePessimisticFixpoint();
7979 return;
7980 }
7981 if (Arg->hasByValAttr()) {
7982 addKnownBits(NO_WRITES);
7983 removeKnownBits(NO_READS);
7984 removeAssumedBits(NO_READS);
7985 }
7986 AAMemoryBehaviorArgument::initialize(A);
7987 if (getAssociatedFunction()->isDeclaration())
7988 indicatePessimisticFixpoint();
7989 }
7990
7991 /// See AbstractAttribute::updateImpl(...).
7992 ChangeStatus updateImpl(Attributor &A) override {
7993 // TODO: Once we have call site specific value information we can provide
7994 // call site specific liveness information and then it makes
7995 // sense to specialize attributes for call sites arguments instead of
7996 // redirecting requests to the callee argument.
7997 Argument *Arg = getAssociatedArgument();
7998 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7999 auto *ArgAA =
8000 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
8001 if (!ArgAA)
8002 return indicatePessimisticFixpoint();
8003 return clampStateAndIndicateChange(getState(), ArgAA->getState());
8004 }
8005
8006 /// See AbstractAttribute::trackStatistics()
8007 void trackStatistics() const override {
8008 if (isAssumedReadNone())
8009 STATS_DECLTRACK_CSARG_ATTR(readnone)
8010 else if (isAssumedReadOnly())
8011 STATS_DECLTRACK_CSARG_ATTR(readonly)
8012 else if (isAssumedWriteOnly())
8013 STATS_DECLTRACK_CSARG_ATTR(writeonly)
8014 }
8015};
8016
8017/// Memory behavior attribute for a call site return position.
8018struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
8019 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
8020 : AAMemoryBehaviorFloating(IRP, A) {}
8021
8022 /// See AbstractAttribute::initialize(...).
8023 void initialize(Attributor &A) override {
8024 AAMemoryBehaviorImpl::initialize(A);
8025 }
8026 /// See AbstractAttribute::manifest(...).
8027 ChangeStatus manifest(Attributor &A) override {
8028 // We do not annotate returned values.
8029 return ChangeStatus::UNCHANGED;
8030 }
8031
8032 /// See AbstractAttribute::trackStatistics()
8033 void trackStatistics() const override {}
8034};
8035
8036/// An AA to represent the memory behavior function attributes.
8037struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
8038 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
8039 : AAMemoryBehaviorImpl(IRP, A) {}
8040
8041 /// See AbstractAttribute::updateImpl(Attributor &A).
8042 ChangeStatus updateImpl(Attributor &A) override;
8043
8044 /// See AbstractAttribute::manifest(...).
8045 ChangeStatus manifest(Attributor &A) override {
8046 // TODO: It would be better to merge this with AAMemoryLocation, so that
8047 // we could determine read/write per location. This would also have the
8048 // benefit of only one place trying to manifest the memory attribute.
8049 Function &F = cast<Function>(getAnchorValue());
8050 MemoryEffects ME = MemoryEffects::unknown();
8051 if (isAssumedReadNone())
8052 ME = MemoryEffects::none();
8053 else if (isAssumedReadOnly())
8054 ME = MemoryEffects::readOnly();
8055 else if (isAssumedWriteOnly())
8056 ME = MemoryEffects::writeOnly();
8057
8058 A.removeAttrs(getIRPosition(), AttrKinds);
8059 // Clear conflicting writable attribute.
8060 if (ME.onlyReadsMemory())
8061 for (Argument &Arg : F.args())
8062 A.removeAttrs(IRPosition::argument(Arg), Attribute::Writable);
8063 return A.manifestAttrs(getIRPosition(),
8064 Attribute::getWithMemoryEffects(F.getContext(), ME));
8065 }
8066
8067 /// See AbstractAttribute::trackStatistics()
8068 void trackStatistics() const override {
8069 if (isAssumedReadNone())
8070 STATS_DECLTRACK_FN_ATTR(readnone)
8071 else if (isAssumedReadOnly())
8072 STATS_DECLTRACK_FN_ATTR(readonly)
8073 else if (isAssumedWriteOnly())
8074 STATS_DECLTRACK_FN_ATTR(writeonly)
8075 }
8076};
8077
8078/// AAMemoryBehavior attribute for call sites.
8079struct AAMemoryBehaviorCallSite final
8080 : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl> {
8081 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
8082 : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl>(IRP, A) {}
8083
8084 /// See AbstractAttribute::manifest(...).
8085 ChangeStatus manifest(Attributor &A) override {
8086 // TODO: Deduplicate this with AAMemoryBehaviorFunction.
8087 CallBase &CB = cast<CallBase>(getAnchorValue());
8088 MemoryEffects ME = MemoryEffects::unknown();
8089 if (isAssumedReadNone())
8090 ME = MemoryEffects::none();
8091 else if (isAssumedReadOnly())
8092 ME = MemoryEffects::readOnly();
8093 else if (isAssumedWriteOnly())
8094 ME = MemoryEffects::writeOnly();
8095
8096 A.removeAttrs(getIRPosition(), AttrKinds);
8097 // Clear conflicting writable attribute.
8098 if (ME.onlyReadsMemory())
8099 for (Use &U : CB.args())
8100 A.removeAttrs(IRPosition::callsite_argument(CB, U.getOperandNo()),
8101 Attribute::Writable);
8102 return A.manifestAttrs(
8103 getIRPosition(), Attribute::getWithMemoryEffects(CB.getContext(), ME));
8104 }
8105
8106 /// See AbstractAttribute::trackStatistics()
8107 void trackStatistics() const override {
8108 if (isAssumedReadNone())
8109 STATS_DECLTRACK_CS_ATTR(readnone)
8110 else if (isAssumedReadOnly())
8111 STATS_DECLTRACK_CS_ATTR(readonly)
8112 else if (isAssumedWriteOnly())
8113 STATS_DECLTRACK_CS_ATTR(writeonly)
8114 }
8115};
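// Editor's note: both manifest methods above translate the deduced bit state
// into LLVM's unified memory attribute. Roughly (a sketch, not exhaustive):
//
//   isAssumedReadNone()  -> MemoryEffects::none()      -> `memory(none)`
//   isAssumedReadOnly()  -> MemoryEffects::readOnly()  -> `memory(read)`
//   isAssumedWriteOnly() -> MemoryEffects::writeOnly() -> `memory(write)`
//
// with conflicting `writable` argument attributes stripped whenever the
// result only reads memory.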
8116
8117ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
8118
8119 // The current assumed state used to determine a change.
8120 auto AssumedState = getAssumed();
8121
8122 auto CheckRWInst = [&](Instruction &I) {
8123 // If the instruction has its own memory behavior state, use it to restrict
8124 // the local state. No further analysis is required as the other memory
8125 // state is as optimistic as it gets.
8126 if (const auto *CB = dyn_cast<CallBase>(&I)) {
8127 const auto *MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
8128 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
8129 if (MemBehaviorAA) {
8130 intersectAssumedBits(MemBehaviorAA->getAssumed());
8131 return !isAtFixpoint();
8132 }
8133 }
8134
8135 // Remove access kind modifiers if necessary.
8136 if (I.mayReadFromMemory())
8137 removeAssumedBits(NO_READS);
8138 if (I.mayWriteToMemory())
8139 removeAssumedBits(NO_WRITES);
8140 return !isAtFixpoint();
8141 };
8142
8143 bool UsedAssumedInformation = false;
8144 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8145 UsedAssumedInformation))
8146 return indicatePessimisticFixpoint();
8147
8148 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8149 : ChangeStatus::UNCHANGED;
8150}
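// Editor's sketch of the update step above (hypothetical helper, not part of
// the Attributor API): per read/write instruction the assumed bits can only
// shrink, which is what makes the optimistic fixpoint iteration terminate.
//
//   static unsigned restrictBits(unsigned Assumed, const Instruction &I) {
//     if (I.mayReadFromMemory())
//       Assumed &= ~AAMemoryBehavior::NO_READS;  // a read was observed
//     if (I.mayWriteToMemory())
//       Assumed &= ~AAMemoryBehavior::NO_WRITES; // a write was observed
//     return Assumed;
//   }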
8151
8152ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
8153
8154 const IRPosition &IRP = getIRPosition();
8155 const IRPosition &FnPos = IRPosition::function_scope(IRP);
8156 AAMemoryBehavior::StateType &S = getState();
8157
8158 // First, check the function scope. We take the known information and we avoid
8159 // work if the assumed information implies the current assumed information for
8160 // this attribute. This is valid for all but byval arguments.
8161 Argument *Arg = IRP.getAssociatedArgument();
8162 AAMemoryBehavior::base_t FnMemAssumedState =
8163 AAMemoryBehavior::StateType::getWorstState();
8164 if (!Arg || !Arg->hasByValAttr()) {
8165 const auto *FnMemAA =
8166 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
8167 if (FnMemAA) {
8168 FnMemAssumedState = FnMemAA->getAssumed();
8169 S.addKnownBits(FnMemAA->getKnown());
8170 if ((S.getAssumed() & FnMemAA->getAssumed()) == S.getAssumed())
8171 return ChangeStatus::UNCHANGED;
8172 }
8173 }
8174
8175 // The current assumed state used to determine a change.
8176 auto AssumedState = S.getAssumed();
8177
8178 // Make sure the value is not captured (except through "return"), if
8179 // it is, any information derived would be irrelevant anyway as we cannot
8180 // check the potential aliases introduced by the capture. However, no need
8181 // to fall back to anything less optimistic than the function state.
8182 bool IsKnownNoCapture;
8183 const AANoCapture *ArgNoCaptureAA = nullptr;
8184 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
8185 A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture, false,
8186 &ArgNoCaptureAA);
8187
8188 if (!IsAssumedNoCapture &&
8189 (!ArgNoCaptureAA || !ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
8190 S.intersectAssumedBits(FnMemAssumedState);
8191 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8192 : ChangeStatus::UNCHANGED;
8193 }
8194
8195 // Visit and expand uses until all are analyzed or a fixpoint is reached.
8196 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
8197 Instruction *UserI = cast<Instruction>(U.getUser());
8198 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
8199 << " \n");
8200
8201 // Droppable users, e.g., llvm::assume, do not actually perform any action.
8202 if (UserI->isDroppable())
8203 return true;
8204
8205 // Check if the users of UserI should also be visited.
8206 Follow = followUsersOfUseIn(A, U, UserI);
8207
8208 // If UserI might touch memory we analyze the use in detail.
8209 if (UserI->mayReadOrWriteMemory())
8210 analyzeUseIn(A, U, UserI);
8211
8212 return !isAtFixpoint();
8213 };
8214
8215 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
8216 return indicatePessimisticFixpoint();
8217
8218 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8219 : ChangeStatus::UNCHANGED;
8220}
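// Editorial IR example (assumed shape): for a floating pointer value the
// use walk above visits transitive uses, e.g. in
//
//   %p = alloca i32
//   %v = load i32, ptr %p              ; clears NO_READS; load users are not
//                                      ; followed, the loaded value is new
//   call void @sink(ptr nocapture %p)  ; callee argument AA restricts state
//
// only memory-touching users actually change the assumed bits.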
8221
8222bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
8223 const Instruction *UserI) {
8224 // The loaded value is unrelated to the pointer argument, no need to
8225 // follow the users of the load.
8226 if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
8227 return false;
8228
8229 // By default we follow all uses assuming UserI might leak information on U,
8230 // we have special handling for call sites operands though.
8231 const auto *CB = dyn_cast<CallBase>(UserI);
8232 if (!CB || !CB->isArgOperand(&U))
8233 return true;
8234
8235 // If the use is a call argument known not to be captured, the users of
8236 // the call do not need to be visited because they have to be unrelated to
8237 // the input. Note that this check is not trivial even though we disallow
8238 // general capturing of the underlying argument. The reason is that the
8239 // call might capture the argument "through return", which we allow and for which we
8240 // need to check call users.
8241 if (U.get()->getType()->isPointerTy()) {
8242 unsigned ArgNo = CB->getArgOperandNo(&U);
8243 bool IsKnownNoCapture;
8244 return !AA::hasAssumedIRAttr<Attribute::NoCapture>(
8245 A, this, IRPosition::callsite_argument(*CB, ArgNo),
8246 DepClassTy::OPTIONAL, IsKnownNoCapture);
8247 }
8248
8249 return true;
8250}
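// Editorial IR example: "capture through return" is the one capture kind that
// still requires following call users, as in
//
//   define ptr @id(ptr returned %p) { ret ptr %p }
//   ...
//   %q = call ptr @id(ptr %p)      ; uses of %q may access *%p
//
// whereas a plain nocapture call site argument lets the walk stop at the call.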
8251
8252void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
8253 const Instruction *UserI) {
8254 assert(UserI->mayReadOrWriteMemory());
8255
8256 switch (UserI->getOpcode()) {
8257 default:
8258 // TODO: Handle all atomics and other side-effect operations we know of.
8259 break;
8260 case Instruction::Load:
8261 // Loads cause the NO_READS property to disappear.
8262 removeAssumedBits(NO_READS);
8263 return;
8264
8265 case Instruction::Store:
8266 // Stores cause the NO_WRITES property to disappear if the use is the
8267 // pointer operand. Note that while capturing was taken care of elsewhere,
8268 // we still have to handle stores of the value itself, which are not looked through.
8269 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
8270 removeAssumedBits(NO_WRITES);
8271 else
8272 indicatePessimisticFixpoint();
8273 return;
8274
8275 case Instruction::Call:
8276 case Instruction::CallBr:
8277 case Instruction::Invoke: {
8278 // For call sites we look at the argument memory behavior attribute (this
8279 // could be recursive!) in order to restrict our own state.
8280 const auto *CB = cast<CallBase>(UserI);
8281
8282 // Give up on operand bundles.
8283 if (CB->isBundleOperand(&U)) {
8284 indicatePessimisticFixpoint();
8285 return;
8286 }
8287
8288 // Calling a function does read the function pointer, and may write it if the
8289 // function is self-modifying.
8290 if (CB->isCallee(&U)) {
8291 removeAssumedBits(NO_READS);
8292 break;
8293 }
8294
8295 // Adjust the possible access behavior based on the information on the
8296 // argument.
8297 IRPosition Pos;
8298 if (U.get()->getType()->isPointerTy())
8299 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
8300 else
8301 Pos = IRPosition::callsite_function(*CB);
8302 const auto *MemBehaviorAA =
8303 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
8304 if (!MemBehaviorAA)
8305 break;
8306 // "assumed" has at most the same bits as the MemBehaviorAA assumed
8307 // and at least "known".
8308 intersectAssumedBits(MemBehaviorAA->getAssumed());
8309 return;
8310 }
8311 };
8312
8313 // Generally, look at the "may-properties" and adjust the assumed state if we
8314 // did not trigger special handling before.
8315 if (UserI->mayReadFromMemory())
8316 removeAssumedBits(NO_READS);
8317 if (UserI->mayWriteToMemory())
8318 removeAssumedBits(NO_WRITES);
8319}
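// Editorial IR example for the Store case above:
//
//   store i32 0, ptr %p   ; %p is the pointer operand -> NO_WRITES is removed
//   store ptr %p, ptr %q  ; %p is the stored *value*  -> pessimistic fixpoint,
//                         ; the pointer escapes into memory we do not track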
8320} // namespace
8321
8322/// -------------------- Memory Locations Attributes ---------------------------
8323/// Includes read-none, argmemonly, inaccessiblememonly,
8324/// inaccessiblememorargmemonly
8325/// ----------------------------------------------------------------------------
8326
8327 std::string AAMemoryLocation::getMemoryLocationsAsStr(
8328 AAMemoryLocation::MemoryLocationsKind MLK) {
8329 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
8330 return "all memory";
8331 if (MLK == AAMemoryLocation::NO_LOCATIONS)
8332 return "no memory";
8333 std::string S = "memory:";
8334 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
8335 S += "stack,";
8336 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
8337 S += "constant,";
8339 S += "internal global,";
8341 S += "external global,";
8342 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
8343 S += "argument,";
8345 S += "inaccessible,";
8346 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
8347 S += "malloced,";
8348 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
8349 S += "unknown,";
8350 S.pop_back();
8351 return S;
8352}
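// Editorial usage sketch: a state in which only stack and argument memory may
// still be accessed, i.e. every NO_* bit except NO_LOCAL_MEM and
// NO_ARGUMENT_MEM is set, prints as "memory:stack,argument" (the pop_back()
// strips the trailing comma).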
8353
8354namespace {
8355struct AAMemoryLocationImpl : public AAMemoryLocation {
8356
8357 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
8358 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
8359 AccessKind2Accesses.fill(nullptr);
8360 }
8361
8362 ~AAMemoryLocationImpl() {
8363 // The AccessSets are allocated via a BumpPtrAllocator, we call
8364 // the destructor manually.
8365 for (AccessSet *AS : AccessKind2Accesses)
8366 if (AS)
8367 AS->~AccessSet();
8368 }
8369
8370 /// See AbstractAttribute::initialize(...).
8371 void initialize(Attributor &A) override {
8372 intersectAssumedBits(BEST_STATE);
8373 getKnownStateFromValue(A, getIRPosition(), getState());
8374 AAMemoryLocation::initialize(A);
8375 }
8376
8377 /// Return the memory behavior information encoded in the IR for \p IRP.
8378 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
8379 BitIntegerState &State,
8380 bool IgnoreSubsumingPositions = false) {
8381 // For internal functions we ignore `argmemonly` and
8382 // `inaccessiblememorargmemonly` as we might break it via interprocedural
8383 // constant propagation. It is unclear if this is the best way but it is
8384 // unlikely this will cause real performance problems. If we are deriving
8385 // attributes for the anchor function we even remove the attribute in
8386 // addition to ignoring it.
8387 // TODO: A better way to handle this would be to add ~NO_GLOBAL_MEM /
8388 // MemoryEffects::Other as a possible location.
8389 bool UseArgMemOnly = true;
8390 Function *AnchorFn = IRP.getAnchorScope();
8391 if (AnchorFn && A.isRunOn(*AnchorFn))
8392 UseArgMemOnly = !AnchorFn->hasLocalLinkage();
8393
8394 SmallVector<Attribute, 2> Attrs;
8395 A.getAttrs(IRP, {Attribute::Memory}, Attrs, IgnoreSubsumingPositions);
8396 for (const Attribute &Attr : Attrs) {
8397 // TODO: We can map MemoryEffects to Attributor locations more precisely.
8398 MemoryEffects ME = Attr.getMemoryEffects();
8399 if (ME.doesNotAccessMemory()) {
8400 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
8401 continue;
8402 }
8403 if (ME.onlyAccessesInaccessibleMem()) {
8404 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
8405 continue;
8406 }
8407 if (ME.onlyAccessesArgPointees()) {
8408 if (UseArgMemOnly)
8409 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
8410 else {
8411 // Remove location information, only keep read/write info.
8412 ME = MemoryEffects(ME.getModRef());
8413 A.manifestAttrs(IRP,
8414 Attribute::getWithMemoryEffects(
8415 IRP.getAnchorValue().getContext(), ME),
8416 /*ForceReplace*/ true);
8417 }
8418 continue;
8419 }
8420 if (ME.onlyAccessesInaccessibleOrArgMem()) {
8421 if (UseArgMemOnly)
8422 State.addKnownBits(inverseLocation(
8423 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
8424 else {
8425 // Remove location information, only keep read/write info.
8426 ME = MemoryEffects(ME.getModRef());
8427 A.manifestAttrs(IRP,
8428 Attribute::getWithMemoryEffects(
8429 IRP.getAnchorValue().getContext(), ME),
8430 /*ForceReplace*/ true);
8431 }
8432 continue;
8433 }
8434 }
8435 }
8436
8437 /// See AbstractAttribute::getDeducedAttributes(...).
8438 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
8439 SmallVectorImpl<Attribute> &Attrs) const override {
8440 // TODO: We can map Attributor locations to MemoryEffects more precisely.
8441 assert(Attrs.size() == 0);
8442 if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
8443 if (isAssumedReadNone())
8444 Attrs.push_back(
8445 Attribute::getWithMemoryEffects(Ctx, MemoryEffects::none()));
8446 else if (isAssumedInaccessibleMemOnly())
8447 Attrs.push_back(Attribute::getWithMemoryEffects(
8448 Ctx, MemoryEffects::inaccessibleMemOnly()));
8449 else if (isAssumedArgMemOnly())
8450 Attrs.push_back(
8451 Attribute::getWithMemoryEffects(Ctx, MemoryEffects::argMemOnly()));
8452 else if (isAssumedInaccessibleOrArgMemOnly())
8453 Attrs.push_back(Attribute::getWithMemoryEffects(
8454 Ctx, MemoryEffects::inaccessibleOrArgMemOnly()));
8455 }
8456 assert(Attrs.size() <= 1);
8457 }
8458
8459 /// See AbstractAttribute::manifest(...).
8460 ChangeStatus manifest(Attributor &A) override {
8461 // TODO: If AAMemoryLocation and AAMemoryBehavior are merged, we could
8462 // provide per-location modref information here.
8463 const IRPosition &IRP = getIRPosition();
8464
8465 SmallVector<Attribute, 1> DeducedAttrs;
8466 getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
8467 if (DeducedAttrs.size() != 1)
8468 return ChangeStatus::UNCHANGED;
8469 MemoryEffects ME = DeducedAttrs[0].getMemoryEffects();
8470
8471 return A.manifestAttrs(IRP, Attribute::getWithMemoryEffects(
8472 IRP.getAnchorValue().getContext(), ME));
8473 }
8474
8475 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
8476 bool checkForAllAccessesToMemoryKind(
8477 function_ref<bool(const Instruction *, const Value *, AccessKind,
8478 MemoryLocationsKind)>
8479 Pred,
8480 MemoryLocationsKind RequestedMLK) const override {
8481 if (!isValidState())
8482 return false;
8483
8484 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
8485 if (AssumedMLK == NO_LOCATIONS)
8486 return true;
8487
8488 unsigned Idx = 0;
8489 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
8490 CurMLK *= 2, ++Idx) {
8491 if (CurMLK & RequestedMLK)
8492 continue;
8493
8494 if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
8495 for (const AccessInfo &AI : *Accesses)
8496 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
8497 return false;
8498 }
8499
8500 return true;
8501 }
8502
8503 ChangeStatus indicatePessimisticFixpoint() override {
8504 // If we give up and indicate a pessimistic fixpoint this instruction will
8505 // become an access for all potential access kinds:
8506 // TODO: Add pointers for argmemonly and globals to improve the results of
8507 // checkForAllAccessesToMemoryKind.
8508 bool Changed = false;
8509 MemoryLocationsKind KnownMLK = getKnown();
8510 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
8511 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
8512 if (!(CurMLK & KnownMLK))
8513 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
8514 getAccessKindFromInst(I));
8515 return AAMemoryLocation::indicatePessimisticFixpoint();
8516 }
8517
8518protected:
8519 /// Helper struct to tie together an instruction that has a read or write
8520 /// effect with the pointer it accesses (if any).
8521 struct AccessInfo {
8522
8523 /// The instruction that caused the access.
8524 const Instruction *I;
8525
8526 /// The base pointer that is accessed, or null if unknown.
8527 const Value *Ptr;
8528
8529 /// The kind of access (read/write/read+write).
8530 AccessKind Kind;
8531
8532 bool operator==(const AccessInfo &RHS) const {
8533 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
8534 }
8535 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
8536 if (LHS.I != RHS.I)
8537 return LHS.I < RHS.I;
8538 if (LHS.Ptr != RHS.Ptr)
8539 return LHS.Ptr < RHS.Ptr;
8540 if (LHS.Kind != RHS.Kind)
8541 return LHS.Kind < RHS.Kind;
8542 return false;
8543 }
8544 };
8545
8546 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
8547 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
8548 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
8549 std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
8550
8551 /// Categorize the pointer arguments of CB that might access memory in
8552 /// AccessedLoc and update the state and access map accordingly.
8553 void
8554 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
8555 AAMemoryLocation::StateType &AccessedLocs,
8556 bool &Changed);
8557
8558 /// Return the kind(s) of location that may be accessed by \p V.
8559 AAMemoryLocation::MemoryLocationsKind
8560 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
8561
8562 /// Return the access kind as determined by \p I.
8563 AccessKind getAccessKindFromInst(const Instruction *I) {
8564 AccessKind AK = READ_WRITE;
8565 if (I) {
8566 AK = I->mayReadFromMemory() ? READ : NONE;
8567 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
8568 }
8569 return AK;
8570 }
8571
8572 /// Update the state \p State and the AccessKind2Accesses given that \p I is
8573 /// an access of kind \p AK to a \p MLK memory location with the access
8574 /// pointer \p Ptr.
8575 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
8576 MemoryLocationsKind MLK, const Instruction *I,
8577 const Value *Ptr, bool &Changed,
8578 AccessKind AK = READ_WRITE) {
8579
8580 assert(isPowerOf2_32(MLK) && "Expected a single location set!");
8581 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
8582 if (!Accesses)
8583 Accesses = new (Allocator) AccessSet();
8584 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
8585 if (MLK == NO_UNKOWN_MEM)
8586 MLK = NO_LOCATIONS;
8587 State.removeAssumedBits(MLK);
8588 }
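// Editor's note: every memory location kind is a single bit, so Log2_32 maps
// it to a dense slot in AccessKind2Accesses. A minimal sketch, assuming
// NO_LOCAL_MEM is the lowest kind bit:
//
//   static unsigned slotFor(AAMemoryLocation::MemoryLocationsKind MLK) {
//     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
//     return llvm::Log2_32(MLK); // NO_LOCAL_MEM -> 0, next kind -> 1, ...
//   }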
8589
8590 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
8591 /// arguments, and update the state and access map accordingly.
8592 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
8593 AAMemoryLocation::StateType &State, bool &Changed,
8594 unsigned AccessAS = 0);
8595
8596 /// Used to allocate access sets.
8597 BumpPtrAllocator &Allocator;
8598};
8599
8600void AAMemoryLocationImpl::categorizePtrValue(
8601 Attributor &A, const Instruction &I, const Value &Ptr,
8602 AAMemoryLocation::StateType &State, bool &Changed, unsigned AccessAS) {
8603 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
8604 << Ptr << " ["
8605 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8606
8607 auto Pred = [&](Value &Obj) {
8608 unsigned ObjectAS = Obj.getType()->getPointerAddressSpace();
8609 // TODO: recognize the TBAA used for constant accesses.
8610 MemoryLocationsKind MLK = NO_LOCATIONS;
8611
8612 // Filter accesses to constant (GPU) memory if we have an AS at the access
8613 // site or the object is known to actually have the associated AS.
8614 if ((AccessAS == (unsigned)AA::GPUAddressSpace::Constant ||
8615 (ObjectAS == (unsigned)AA::GPUAddressSpace::Constant &&
8616 isIdentifiedObject(&Obj))) &&
8617 AA::isGPU(*I.getModule()))
8618 return true;
8619
8620 if (isa<UndefValue>(&Obj))
8621 return true;
8622 if (isa<Argument>(&Obj)) {
8623 // TODO: For now we do not treat byval arguments as local copies performed
8624 // on the call edge, though, we should. To make that happen we need to
8625 // teach various passes, e.g., DSE, about the copy effect of a byval. That
8626 // would also allow us to mark functions only accessing byval arguments as
8627 // readnone again, arguably their accesses have no effect outside of the
8628 // function, like accesses to allocas.
8629 MLK = NO_ARGUMENT_MEM;
8630 } else if (auto *GV = dyn_cast<GlobalValue>(&Obj)) {
8631 // Reading constant memory is not treated as a read "effect" by the
8632 // function attr pass so we won't either. Constants defined by TBAA are
8633 // similar. (We know we do not write it because it is constant.)
8634 if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8635 if (GVar->isConstant())
8636 return true;
8637
8638 if (GV->hasLocalLinkage())
8639 MLK = NO_GLOBAL_INTERNAL_MEM;
8640 else
8641 MLK = NO_GLOBAL_EXTERNAL_MEM;
8642 } else if (isa<ConstantPointerNull>(&Obj) &&
8643 (!NullPointerIsDefined(getAssociatedFunction(), AccessAS) ||
8644 !NullPointerIsDefined(getAssociatedFunction(), ObjectAS))) {
8645 return true;
8646 } else if (isa<AllocaInst>(&Obj)) {
8647 MLK = NO_LOCAL_MEM;
8648 } else if (const auto *CB = dyn_cast<CallBase>(&Obj)) {
8649 bool IsKnownNoAlias;
8650 if (AA::hasAssumedIRAttr<Attribute::NoAlias>(
8651 A, this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL,
8652 IsKnownNoAlias))
8653 MLK = NO_MALLOCED_MEM;
8654 else
8655 MLK = NO_UNKOWN_MEM;
8656 } else {
8657 MLK = NO_UNKOWN_MEM;
8658 }
8659
8660 assert(MLK != NO_LOCATIONS && "No location specified!");
8661 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8662 << Obj << " -> " << getMemoryLocationsAsStr(MLK) << "\n");
8663 updateStateAndAccessesMap(State, MLK, &I, &Obj, Changed,
8664 getAccessKindFromInst(&I));
8665
8666 return true;
8667 };
8668
8669 const auto *AA = A.getAAFor<AAUnderlyingObjects>(
8670 *this, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
8671 if (!AA || !AA->forallUnderlyingObjects(Pred, AA::Intraprocedural)) {
8672 LLVM_DEBUG(
8673 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8674 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8675 getAccessKindFromInst(&I));
8676 return;
8677 }
8678
8679 LLVM_DEBUG(
8680 dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8681 << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8682}
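// Editorial IR example for categorizePtrValue (labels illustrative):
//
//   @g = internal global i32 0
//   define void @f(ptr %a) {
//     %l = alloca i32
//     store i32 1, ptr %l   ; underlying object: alloca  -> NO_LOCAL_MEM
//     store i32 2, ptr @g   ; internal global -> NO_GLOBAL_INTERNAL_MEM
//     store i32 3, ptr %a   ; argument        -> NO_ARGUMENT_MEM
//     ret void
//   }
//
// each access removes the respective NO_* bit from the assumed state.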
8683
8684void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8685 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8686 bool &Changed) {
8687 for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8688
8689 // Skip non-pointer arguments.
8690 const Value *ArgOp = CB.getArgOperand(ArgNo);
8691 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8692 continue;
8693
8694 // Skip readnone arguments.
8695 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8696 const auto *ArgOpMemLocationAA =
8697 A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8698
8699 if (ArgOpMemLocationAA && ArgOpMemLocationAA->isAssumedReadNone())
8700 continue;
8701
8702 // Categorize potentially accessed pointer arguments as if there was an
8703 // access instruction with them as pointer.
8704 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8705 }
8706}
8707
8708 AAMemoryLocation::MemoryLocationsKind
8709 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8710 bool &Changed) {
8711 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8712 << I << "\n");
8713
8714 AAMemoryLocation::StateType AccessedLocs;
8715 AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8716
8717 if (auto *CB = dyn_cast<CallBase>(&I)) {
8718
8719 // First check if we assume any memory access is visible.
8720 const auto *CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8721 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8722 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8723 << " [" << CBMemLocationAA << "]\n");
8724 if (!CBMemLocationAA) {
8725 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr,
8726 Changed, getAccessKindFromInst(&I));
8727 return NO_UNKOWN_MEM;
8728 }
8729
8730 if (CBMemLocationAA->isAssumedReadNone())
8731 return NO_LOCATIONS;
8732
8733 if (CBMemLocationAA->isAssumedInaccessibleMemOnly()) {
8734 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8735 Changed, getAccessKindFromInst(&I));
8736 return AccessedLocs.getAssumed();
8737 }
8738
8739 uint32_t CBAssumedNotAccessedLocs =
8740 CBMemLocationAA->getAssumedNotAccessedLocation();
8741
8742 // Set the argmemonly and global bit as we handle them separately below.
8743 uint32_t CBAssumedNotAccessedLocsNoArgMem =
8744 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8745
8746 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8747 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8748 continue;
8749 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8750 getAccessKindFromInst(&I));
8751 }
8752
8753 // Now handle global memory if it might be accessed. This is slightly tricky
8754 // as NO_GLOBAL_MEM has multiple bits set.
8755 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8756 if (HasGlobalAccesses) {
8757 auto AccessPred = [&](const Instruction *, const Value *Ptr,
8758 AccessKind Kind, MemoryLocationsKind MLK) {
8759 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8760 getAccessKindFromInst(&I));
8761 return true;
8762 };
8763 if (!CBMemLocationAA->checkForAllAccessesToMemoryKind(
8764 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8765 return AccessedLocs.getWorstState();
8766 }
8767
8768 LLVM_DEBUG(
8769 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8770 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8771
8772 // Now handle argument memory if it might be accessed.
8773 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8774 if (HasArgAccesses)
8775 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8776
8777 LLVM_DEBUG(
8778 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8779 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8780
8781 return AccessedLocs.getAssumed();
8782 }
8783
8784 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8785 LLVM_DEBUG(
8786 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8787 << I << " [" << *Ptr << "]\n");
8788 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed,
8789 Ptr->getType()->getPointerAddressSpace());
8790 return AccessedLocs.getAssumed();
8791 }
8792
8793 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8794 << I << "\n");
8795 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8796 getAccessKindFromInst(&I));
8797 return AccessedLocs.getAssumed();
8798}
8799
8800/// An AA to represent the memory behavior function attributes.
8801struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8802 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8803 : AAMemoryLocationImpl(IRP, A) {}
8804
8805 /// See AbstractAttribute::updateImpl(Attributor &A).
8806 ChangeStatus updateImpl(Attributor &A) override {
8807
8808 const auto *MemBehaviorAA =
8809 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8810 if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
8811 if (MemBehaviorAA->isKnownReadNone())
8812 return indicateOptimisticFixpoint();
8814 "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8815 A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8816 return ChangeStatus::UNCHANGED;
8817 }
8818
8819 // The current assumed state used to determine a change.
8820 auto AssumedState = getAssumed();
8821 bool Changed = false;
8822
8823 auto CheckRWInst = [&](Instruction &I) {
8824 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8825 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8826 << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8827 removeAssumedBits(inverseLocation(MLK, false, false));
8828 // Stop once only the valid bit is set in the *not assumed location*, thus
8829 // once we don't actually exclude any memory locations in the state.
8830 return getAssumedNotAccessedLocation() != VALID_STATE;
8831 };
8832
8833 bool UsedAssumedInformation = false;
8834 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8835 UsedAssumedInformation))
8836 return indicatePessimisticFixpoint();
8837
8838 Changed |= AssumedState != getAssumed();
8839 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8840 }
8841
8842 /// See AbstractAttribute::trackStatistics()
8843 void trackStatistics() const override {
8844 if (isAssumedReadNone())
8845 STATS_DECLTRACK_FN_ATTR(readnone)
8846 else if (isAssumedArgMemOnly())
8847 STATS_DECLTRACK_FN_ATTR(argmemonly)
8848 else if (isAssumedInaccessibleMemOnly())
8849 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8850 else if (isAssumedInaccessibleOrArgMemOnly())
8851 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8852 }
8853};
8854
8855/// AAMemoryLocation attribute for call sites.
8856struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8857 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8858 : AAMemoryLocationImpl(IRP, A) {}
8859
8860 /// See AbstractAttribute::updateImpl(...).
8861 ChangeStatus updateImpl(Attributor &A) override {
8862 // TODO: Once we have call site specific value information we can provide
8863 // call site specific liveness information and then it makes
8864 // sense to specialize attributes for call sites arguments instead of
8865 // redirecting requests to the callee argument.
8866 Function *F = getAssociatedFunction();
8867 const IRPosition &FnPos = IRPosition::function(*F);
8868 auto *FnAA =
8869 A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8870 if (!FnAA)
8871 return indicatePessimisticFixpoint();
8872 bool Changed = false;
8873 auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8874 AccessKind Kind, MemoryLocationsKind MLK) {
8875 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8876 getAccessKindFromInst(I));
8877 return true;
8878 };
8879 if (!FnAA->checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8880 return indicatePessimisticFixpoint();
8881 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8882 }
8883
8884 /// See AbstractAttribute::trackStatistics()
8885 void trackStatistics() const override {
8886 if (isAssumedReadNone())
8887 STATS_DECLTRACK_CS_ATTR(readnone)
8888 }
8889};
8890} // namespace
8891
8892/// ------------------ denormal-fp-math Attribute -------------------------
8893
8894namespace {
8895struct AADenormalFPMathImpl : public AADenormalFPMath {
8896 AADenormalFPMathImpl(const IRPosition &IRP, Attributor &A)
8897 : AADenormalFPMath(IRP, A) {}
8898
8899 const std::string getAsStr(Attributor *A) const override {
8900 std::string Str("AADenormalFPMath[");
8901 raw_string_ostream OS(Str);
8902
8903 DenormalState Known = getKnown();
8904 if (Known.Mode.isValid())
8905 OS << "denormal-fp-math=" << Known.Mode;
8906 else
8907 OS << "invalid";
8908
8909 if (Known.ModeF32.isValid())
8910 OS << " denormal-fp-math-f32=" << Known.ModeF32;
8911 OS << ']';
8912 return Str;
8913 }
8914};
8915
8916struct AADenormalFPMathFunction final : AADenormalFPMathImpl {
8917 AADenormalFPMathFunction(const IRPosition &IRP, Attributor &A)
8918 : AADenormalFPMathImpl(IRP, A) {}
8919
8920 void initialize(Attributor &A) override {
8921 const Function *F = getAnchorScope();
8922 DenormalMode Mode = F->getDenormalModeRaw();
8923 DenormalMode ModeF32 = F->getDenormalModeF32Raw();
8924
8925 // TODO: Handling this here prevents handling the case where a callee has a
8926 // fixed denormal-fp-math with dynamic denormal-fp-math-f32, but called from
8927 // a function with a fully fixed mode.
8928 if (ModeF32 == DenormalMode::getInvalid())
8929 ModeF32 = Mode;
8930 Known = DenormalState{Mode, ModeF32};
8931 if (isModeFixed())
8932 indicateFixpoint();
8933 }
8934
8935 ChangeStatus updateImpl(Attributor &A) override {
8936 ChangeStatus Change = ChangeStatus::UNCHANGED;
8937
8938 auto CheckCallSite = [=, &Change, &A](AbstractCallSite CS) {
8939 Function *Caller = CS.getInstruction()->getFunction();
8940 LLVM_DEBUG(dbgs() << "[AADenormalFPMath] Call " << Caller->getName()
8941 << "->" << getAssociatedFunction()->getName() << '\n');
8942
8943 const auto *CallerInfo = A.getAAFor<AADenormalFPMath>(
8944 *this, IRPosition::function(*Caller), DepClassTy::REQUIRED);
8945 if (!CallerInfo)
8946 return false;
8947
8948 Change = Change | clampStateAndIndicateChange(this->getState(),
8949 CallerInfo->getState());
8950 return true;
8951 };
8952
8953 bool AllCallSitesKnown = true;
8954 if (!A.checkForAllCallSites(CheckCallSite, *this, true, AllCallSitesKnown))
8955 return indicatePessimisticFixpoint();
8956
8957 if (Change == ChangeStatus::CHANGED && isModeFixed())
8958 indicateFixpoint();
8959 return Change;
8960 }
8961
8962 ChangeStatus manifest(Attributor &A) override {
8963 LLVMContext &Ctx = getAssociatedFunction()->getContext();
8964
8965 SmallVector<Attribute, 2> AttrToAdd;
8966 SmallVector<StringRef, 2> AttrToRemove;
8967 if (Known.Mode == DenormalMode::getDefault()) {
8968 AttrToRemove.push_back("denormal-fp-math");
8969 } else {
8970 AttrToAdd.push_back(
8971 Attribute::get(Ctx, "denormal-fp-math", Known.Mode.str()));
8972 }
8973
8974 if (Known.ModeF32 != Known.Mode) {
8975 AttrToAdd.push_back(
8976 Attribute::get(Ctx, "denormal-fp-math-f32", Known.ModeF32.str()));
8977 } else {
8978 AttrToRemove.push_back("denormal-fp-math-f32");
8979 }
8980
8981 auto &IRP = getIRPosition();
8982
8983 // TODO: There should be a combined add and remove API.
8984 return A.removeAttrs(IRP, AttrToRemove) |
8985 A.manifestAttrs(IRP, AttrToAdd, /*ForceReplace=*/true);
8986 }
8987
8988 void trackStatistics() const override {
8989 STATS_DECLTRACK_FN_ATTR(denormal_fp_math)
8990 }
8991};
8992} // namespace
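// Editorial example of the manifested string attributes (assumed values):
//
//   define void @f() #0 { ... }
//   attributes #0 = { "denormal-fp-math"="preserve-sign,preserve-sign" }
//
// the "denormal-fp-math-f32" variant is only kept when it differs from the
// general mode, and a default (IEEE) mode drops the attribute entirely.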
8993
8994/// ------------------ Value Constant Range Attribute -------------------------
8995
8996namespace {
8997struct AAValueConstantRangeImpl : AAValueConstantRange {
8998 using StateType = IntegerRangeState;
8999 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
9000 : AAValueConstantRange(IRP, A) {}
9001
9002 /// See AbstractAttribute::initialize(..).
9003 void initialize(Attributor &A) override {
9004 if (A.hasSimplificationCallback(getIRPosition())) {
9005 indicatePessimisticFixpoint();
9006 return;
9007 }
9008
9009 // Intersect a range given by SCEV.
9010 intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
9011
9012 // Intersect a range given by LVI.
9013 intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
9014 }
9015
9016 /// See AbstractAttribute::getAsStr().
9017 const std::string getAsStr(Attributor *A) const override {
9018 std::string Str;
9020 OS << "range(" << getBitWidth() << ")<";
9021 getKnown().print(OS);
9022 OS << " / ";
9023 getAssumed().print(OS);
9024 OS << ">";
9025 return Str;
9026 }
9027
9028 /// Helper function to get a SCEV expr for the associated value at program
9029 /// point \p I.
9030 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
9031 if (!getAnchorScope())
9032 return nullptr;
9033
9034 ScalarEvolution *SE =
9035 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
9036 *getAnchorScope());
9037
9038 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
9039 *getAnchorScope());
9040
9041 if (!SE || !LI)
9042 return nullptr;
9043
9044 const SCEV *S = SE->getSCEV(&getAssociatedValue());
9045 if (!I)
9046 return S;
9047
9048 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
9049 }
9050
9051 /// Helper function to get a range from SCEV for the associated value at
9052 /// program point \p I.
9053 ConstantRange getConstantRangeFromSCEV(Attributor &A,
9054 const Instruction *I = nullptr) const {
9055 if (!getAnchorScope())
9056 return getWorstState(getBitWidth());
9057
9058 ScalarEvolution *SE =
9059 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
9060 *getAnchorScope());
9061
9062 const SCEV *S = getSCEV(A, I);
9063 if (!SE || !S)
9064 return getWorstState(getBitWidth());
9065
9066 return SE->getUnsignedRange(S);
9067 }
9068
9069 /// Helper function to get a range from LVI for the associated value at
9070 /// program point \p I.
9071 ConstantRange
9072 getConstantRangeFromLVI(Attributor &A,
9073 const Instruction *CtxI = nullptr) const {
9074 if (!getAnchorScope())
9075 return getWorstState(getBitWidth());
9076
9077 LazyValueInfo *LVI =
9078 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
9079 *getAnchorScope());
9080
9081 if (!LVI || !CtxI)
9082 return getWorstState(getBitWidth());
9083 return LVI->getConstantRange(&getAssociatedValue(),
9084 const_cast<Instruction *>(CtxI),
9085 /*UndefAllowed*/ false);
9086 }
9087
9088 /// Return true if \p CtxI is valid for querying outside analyses.
9089 /// This basically makes sure we do not ask intra-procedural analysis
9090 /// about a context in the wrong function or a context that violates
9091 /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
9092 /// if the original context of this AA is OK or should be considered invalid.
9093 bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
9094 const Instruction *CtxI,
9095 bool AllowAACtxI) const {
9096 if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
9097 return false;
9098
9099 // Our context might be in a different function, neither intra-procedural
9100 // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
9101 if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
9102 return false;
9103
9104 // If the context is not dominated by the value there are paths to the
9105 // context that do not define the value. This cannot be handled by
9106 // LazyValueInfo so we need to bail.
9107 if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
9108 InformationCache &InfoCache = A.getInfoCache();
9109 const DominatorTree *DT =
9110 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
9111 *I->getFunction());
9112 return DT && DT->dominates(I, CtxI);
9113 }
9114
9115 return true;
9116 }
9117
9118 /// See AAValueConstantRange::getKnownConstantRange(..).
9119 ConstantRange
9120 getKnownConstantRange(Attributor &A,
9121 const Instruction *CtxI = nullptr) const override {
9122 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9123 /* AllowAACtxI */ false))
9124 return getKnown();
9125
9126 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9127 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9128 return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
9129 }
9130
9131 /// See AAValueConstantRange::getAssumedConstantRange(..).
9132 ConstantRange
9133 getAssumedConstantRange(Attributor &A,
9134 const Instruction *CtxI = nullptr) const override {
9135 // TODO: Make SCEV use Attributor assumption.
9136 // We may be able to bound a variable range via assumptions in
9137 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
9138 // evolve to x^2 + x, then we can say that y is in [2, 12].
9139 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9140 /* AllowAACtxI */ false))
9141 return getAssumed();
9142
9143 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9144 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9145 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
9146 }
9147
9148 /// Helper function to create MDNode for range metadata.
9149 static MDNode *
9150 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
9151 const ConstantRange &AssumedConstantRange) {
9152 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
9153 Ty, AssumedConstantRange.getLower())),
9154 ConstantAsMetadata::get(ConstantInt::get(
9155 Ty, AssumedConstantRange.getUpper()))};
9156 return MDNode::get(Ctx, LowAndHigh);
9157 }
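// Editorial example: an assumed range [0, 10) on an i32 load manifests as
// the half-open !range pair produced above:
//
//   %v = load i32, ptr %p, !range !0
//   !0 = !{i32 0, i32 10}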
9158
9159 /// Return true if \p Assumed is included in \p KnownRanges.
9160 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
9161
9162 if (Assumed.isFullSet())
9163 return false;
9164
9165 if (!KnownRanges)
9166 return true;
9167
9168 // If multiple ranges are annotated in IR, we give up annotating the assumed
9169 // range for now.
9170
9171 // TODO: If there exists a known range that contains the assumed range, we
9172 // can say the assumed range is better.
9173 if (KnownRanges->getNumOperands() > 2)
9174 return false;
9175
9176 ConstantInt *Lower =
9177 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
9178 ConstantInt *Upper =
9179 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
9180
9181 ConstantRange Known(Lower->getValue(), Upper->getValue());
9182 return Known.contains(Assumed) && Known != Assumed;
9183 }
9184
9185 /// Helper function to set range metadata.
9186 static bool
9187 setRangeMetadataIfisBetterRange(Instruction *I,
9188 const ConstantRange &AssumedConstantRange) {
9189 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
9190 if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
9191 if (!AssumedConstantRange.isEmptySet()) {
9192 I->setMetadata(LLVMContext::MD_range,
9193 getMDNodeForConstantRange(I->getType(), I->getContext(),
9194 AssumedConstantRange));
9195 return true;
9196 }
9197 }
9198 return false;
9199 }
9200
9201 /// See AbstractAttribute::manifest()
9202 ChangeStatus manifest(Attributor &A) override {
9203 ChangeStatus Changed = ChangeStatus::UNCHANGED;
9204 ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
9205 assert(!AssumedConstantRange.isFullSet() && "Invalid state");
9206
9207 auto &V = getAssociatedValue();
9208 if (!AssumedConstantRange.isEmptySet() &&
9209 !AssumedConstantRange.isSingleElement()) {
9210 if (Instruction *I = dyn_cast<Instruction>(&V)) {
9211 assert(I == getCtxI() && "Should not annotate an instruction which is "
9212 "not the context instruction");
9213 if (isa<CallInst>(I) || isa<LoadInst>(I))
9214 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
9215 Changed = ChangeStatus::CHANGED;
9216 }
9217 }
9218
9219 return Changed;
9220 }
9221};
9222
9223struct AAValueConstantRangeArgument final
9224 : AAArgumentFromCallSiteArguments<
9225 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9226 true /* BridgeCallBaseContext */> {
9227 using Base = AAArgumentFromCallSiteArguments<
9228 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9229 true /* BridgeCallBaseContext */>;
9230 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
9231 : Base(IRP, A) {}
9232
9233 /// See AbstractAttribute::trackStatistics()
9234 void trackStatistics() const override {
9235 STATS_DECLTRACK_ARG_ATTR(value_range)
9236 }
9237};
9238
9239struct AAValueConstantRangeReturned
9240 : AAReturnedFromReturnedValues<AAValueConstantRange,
9241 AAValueConstantRangeImpl,
9242 AAValueConstantRangeImpl::StateType,
9243 /* PropagateCallBaseContext */ true> {
9244 using Base =
9245 AAReturnedFromReturnedValues<AAValueConstantRange,
9246 AAValueConstantRangeImpl,
9247 AAValueConstantRangeImpl::StateType,
9248 /* PropagateCallBaseContext */ true>;
9249 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
9250 : Base(IRP, A) {}
9251
9252 /// See AbstractAttribute::initialize(...).
9253 void initialize(Attributor &A) override {
9254 if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9255 indicatePessimisticFixpoint();
9256 }
9257
9258 /// See AbstractAttribute::trackStatistics()
9259 void trackStatistics() const override {
9260 STATS_DECLTRACK_FNRET_ATTR(value_range)
9261 }
9262};
9263
9264struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
9265 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
9266 : AAValueConstantRangeImpl(IRP, A) {}
9267
9268 /// See AbstractAttribute::initialize(...).
9269 void initialize(Attributor &A) override {
9270 AAValueConstantRangeImpl::initialize(A);
9271 if (isAtFixpoint())
9272 return;
9273
9274 Value &V = getAssociatedValue();
9275
9276 if (auto *C = dyn_cast<ConstantInt>(&V)) {
9277 unionAssumed(ConstantRange(C->getValue()));
9278 indicateOptimisticFixpoint();
9279 return;
9280 }
9281
9282 if (isa<UndefValue>(&V)) {
9283 // Collapse the undef state to 0.
9284 unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
9285 indicateOptimisticFixpoint();
9286 return;
9287 }
9288
9289 if (isa<CallBase>(&V))
9290 return;
9291
9292 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
9293 return;
9294
9295 // If it is a load instruction with range metadata, use it.
9296 if (LoadInst *LI = dyn_cast<LoadInst>(&V))
9297 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
9298 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9299 return;
9300 }
9301
9302 // We can work with PHI and select instructions as we traverse their operands
9303 // during update.
9304 if (isa<SelectInst>(V) || isa<PHINode>(V))
9305 return;
9306
9307 // Otherwise we give up.
9308 indicatePessimisticFixpoint();
9309
9310 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
9311 << getAssociatedValue() << "\n");
9312 }
9313
9314 bool calculateBinaryOperator(
9315 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
9316 const Instruction *CtxI,
9317 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9318 Value *LHS = BinOp->getOperand(0);
9319 Value *RHS = BinOp->getOperand(1);
9320
9321 // Simplify the operands first.
9322 bool UsedAssumedInformation = false;
9323 const auto &SimplifiedLHS = A.getAssumedSimplified(
9324 IRPosition::value(*LHS, getCallBaseContext()), *this,
9325 UsedAssumedInformation, AA::Interprocedural);
9326 if (!SimplifiedLHS.has_value())
9327 return true;
9328 if (!*SimplifiedLHS)
9329 return false;
9330 LHS = *SimplifiedLHS;
9331
9332 const auto &SimplifiedRHS = A.getAssumedSimplified(
9333 IRPosition::value(*RHS, getCallBaseContext()), *this,
9334 UsedAssumedInformation, AA::Interprocedural);
9335 if (!SimplifiedRHS.has_value())
9336 return true;
9337 if (!*SimplifiedRHS)
9338 return false;
9339 RHS = *SimplifiedRHS;
9340
9341 // TODO: Allow non integers as well.
9342 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9343 return false;
9344
9345 auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9346 *this, IRPosition::value(*LHS, getCallBaseContext()),
9347 DepClassTy::REQUIRED);
9348 if (!LHSAA)
9349 return false;
9350 QuerriedAAs.push_back(LHSAA);
9351 auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9352
9353 auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9354 *this, IRPosition::value(*RHS, getCallBaseContext()),
9355 DepClassTy::REQUIRED);
9356 if (!RHSAA)
9357 return false;
9358 QuerriedAAs.push_back(RHSAA);
9359 auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9360
9361 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
9362
9363 T.unionAssumed(AssumedRange);
9364
9365 // TODO: Track a known state too.
9366
9367 return T.isValidState();
9368 }
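// Editor's sketch: ConstantRange::binaryOp performs the interval arithmetic
// used above, e.g. for an 8-bit add (ranges are half-open):
//
//   ConstantRange L(APInt(8, 1), APInt(8, 3));          // {1, 2}
//   ConstantRange R(APInt(8, 10), APInt(8, 12));        // {10, 11}
//   ConstantRange S = L.binaryOp(Instruction::Add, R);  // [11, 14)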
9369
9370 bool calculateCastInst(
9371 Attributor &A, CastInst *CastI, IntegerRangeState &T,
9372 const Instruction *CtxI,
9373 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9374 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
9375 // TODO: Allow non integers as well.
9376 Value *OpV = CastI->getOperand(0);
9377
9378 // Simplify the operand first.
9379 bool UsedAssumedInformation = false;
9380 const auto &SimplifiedOpV = A.getAssumedSimplified(
9381 IRPosition::value(*OpV, getCallBaseContext()), *this,
9382 UsedAssumedInformation, AA::Interprocedural);
9383 if (!SimplifiedOpV.has_value())
9384 return true;
9385 if (!*SimplifiedOpV)
9386 return false;
9387 OpV = *SimplifiedOpV;
9388
9389 if (!OpV->getType()->isIntegerTy())
9390 return false;
9391
9392 auto *OpAA = A.getAAFor<AAValueConstantRange>(
9393 *this, IRPosition::value(*OpV, getCallBaseContext()),
9394 DepClassTy::REQUIRED);
9395 if (!OpAA)
9396 return false;
9397 QuerriedAAs.push_back(OpAA);
9398 T.unionAssumed(OpAA->getAssumed().castOp(CastI->getOpcode(),
9399 getState().getBitWidth()));
9400 return T.isValidState();
9401 }
9402
9403 bool
9404 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
9405 const Instruction *CtxI,
9406 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9407 Value *LHS = CmpI->getOperand(0);
9408 Value *RHS = CmpI->getOperand(1);
9409
9410 // Simplify the operands first.
9411 bool UsedAssumedInformation = false;
9412 const auto &SimplifiedLHS = A.getAssumedSimplified(
9413 IRPosition::value(*LHS, getCallBaseContext()), *this,
9414 UsedAssumedInformation, AA::Interprocedural);
9415 if (!SimplifiedLHS.has_value())
9416 return true;
9417 if (!*SimplifiedLHS)
9418 return false;
9419 LHS = *SimplifiedLHS;
9420
9421 const auto &SimplifiedRHS = A.getAssumedSimplified(
9422 IRPosition::value(*RHS, getCallBaseContext()), *this,
9423 UsedAssumedInformation, AA::Interprocedural);
9424 if (!SimplifiedRHS.has_value())
9425 return true;
9426 if (!*SimplifiedRHS)
9427 return false;
9428 RHS = *SimplifiedRHS;
9429
9430 // TODO: Allow non integers as well.
9431 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9432 return false;
9433
9434 auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9435 *this, IRPosition::value(*LHS, getCallBaseContext()),
9436 DepClassTy::REQUIRED);
9437 if (!LHSAA)
9438 return false;
9439 QuerriedAAs.push_back(LHSAA);
9440 auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9441 *this, IRPosition::value(*RHS, getCallBaseContext()),
9442 DepClassTy::REQUIRED);
9443 if (!RHSAA)
9444 return false;
9445 QuerriedAAs.push_back(RHSAA);
9446 auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9447 auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9448
9449 // If one of them is empty set, we can't decide.
9450 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
9451 return true;
9452
9453 bool MustTrue = false, MustFalse = false;
9454
9455 auto AllowedRegion =
9456 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
9457
9458 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
9459 MustFalse = true;
9460
9461 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
9462 MustTrue = true;
9463
9464 assert((!MustTrue || !MustFalse) &&
9465 "Either MustTrue or MustFalse should be false!");
9466
9467 if (MustTrue)
9468 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
9469 else if (MustFalse)
9470 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
9471 else
9472 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
9473
9474 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " after "
9475 << (MustTrue ? "true" : (MustFalse ? "false" : "unknown"))
9476 << ": " << T << "\n\t" << *LHSAA << "\t<op>\n\t"
9477 << *RHSAA);
9478
9479 // TODO: Track a known state too.
9480 return T.isValidState();
9481 }
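// Editorial example for the compare handling above: with LHS in [0, 5) and
// RHS in [10, 20), LHSAARange.icmp(ICmpInst::ICMP_ULT, RHSAARange) holds for
// every value pair, so MustTrue is set and the 1-bit state collapses to {1};
// a disjoint allowed region sets MustFalse and collapses it to {0} instead.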
9482
9483 /// See AbstractAttribute::updateImpl(...).
9484 ChangeStatus updateImpl(Attributor &A) override {
9485
9487 auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
9488 Instruction *I = dyn_cast<Instruction>(&V);
9489 if (!I || isa<CallBase>(I)) {
9490
9491 // Simplify the operand first.
9492 bool UsedAssumedInformation = false;
9493 const auto &SimplifiedOpV = A.getAssumedSimplified(
9494 IRPosition::value(V, getCallBaseContext()), *this,
9495 UsedAssumedInformation, AA::Interprocedural);
9496 if (!SimplifiedOpV.has_value())
9497 return true;
9498 if (!*SimplifiedOpV)
9499 return false;
9500 Value *VPtr = *SimplifiedOpV;
9501
9502 // If the value is not an instruction, we query the AA via the Attributor.
9503 const auto *AA = A.getAAFor<AAValueConstantRange>(
9504 *this, IRPosition::value(*VPtr, getCallBaseContext()),
9505 DepClassTy::REQUIRED);
9506
9507      // The clamp operator is not used here so that the program point CtxI
9508      // can be utilized.
9508 if (AA)
9509 T.unionAssumed(AA->getAssumedConstantRange(A, CtxI));
9510 else
9511 return false;
9512
9513 return T.isValidState();
9514 }
9515
9516      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
9517 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
9518 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
9519 return false;
9520 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
9521 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
9522 return false;
9523 } else if (auto *CastI = dyn_cast<CastInst>(I)) {
9524 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
9525 return false;
9526 } else {
9527 // Give up with other instructions.
9528 // TODO: Add other instructions
9529
9530 T.indicatePessimisticFixpoint();
9531 return false;
9532 }
9533
9534 // Catch circular reasoning in a pessimistic way for now.
9535 // TODO: Check how the range evolves and if we stripped anything, see also
9536 // AADereferenceable or AAAlign for similar situations.
9537 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
9538 if (QueriedAA != this)
9539 continue;
9540      // If we are in a steady state we do not need to worry.
9541 if (T.getAssumed() == getState().getAssumed())
9542 continue;
9543 T.indicatePessimisticFixpoint();
9544 }
9545
9546 return T.isValidState();
9547 };
9548
9549 if (!VisitValueCB(getAssociatedValue(), getCtxI()))
9550 return indicatePessimisticFixpoint();
9551
9552 // Ensure that long def-use chains can't cause circular reasoning either by
9553 // introducing a cutoff below.
9554 if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
9555 return ChangeStatus::UNCHANGED;
9556 if (++NumChanges > MaxNumChanges) {
9557 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
9558                        << " changes but only " << MaxNumChanges
9559 << " are allowed to avoid cyclic reasoning.");
9560 return indicatePessimisticFixpoint();
9561 }
9562 return ChangeStatus::CHANGED;
9563 }
9564
9565 /// See AbstractAttribute::trackStatistics()
9566  void trackStatistics() const override {
9567    STATS_DECLTRACK_FLOATING_ATTR(value_range)
9568 }
9569
9570 /// Tracker to bail after too many widening steps of the constant range.
9571 int NumChanges = 0;
9572
9573 /// Upper bound for the number of allowed changes (=widening steps) for the
9574 /// constant range before we give up.
9575 static constexpr int MaxNumChanges = 5;
9576};
9577
9578struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
9579 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
9580 : AAValueConstantRangeImpl(IRP, A) {}
9581
9582  /// See AbstractAttribute::updateImpl(...).
9583 ChangeStatus updateImpl(Attributor &A) override {
9584 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
9585 "not be called");
9586 }
9587
9588 /// See AbstractAttribute::trackStatistics()
9589 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
9590};
9591
9592struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
9593 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
9594 : AAValueConstantRangeFunction(IRP, A) {}
9595
9596 /// See AbstractAttribute::trackStatistics()
9597 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
9598};
9599
9600struct AAValueConstantRangeCallSiteReturned
9601 : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9602 AAValueConstantRangeImpl::StateType,
9603 /* IntroduceCallBaseContext */ true> {
9604 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
9605 : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9606 AAValueConstantRangeImpl::StateType,
9607 /* IntroduceCallBaseContext */ true>(IRP, A) {}
9608
9609 /// See AbstractAttribute::initialize(...).
9610 void initialize(Attributor &A) override {
9611    // If it is a call instruction with range metadata, use the metadata.
9612 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
9613 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
9614 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9615
9616 AAValueConstantRangeImpl::initialize(A);
9617 }
9618
9619 /// See AbstractAttribute::trackStatistics()
9620 void trackStatistics() const override {
9621 STATS_DECLTRACK_CSRET_ATTR(value_range)
9622 }
9623};
9624struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
9625 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
9626 : AAValueConstantRangeFloating(IRP, A) {}
9627
9628 /// See AbstractAttribute::manifest()
9629 ChangeStatus manifest(Attributor &A) override {
9630 return ChangeStatus::UNCHANGED;
9631 }
9632
9633 /// See AbstractAttribute::trackStatistics()
9634 void trackStatistics() const override {
9635 STATS_DECLTRACK_CSARG_ATTR(value_range)
9636 }
9637};
9638} // namespace
9639
9640/// ------------------ Potential Values Attribute -------------------------
9641
9642namespace {
9643struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
9644 using StateType = PotentialConstantIntValuesState;
9645
9646 AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
9647 : AAPotentialConstantValues(IRP, A) {}
9648
9649 /// See AbstractAttribute::initialize(..).
9650 void initialize(Attributor &A) override {
9651 if (A.hasSimplificationCallback(getIRPosition()))
9652 indicatePessimisticFixpoint();
9653 else
9654 AAPotentialConstantValues::initialize(A);
9655 }
9656
9657 bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
9658                                 bool &ContainsUndef, bool ForSelf) {
9659    SmallVector<AA::ValueAndContext> Values;
9660 bool UsedAssumedInformation = false;
9661 if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
9662 UsedAssumedInformation)) {
9663 // Avoid recursion when the caller is computing constant values for this
9664 // IRP itself.
9665 if (ForSelf)
9666 return false;
9667 if (!IRP.getAssociatedType()->isIntegerTy())
9668 return false;
9669 auto *PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9670 *this, IRP, DepClassTy::REQUIRED);
9671 if (!PotentialValuesAA || !PotentialValuesAA->getState().isValidState())
9672 return false;
9673 ContainsUndef = PotentialValuesAA->getState().undefIsContained();
9674 S = PotentialValuesAA->getState().getAssumedSet();
9675 return true;
9676 }
9677
9678 // Copy all the constant values, except UndefValue. ContainsUndef is true
9679 // iff Values contains only UndefValue instances. If there are other known
9680 // constants, then UndefValue is dropped.
9681 ContainsUndef = false;
9682 for (auto &It : Values) {
9683 if (isa<UndefValue>(It.getValue())) {
9684 ContainsUndef = true;
9685 continue;
9686 }
9687 auto *CI = dyn_cast<ConstantInt>(It.getValue());
9688 if (!CI)
9689 return false;
9690 S.insert(CI->getValue());
9691 }
9692 ContainsUndef &= S.empty();
9693
9694 return true;
9695 }
9696
9697 /// See AbstractAttribute::getAsStr().
9698 const std::string getAsStr(Attributor *A) const override {
9699    std::string Str;
9700    llvm::raw_string_ostream OS(Str);
9701 OS << getState();
9702 return Str;
9703 }
9704
9705 /// See AbstractAttribute::updateImpl(...).
9706 ChangeStatus updateImpl(Attributor &A) override {
9707 return indicatePessimisticFixpoint();
9708 }
9709};
9710
9711struct AAPotentialConstantValuesArgument final
9712 : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9713 AAPotentialConstantValuesImpl,
9714 PotentialConstantIntValuesState> {
9715 using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9716                                               AAPotentialConstantValuesImpl,
9717                                               PotentialConstantIntValuesState>;
9718 AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
9719 : Base(IRP, A) {}
9720
9721 /// See AbstractAttribute::trackStatistics()
9722 void trackStatistics() const override {
9723 STATS_DECLTRACK_ARG_ATTR(potential_values)
9724 }
9725};
9726
9727struct AAPotentialConstantValuesReturned
9728 : AAReturnedFromReturnedValues<AAPotentialConstantValues,
9729 AAPotentialConstantValuesImpl> {
9730 using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
9731 AAPotentialConstantValuesImpl>;
9732 AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
9733 : Base(IRP, A) {}
9734
9735 void initialize(Attributor &A) override {
9736 if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9737 indicatePessimisticFixpoint();
9738 Base::initialize(A);
9739 }
9740
9741 /// See AbstractAttribute::trackStatistics()
9742 void trackStatistics() const override {
9743 STATS_DECLTRACK_FNRET_ATTR(potential_values)
9744 }
9745};
9746
9747struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
9748 AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
9749 : AAPotentialConstantValuesImpl(IRP, A) {}
9750
9751 /// See AbstractAttribute::initialize(..).
9752 void initialize(Attributor &A) override {
9753 AAPotentialConstantValuesImpl::initialize(A);
9754 if (isAtFixpoint())
9755 return;
9756
9757 Value &V = getAssociatedValue();
9758
9759 if (auto *C = dyn_cast<ConstantInt>(&V)) {
9760 unionAssumed(C->getValue());
9761 indicateOptimisticFixpoint();
9762 return;
9763 }
9764
9765 if (isa<UndefValue>(&V)) {
9766 unionAssumedWithUndef();
9767 indicateOptimisticFixpoint();
9768 return;
9769 }
9770
9771 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9772 return;
9773
9774 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9775 return;
9776
9777 indicatePessimisticFixpoint();
9778
9779 LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9780 << getAssociatedValue() << "\n");
9781 }
9782
9783 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9784 const APInt &RHS) {
9785 return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9786 }
9787
9788 static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9789 uint32_t ResultBitWidth) {
9790 Instruction::CastOps CastOp = CI->getOpcode();
9791 switch (CastOp) {
9792 default:
9793 llvm_unreachable("unsupported or not integer cast");
9794 case Instruction::Trunc:
9795 return Src.trunc(ResultBitWidth);
9796 case Instruction::SExt:
9797 return Src.sext(ResultBitWidth);
9798 case Instruction::ZExt:
9799 return Src.zext(ResultBitWidth);
9800 case Instruction::BitCast:
9801 return Src;
9802 }
9803 }
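  // For example, `trunc i32 300 to i8` maps Src = APInt(32, 300) to
  // APInt(8, 44), since only the low 8 bits (300 mod 256 == 44) survive the
  // truncation; sext and zext instead widen Src per the cast's signedness.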
9804
9805 static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9806 const APInt &LHS, const APInt &RHS,
9807 bool &SkipOperation, bool &Unsupported) {
9808 Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9809 // Unsupported is set to true when the binary operator is not supported.
9810    // SkipOperation is set to true when UB occurs with the given operand pair
9811 // (LHS, RHS).
9812 // TODO: we should look at nsw and nuw keywords to handle operations
9813    // that create poison or undef values.
9814 switch (BinOpcode) {
9815 default:
9816 Unsupported = true;
9817 return LHS;
9818 case Instruction::Add:
9819 return LHS + RHS;
9820 case Instruction::Sub:
9821 return LHS - RHS;
9822 case Instruction::Mul:
9823 return LHS * RHS;
9824 case Instruction::UDiv:
9825 if (RHS.isZero()) {
9826 SkipOperation = true;
9827 return LHS;
9828 }
9829 return LHS.udiv(RHS);
9830 case Instruction::SDiv:
9831 if (RHS.isZero()) {
9832 SkipOperation = true;
9833 return LHS;
9834 }
9835 return LHS.sdiv(RHS);
9836 case Instruction::URem:
9837 if (RHS.isZero()) {
9838 SkipOperation = true;
9839 return LHS;
9840 }
9841 return LHS.urem(RHS);
9842 case Instruction::SRem:
9843 if (RHS.isZero()) {
9844 SkipOperation = true;
9845 return LHS;
9846 }
9847 return LHS.srem(RHS);
9848 case Instruction::Shl:
9849 return LHS.shl(RHS);
9850 case Instruction::LShr:
9851 return LHS.lshr(RHS);
9852 case Instruction::AShr:
9853 return LHS.ashr(RHS);
9854 case Instruction::And:
9855 return LHS & RHS;
9856 case Instruction::Or:
9857 return LHS | RHS;
9858 case Instruction::Xor:
9859 return LHS ^ RHS;
9860 }
9861 }
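  // For example, for `udiv` the pair (LHS = 7, RHS = 0) sets SkipOperation
  // and is dropped as immediate UB, while (7, 2) contributes 7 / 2 == 3;
  // opcodes not handled above simply set Unsupported.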
9862
9863 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9864 const APInt &LHS, const APInt &RHS) {
9865 bool SkipOperation = false;
9866 bool Unsupported = false;
9867 APInt Result =
9868 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9869 if (Unsupported)
9870 return false;
9871 // If SkipOperation is true, we can ignore this operand pair (L, R).
9872 if (!SkipOperation)
9873 unionAssumed(Result);
9874 return isValidState();
9875 }
9876
9877 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9878 auto AssumedBefore = getAssumed();
9879 Value *LHS = ICI->getOperand(0);
9880 Value *RHS = ICI->getOperand(1);
9881
9882 bool LHSContainsUndef = false, RHSContainsUndef = false;
9883 SetTy LHSAAPVS, RHSAAPVS;
9884 if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9885 LHSContainsUndef, /* ForSelf */ false) ||
9886 !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9887 RHSContainsUndef, /* ForSelf */ false))
9888 return indicatePessimisticFixpoint();
9889
9890 // TODO: make use of undef flag to limit potential values aggressively.
9891 bool MaybeTrue = false, MaybeFalse = false;
9892 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9893 if (LHSContainsUndef && RHSContainsUndef) {
9894 // The result of any comparison between undefs can be soundly replaced
9895 // with undef.
9896 unionAssumedWithUndef();
9897 } else if (LHSContainsUndef) {
9898 for (const APInt &R : RHSAAPVS) {
9899 bool CmpResult = calculateICmpInst(ICI, Zero, R);
9900 MaybeTrue |= CmpResult;
9901 MaybeFalse |= !CmpResult;
9902 if (MaybeTrue & MaybeFalse)
9903 return indicatePessimisticFixpoint();
9904 }
9905 } else if (RHSContainsUndef) {
9906 for (const APInt &L : LHSAAPVS) {
9907 bool CmpResult = calculateICmpInst(ICI, L, Zero);
9908 MaybeTrue |= CmpResult;
9909 MaybeFalse |= !CmpResult;
9910 if (MaybeTrue & MaybeFalse)
9911 return indicatePessimisticFixpoint();
9912 }
9913 } else {
9914 for (const APInt &L : LHSAAPVS) {
9915 for (const APInt &R : RHSAAPVS) {
9916 bool CmpResult = calculateICmpInst(ICI, L, R);
9917 MaybeTrue |= CmpResult;
9918 MaybeFalse |= !CmpResult;
9919 if (MaybeTrue & MaybeFalse)
9920 return indicatePessimisticFixpoint();
9921 }
9922 }
9923 }
9924 if (MaybeTrue)
9925 unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9926 if (MaybeFalse)
9927 unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9928 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9929 : ChangeStatus::CHANGED;
9930 }
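  // For example, with LHS set {3} and RHS set {3, 5}, `eq` is true for
  // (3, 3) but false for (3, 5), so MaybeTrue and MaybeFalse are both set
  // and we fall back to the pessimistic fixpoint above.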
9931
9932 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9933 auto AssumedBefore = getAssumed();
9934 Value *LHS = SI->getTrueValue();
9935 Value *RHS = SI->getFalseValue();
9936
9937 bool UsedAssumedInformation = false;
9938 std::optional<Constant *> C = A.getAssumedConstant(
9939 *SI->getCondition(), *this, UsedAssumedInformation);
9940
9941 // Check if we only need one operand.
9942 bool OnlyLeft = false, OnlyRight = false;
9943 if (C && *C && (*C)->isOneValue())
9944 OnlyLeft = true;
9945 else if (C && *C && (*C)->isZeroValue())
9946 OnlyRight = true;
9947
9948 bool LHSContainsUndef = false, RHSContainsUndef = false;
9949 SetTy LHSAAPVS, RHSAAPVS;
9950 if (!OnlyRight &&
9951 !fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9952 LHSContainsUndef, /* ForSelf */ false))
9953 return indicatePessimisticFixpoint();
9954
9955 if (!OnlyLeft &&
9956 !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9957 RHSContainsUndef, /* ForSelf */ false))
9958 return indicatePessimisticFixpoint();
9959
9960 if (OnlyLeft || OnlyRight) {
9961 // select (true/false), lhs, rhs
9962 auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
9963 auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
9964
9965 if (Undef)
9966 unionAssumedWithUndef();
9967 else {
9968 for (const auto &It : *OpAA)
9969 unionAssumed(It);
9970 }
9971
9972 } else if (LHSContainsUndef && RHSContainsUndef) {
9973      // select i1 *, undef, undef => undef
9974 unionAssumedWithUndef();
9975 } else {
9976 for (const auto &It : LHSAAPVS)
9977 unionAssumed(It);
9978 for (const auto &It : RHSAAPVS)
9979 unionAssumed(It);
9980 }
9981 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9982 : ChangeStatus::CHANGED;
9983 }
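  // For example, `select i1 %c, i32 1, i32 2` with an unknown condition
  // unions both operand sets into {1, 2}; once %c simplifies to a constant,
  // only the selected operand's set survives.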
9984
9985 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9986 auto AssumedBefore = getAssumed();
9987 if (!CI->isIntegerCast())
9988 return indicatePessimisticFixpoint();
9989 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9990 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9991 Value *Src = CI->getOperand(0);
9992
9993 bool SrcContainsUndef = false;
9994 SetTy SrcPVS;
9995 if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
9996 SrcContainsUndef, /* ForSelf */ false))
9997 return indicatePessimisticFixpoint();
9998
9999 if (SrcContainsUndef)
10000 unionAssumedWithUndef();
10001 else {
10002 for (const APInt &S : SrcPVS) {
10003 APInt T = calculateCastInst(CI, S, ResultBitWidth);
10004 unionAssumed(T);
10005 }
10006 }
10007 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10008 : ChangeStatus::CHANGED;
10009 }
10010
10011 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
10012 auto AssumedBefore = getAssumed();
10013 Value *LHS = BinOp->getOperand(0);
10014 Value *RHS = BinOp->getOperand(1);
10015
10016 bool LHSContainsUndef = false, RHSContainsUndef = false;
10017 SetTy LHSAAPVS, RHSAAPVS;
10018 if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
10019 LHSContainsUndef, /* ForSelf */ false) ||
10020 !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
10021 RHSContainsUndef, /* ForSelf */ false))
10022 return indicatePessimisticFixpoint();
10023
10024 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
10025
10026 // TODO: make use of undef flag to limit potential values aggressively.
10027 if (LHSContainsUndef && RHSContainsUndef) {
10028 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
10029 return indicatePessimisticFixpoint();
10030 } else if (LHSContainsUndef) {
10031 for (const APInt &R : RHSAAPVS) {
10032 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
10033 return indicatePessimisticFixpoint();
10034 }
10035 } else if (RHSContainsUndef) {
10036 for (const APInt &L : LHSAAPVS) {
10037 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
10038 return indicatePessimisticFixpoint();
10039 }
10040 } else {
10041 for (const APInt &L : LHSAAPVS) {
10042 for (const APInt &R : RHSAAPVS) {
10043 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
10044 return indicatePessimisticFixpoint();
10045 }
10046 }
10047 }
10048 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10049 : ChangeStatus::CHANGED;
10050 }
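  // For example, `mul` with LHS set {2, 3} and RHS set {4} produces the
  // assumed set {8, 12}; an operand that only contains undef is treated as 0.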
10051
10052 ChangeStatus updateWithInstruction(Attributor &A, Instruction *Inst) {
10053 auto AssumedBefore = getAssumed();
10054 SetTy Incoming;
10055 bool ContainsUndef;
10056 if (!fillSetWithConstantValues(A, IRPosition::value(*Inst), Incoming,
10057 ContainsUndef, /* ForSelf */ true))
10058 return indicatePessimisticFixpoint();
10059 if (ContainsUndef) {
10060 unionAssumedWithUndef();
10061 } else {
10062 for (const auto &It : Incoming)
10063 unionAssumed(It);
10064 }
10065 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10066 : ChangeStatus::CHANGED;
10067 }
10068
10069 /// See AbstractAttribute::updateImpl(...).
10070 ChangeStatus updateImpl(Attributor &A) override {
10071 Value &V = getAssociatedValue();
10072 Instruction *I = dyn_cast<Instruction>(&V);
10073
10074 if (auto *ICI = dyn_cast<ICmpInst>(I))
10075 return updateWithICmpInst(A, ICI);
10076
10077 if (auto *SI = dyn_cast<SelectInst>(I))
10078 return updateWithSelectInst(A, SI);
10079
10080 if (auto *CI = dyn_cast<CastInst>(I))
10081 return updateWithCastInst(A, CI);
10082
10083 if (auto *BinOp = dyn_cast<BinaryOperator>(I))
10084 return updateWithBinaryOperator(A, BinOp);
10085
10086 if (isa<PHINode>(I) || isa<LoadInst>(I))
10087 return updateWithInstruction(A, I);
10088
10089 return indicatePessimisticFixpoint();
10090 }
10091
10092 /// See AbstractAttribute::trackStatistics()
10093 void trackStatistics() const override {
10094 STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10095 }
10096};
10097
10098struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
10099 AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
10100 : AAPotentialConstantValuesImpl(IRP, A) {}
10101
10102  /// See AbstractAttribute::updateImpl(...).
10103 ChangeStatus updateImpl(Attributor &A) override {
10104    llvm_unreachable(
10105        "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
10106 "not be called");
10107 }
10108
10109 /// See AbstractAttribute::trackStatistics()
10110 void trackStatistics() const override {
10111 STATS_DECLTRACK_FN_ATTR(potential_values)
10112 }
10113};
10114
10115struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
10116 AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
10117 : AAPotentialConstantValuesFunction(IRP, A) {}
10118
10119 /// See AbstractAttribute::trackStatistics()
10120 void trackStatistics() const override {
10121 STATS_DECLTRACK_CS_ATTR(potential_values)
10122 }
10123};
10124
10125struct AAPotentialConstantValuesCallSiteReturned
10126 : AACalleeToCallSite<AAPotentialConstantValues,
10127 AAPotentialConstantValuesImpl> {
10128 AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
10129 Attributor &A)
10130 : AACalleeToCallSite<AAPotentialConstantValues,
10131 AAPotentialConstantValuesImpl>(IRP, A) {}
10132
10133 /// See AbstractAttribute::trackStatistics()
10134 void trackStatistics() const override {
10135 STATS_DECLTRACK_CSRET_ATTR(potential_values)
10136 }
10137};
10138
10139struct AAPotentialConstantValuesCallSiteArgument
10140 : AAPotentialConstantValuesFloating {
10141 AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
10142 Attributor &A)
10143 : AAPotentialConstantValuesFloating(IRP, A) {}
10144
10145 /// See AbstractAttribute::initialize(..).
10146 void initialize(Attributor &A) override {
10147 AAPotentialConstantValuesImpl::initialize(A);
10148 if (isAtFixpoint())
10149 return;
10150
10151 Value &V = getAssociatedValue();
10152
10153 if (auto *C = dyn_cast<ConstantInt>(&V)) {
10154 unionAssumed(C->getValue());
10155 indicateOptimisticFixpoint();
10156 return;
10157 }
10158
10159 if (isa<UndefValue>(&V)) {
10160 unionAssumedWithUndef();
10161 indicateOptimisticFixpoint();
10162 return;
10163 }
10164 }
10165
10166 /// See AbstractAttribute::updateImpl(...).
10167 ChangeStatus updateImpl(Attributor &A) override {
10168 Value &V = getAssociatedValue();
10169 auto AssumedBefore = getAssumed();
10170 auto *AA = A.getAAFor<AAPotentialConstantValues>(
10171 *this, IRPosition::value(V), DepClassTy::REQUIRED);
10172 if (!AA)
10173 return indicatePessimisticFixpoint();
10174 const auto &S = AA->getAssumed();
10175 unionAssumed(S);
10176 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10177 : ChangeStatus::CHANGED;
10178 }
10179
10180 /// See AbstractAttribute::trackStatistics()
10181 void trackStatistics() const override {
10182 STATS_DECLTRACK_CSARG_ATTR(potential_values)
10183 }
10184};
10185} // namespace
10186
10187/// ------------------------ NoUndef Attribute ---------------------------------
10188bool AANoUndef::isImpliedByIR(Attributor &A, const IRPosition &IRP,
10189                              Attribute::AttrKind ImpliedAttributeKind,
10190 bool IgnoreSubsumingPositions) {
10191 assert(ImpliedAttributeKind == Attribute::NoUndef &&
10192 "Unexpected attribute kind");
10193 if (A.hasAttr(IRP, {Attribute::NoUndef}, IgnoreSubsumingPositions,
10194 Attribute::NoUndef))
10195 return true;
10196
10197  Value &Val = IRP.getAssociatedValue();
10198  if (!isa<UndefValue>(Val) &&
10199      isGuaranteedNotToBeUndefOrPoison(&Val)) {
10200 LLVMContext &Ctx = Val.getContext();
10201 A.manifestAttrs(IRP, Attribute::get(Ctx, Attribute::NoUndef));
10202 return true;
10203 }
10204
10205 return false;
10206}
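// For example, a call site argument that is a non-undef constant is already
// guaranteed not to be undef or poison, so the check above manifests
// `noundef` for it directly without creating an abstract attribute.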
10207
10208namespace {
10209struct AANoUndefImpl : AANoUndef {
10210 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
10211
10212 /// See AbstractAttribute::initialize(...).
10213 void initialize(Attributor &A) override {
10214 Value &V = getAssociatedValue();
10215 if (isa<UndefValue>(V))
10216 indicatePessimisticFixpoint();
10217 assert(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef));
10218 }
10219
10220 /// See followUsesInMBEC
10221 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10222 AANoUndef::StateType &State) {
10223 const Value *UseV = U->get();
10224 const DominatorTree *DT = nullptr;
10225 AssumptionCache *AC = nullptr;
10226 InformationCache &InfoCache = A.getInfoCache();
10227    if (Function *F = getAnchorScope()) {
10228      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10229      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10230 }
10231 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
10232 bool TrackUse = false;
10233 // Track use for instructions which must produce undef or poison bits when
10234 // at least one operand contains such bits.
10235 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
10236 TrackUse = true;
10237 return TrackUse;
10238 }
10239
10240 /// See AbstractAttribute::getAsStr().
10241 const std::string getAsStr(Attributor *A) const override {
10242 return getAssumed() ? "noundef" : "may-undef-or-poison";
10243 }
10244
10245 ChangeStatus manifest(Attributor &A) override {
10246    // We don't manifest the noundef attribute for dead positions because the
10247    // values associated with dead positions would be replaced with undef
10248    // values.
10249 bool UsedAssumedInformation = false;
10250 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
10251 UsedAssumedInformation))
10252 return ChangeStatus::UNCHANGED;
10253    // A position for which no simplified value exists is considered to be
10254    // dead. We don't manifest noundef in such positions for the same reason
10255    // as above.
10256    if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
10257                                AA::Interprocedural)
10258 .has_value())
10259 return ChangeStatus::UNCHANGED;
10260 return AANoUndef::manifest(A);
10261 }
10262};
10263
10264struct AANoUndefFloating : public AANoUndefImpl {
10265 AANoUndefFloating(const IRPosition &IRP, Attributor &A)
10266 : AANoUndefImpl(IRP, A) {}
10267
10268 /// See AbstractAttribute::initialize(...).
10269 void initialize(Attributor &A) override {
10270 AANoUndefImpl::initialize(A);
10271 if (!getState().isAtFixpoint() && getAnchorScope() &&
10272 !getAnchorScope()->isDeclaration())
10273 if (Instruction *CtxI = getCtxI())
10274 followUsesInMBEC(*this, A, getState(), *CtxI);
10275 }
10276
10277 /// See AbstractAttribute::updateImpl(...).
10278 ChangeStatus updateImpl(Attributor &A) override {
10279 auto VisitValueCB = [&](const IRPosition &IRP) -> bool {
10280 bool IsKnownNoUndef;
10281 return AA::hasAssumedIRAttr<Attribute::NoUndef>(
10282 A, this, IRP, DepClassTy::REQUIRED, IsKnownNoUndef);
10283 };
10284
10285 bool Stripped;
10286 bool UsedAssumedInformation = false;
10287    Value *AssociatedValue = &getAssociatedValue();
10288    SmallVector<AA::ValueAndContext> Values;
10289 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10290 AA::AnyScope, UsedAssumedInformation))
10291 Stripped = false;
10292 else
10293 Stripped =
10294 Values.size() != 1 || Values.front().getValue() != AssociatedValue;
10295
10296 if (!Stripped) {
10297 // If we haven't stripped anything we might still be able to use a
10298 // different AA, but only if the IRP changes. Effectively when we
10299 // interpret this not as a call site value but as a floating/argument
10300 // value.
10301 const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
10302 if (AVIRP == getIRPosition() || !VisitValueCB(AVIRP))
10303 return indicatePessimisticFixpoint();
10304 return ChangeStatus::UNCHANGED;
10305 }
10306
10307 for (const auto &VAC : Values)
10308 if (!VisitValueCB(IRPosition::value(*VAC.getValue())))
10309 return indicatePessimisticFixpoint();
10310
10311 return ChangeStatus::UNCHANGED;
10312 }
10313
10314 /// See AbstractAttribute::trackStatistics()
10315  void trackStatistics() const override {
10316    STATS_DECLTRACK_FLOATING_ATTR(noundef)
10317  }
10316};
10317
10318struct AANoUndefReturned final
10319 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
10320 AANoUndefReturned(const IRPosition &IRP, Attributor &A)
10321 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
10322
10323 /// See AbstractAttribute::trackStatistics()
10324 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
10325};
10326
10327struct AANoUndefArgument final
10328 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
10329 AANoUndefArgument(const IRPosition &IRP, Attributor &A)
10330 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
10331
10332 /// See AbstractAttribute::trackStatistics()
10333 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
10334};
10335
10336struct AANoUndefCallSiteArgument final : AANoUndefFloating {
10337 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
10338 : AANoUndefFloating(IRP, A) {}
10339
10340 /// See AbstractAttribute::trackStatistics()
10341 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
10342};
10343
10344struct AANoUndefCallSiteReturned final
10345 : AACalleeToCallSite<AANoUndef, AANoUndefImpl> {
10346 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
10347 : AACalleeToCallSite<AANoUndef, AANoUndefImpl>(IRP, A) {}
10348
10349 /// See AbstractAttribute::trackStatistics()
10350 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
10351};
10352
10353/// ------------------------ NoFPClass Attribute -------------------------------
10354
10355struct AANoFPClassImpl : AANoFPClass {
10356 AANoFPClassImpl(const IRPosition &IRP, Attributor &A) : AANoFPClass(IRP, A) {}
10357
10358 void initialize(Attributor &A) override {
10359 const IRPosition &IRP = getIRPosition();
10360
10361 Value &V = IRP.getAssociatedValue();
10362 if (isa<UndefValue>(V)) {
10363 indicateOptimisticFixpoint();
10364 return;
10365 }
10366
10367    SmallVector<Attribute> Attrs;
10368 A.getAttrs(getIRPosition(), {Attribute::NoFPClass}, Attrs, false);
10369 for (const auto &Attr : Attrs) {
10370 addKnownBits(Attr.getNoFPClass());
10371 }
10372
10373 const DataLayout &DL = A.getDataLayout();
10374    if (getPositionKind() != IRPosition::IRP_RETURNED) {
10375      KnownFPClass KnownFPClass = computeKnownFPClass(&V, DL);
10376 addKnownBits(~KnownFPClass.KnownFPClasses);
10377 }
10378
10379 if (Instruction *CtxI = getCtxI())
10380 followUsesInMBEC(*this, A, getState(), *CtxI);
10381 }
10382
10383 /// See followUsesInMBEC
10384 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10385 AANoFPClass::StateType &State) {
10386 // TODO: Determine what instructions can be looked through.
10387 auto *CB = dyn_cast<CallBase>(I);
10388 if (!CB)
10389 return false;
10390
10391 if (!CB->isArgOperand(U))
10392 return false;
10393
10394 unsigned ArgNo = CB->getArgOperandNo(U);
10395 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
10396 if (auto *NoFPAA = A.getAAFor<AANoFPClass>(*this, IRP, DepClassTy::NONE))
10397 State.addKnownBits(NoFPAA->getState().getKnown());
10398 return false;
10399 }
10400
10401 const std::string getAsStr(Attributor *A) const override {
10402 std::string Result = "nofpclass";
10403 raw_string_ostream OS(Result);
10404 OS << getKnownNoFPClass() << '/' << getAssumedNoFPClass();
10405 return Result;
10406 }
10407
10408 void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
10409 SmallVectorImpl<Attribute> &Attrs) const override {
10410 Attrs.emplace_back(Attribute::getWithNoFPClass(Ctx, getAssumedNoFPClass()));
10411 }
10412};
10413
10414struct AANoFPClassFloating : public AANoFPClassImpl {
10415 AANoFPClassFloating(const IRPosition &IRP, Attributor &A)
10416 : AANoFPClassImpl(IRP, A) {}
10417
10418 /// See AbstractAttribute::updateImpl(...).
10419  ChangeStatus updateImpl(Attributor &A) override {
10420    SmallVector<AA::ValueAndContext> Values;
10421 bool UsedAssumedInformation = false;
10422 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10423 AA::AnyScope, UsedAssumedInformation)) {
10424 Values.push_back({getAssociatedValue(), getCtxI()});
10425 }
10426
10427 StateType T;
10428 auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
10429 const auto *AA = A.getAAFor<AANoFPClass>(*this, IRPosition::value(V),
10430 DepClassTy::REQUIRED);
10431 if (!AA || this == AA) {
10432 T.indicatePessimisticFixpoint();
10433 } else {
10434 const AANoFPClass::StateType &S =
10435 static_cast<const AANoFPClass::StateType &>(AA->getState());
10436 T ^= S;
10437 }
10438 return T.isValidState();
10439 };
10440
10441 for (const auto &VAC : Values)
10442 if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
10443 return indicatePessimisticFixpoint();
10444
10445 return clampStateAndIndicateChange(getState(), T);
10446 }
10447
10448 /// See AbstractAttribute::trackStatistics()
10449  void trackStatistics() const override {
10450    STATS_DECLTRACK_FLOATING_ATTR(nofpclass)
10451 }
10452};
10453
10454struct AANoFPClassReturned final
10455 : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10456 AANoFPClassImpl::StateType, false,
10457 Attribute::None, false> {
10458 AANoFPClassReturned(const IRPosition &IRP, Attributor &A)
10459 : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10460 AANoFPClassImpl::StateType, false,
10461 Attribute::None, false>(IRP, A) {}
10462
10463 /// See AbstractAttribute::trackStatistics()
10464  void trackStatistics() const override {
10465    STATS_DECLTRACK_FNRET_ATTR(nofpclass)
10466 }
10467};
10468
10469struct AANoFPClassArgument final
10470 : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl> {
10471 AANoFPClassArgument(const IRPosition &IRP, Attributor &A)
10472 : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10473
10474 /// See AbstractAttribute::trackStatistics()
10475 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofpclass) }
10476};
10477
10478struct AANoFPClassCallSiteArgument final : AANoFPClassFloating {
10479 AANoFPClassCallSiteArgument(const IRPosition &IRP, Attributor &A)
10480 : AANoFPClassFloating(IRP, A) {}
10481
10482 /// See AbstractAttribute::trackStatistics()
10483  void trackStatistics() const override {
10484    STATS_DECLTRACK_CSARG_ATTR(nofpclass)
10485 }
10486};
10487
10488struct AANoFPClassCallSiteReturned final
10489 : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl> {
10490 AANoFPClassCallSiteReturned(const IRPosition &IRP, Attributor &A)
10491 : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10492
10493 /// See AbstractAttribute::trackStatistics()
10494  void trackStatistics() const override {
10495    STATS_DECLTRACK_CSRET_ATTR(nofpclass)
10496 }
10497};
10498
10499struct AACallEdgesImpl : public AACallEdges {
10500 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
10501
10502 const SetVector<Function *> &getOptimisticEdges() const override {
10503 return CalledFunctions;
10504 }
10505
10506 bool hasUnknownCallee() const override { return HasUnknownCallee; }
10507
10508 bool hasNonAsmUnknownCallee() const override {
10509 return HasUnknownCalleeNonAsm;
10510 }
10511
10512 const std::string getAsStr(Attributor *A) const override {
10513 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
10514 std::to_string(CalledFunctions.size()) + "]";
10515 }
10516
10517 void trackStatistics() const override {}
10518
10519protected:
10520 void addCalledFunction(Function *Fn, ChangeStatus &Change) {
10521 if (CalledFunctions.insert(Fn)) {
10522 Change = ChangeStatus::CHANGED;
10523 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
10524 << "\n");
10525 }
10526 }
10527
10528 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
10529 if (!HasUnknownCallee)
10530 Change = ChangeStatus::CHANGED;
10531 if (NonAsm && !HasUnknownCalleeNonAsm)
10532 Change = ChangeStatus::CHANGED;
10533 HasUnknownCalleeNonAsm |= NonAsm;
10534 HasUnknownCallee = true;
10535 }
10536
10537private:
10538 /// Optimistic set of functions that might be called by this position.
10539 SetVector<Function *> CalledFunctions;
10540
10541  /// Is there any call with an unknown callee.
10542 bool HasUnknownCallee = false;
10543
10544  /// Is there any call with an unknown callee, excluding any inline asm.
10545 bool HasUnknownCalleeNonAsm = false;
10546};
10547
10548struct AACallEdgesCallSite : public AACallEdgesImpl {
10549 AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
10550 : AACallEdgesImpl(IRP, A) {}
10551 /// See AbstractAttribute::updateImpl(...).
10552 ChangeStatus updateImpl(Attributor &A) override {
10553 ChangeStatus Change = ChangeStatus::UNCHANGED;
10554
10555 auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
10556 if (Function *Fn = dyn_cast<Function>(&V)) {
10557 addCalledFunction(Fn, Change);
10558 } else {
10559 LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
10560 setHasUnknownCallee(true, Change);
10561 }
10562
10563 // Explore all values.
10564 return true;
10565 };
10566
10567    SmallVector<AA::ValueAndContext> Values;
10568 // Process any value that we might call.
10569 auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
10570 if (isa<Constant>(V)) {
10571 VisitValue(*V, CtxI);
10572 return;
10573 }
10574
10575 bool UsedAssumedInformation = false;
10576 Values.clear();
10577 if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
10578 AA::AnyScope, UsedAssumedInformation)) {
10579 Values.push_back({*V, CtxI});
10580 }
10581 for (auto &VAC : Values)
10582 VisitValue(*VAC.getValue(), VAC.getCtxI());
10583 };
10584
10585 CallBase *CB = cast<CallBase>(getCtxI());
10586
10587 if (auto *IA = dyn_cast<InlineAsm>(CB->getCalledOperand())) {
10588 if (IA->hasSideEffects() &&
10589 !hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
10590 !hasAssumption(*CB, "ompx_no_call_asm")) {
10591 setHasUnknownCallee(false, Change);
10592 }
10593 return Change;
10594 }
10595
10596 if (CB->isIndirectCall())
10597 if (auto *IndirectCallAA = A.getAAFor<AAIndirectCallInfo>(
10598 *this, getIRPosition(), DepClassTy::OPTIONAL))
10599 if (IndirectCallAA->foreachCallee(
10600 [&](Function *Fn) { return VisitValue(*Fn, CB); }))
10601 return Change;
10602
10603    // The simplest case: a direct call.
10604 ProcessCalledOperand(CB->getCalledOperand(), CB);
10605
10606 // Process callback functions.
10607 SmallVector<const Use *, 4u> CallbackUses;
10608 AbstractCallSite::getCallbackUses(*CB, CallbackUses);
10609 for (const Use *U : CallbackUses)
10610 ProcessCalledOperand(U->get(), CB);
10611
10612 return Change;
10613 }
10614};
10615
10616struct AACallEdgesFunction : public AACallEdgesImpl {
10617 AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
10618 : AACallEdgesImpl(IRP, A) {}
10619
10620 /// See AbstractAttribute::updateImpl(...).
10621 ChangeStatus updateImpl(Attributor &A) override {
10622 ChangeStatus Change = ChangeStatus::UNCHANGED;
10623
10624 auto ProcessCallInst = [&](Instruction &Inst) {
10625 CallBase &CB = cast<CallBase>(Inst);
10626
10627 auto *CBEdges = A.getAAFor<AACallEdges>(
10628 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10629 if (!CBEdges)
10630 return false;
10631 if (CBEdges->hasNonAsmUnknownCallee())
10632 setHasUnknownCallee(true, Change);
10633 if (CBEdges->hasUnknownCallee())
10634 setHasUnknownCallee(false, Change);
10635
10636 for (Function *F : CBEdges->getOptimisticEdges())
10637 addCalledFunction(F, Change);
10638
10639 return true;
10640 };
10641
10642 // Visit all callable instructions.
10643 bool UsedAssumedInformation = false;
10644 if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
10645 UsedAssumedInformation,
10646 /* CheckBBLivenessOnly */ true)) {
10647      // If we haven't looked at all call-like instructions, assume that there
10648 // are unknown callees.
10649 setHasUnknownCallee(true, Change);
10650 }
10651
10652 return Change;
10653 }
10654};
10655
10656/// -------------------AAInterFnReachability Attribute--------------------------
10657
10658struct AAInterFnReachabilityFunction
10659 : public CachedReachabilityAA<AAInterFnReachability, Function> {
10660 using Base = CachedReachabilityAA<AAInterFnReachability, Function>;
10661 AAInterFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
10662 : Base(IRP, A) {}
10663
10664 bool instructionCanReach(
10665 Attributor &A, const Instruction &From, const Function &To,
10666 const AA::InstExclusionSetTy *ExclusionSet) const override {
10667 assert(From.getFunction() == getAnchorScope() && "Queried the wrong AA!");
10668 auto *NonConstThis = const_cast<AAInterFnReachabilityFunction *>(this);
10669
10670 RQITy StackRQI(A, From, To, ExclusionSet, false);
10671 typename RQITy::Reachable Result;
10672 if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
10673 return NonConstThis->isReachableImpl(A, StackRQI,
10674 /*IsTemporaryRQI=*/true);
10675 return Result == RQITy::Reachable::Yes;
10676 }
10677
10678 bool isReachableImpl(Attributor &A, RQITy &RQI,
10679 bool IsTemporaryRQI) override {
10680 const Instruction *EntryI =
10681 &RQI.From->getFunction()->getEntryBlock().front();
10682 if (EntryI != RQI.From &&
10683 !instructionCanReach(A, *EntryI, *RQI.To, nullptr))
10684 return rememberResult(A, RQITy::Reachable::No, RQI, false,
10685 IsTemporaryRQI);
10686
10687 auto CheckReachableCallBase = [&](CallBase *CB) {
10688 auto *CBEdges = A.getAAFor<AACallEdges>(
10689 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
10690 if (!CBEdges || !CBEdges->getState().isValidState())
10691 return false;
10692 // TODO Check To backwards in this case.
10693 if (CBEdges->hasUnknownCallee())
10694 return false;
10695
10696 for (Function *Fn : CBEdges->getOptimisticEdges()) {
10697 if (Fn == RQI.To)
10698 return false;
10699
10700 if (Fn->isDeclaration()) {
10701 if (Fn->hasFnAttribute(Attribute::NoCallback))
10702 continue;
10703 // TODO Check To backwards in this case.
10704 return false;
10705 }
10706
10707 if (Fn == getAnchorScope()) {
10708 if (EntryI == RQI.From)
10709 continue;
10710 return false;
10711 }
10712
10713 const AAInterFnReachability *InterFnReachability =
10714 A.getAAFor<AAInterFnReachability>(*this, IRPosition::function(*Fn),
10715 DepClassTy::OPTIONAL);
10716
10717 const Instruction &FnFirstInst = Fn->getEntryBlock().front();
10718 if (!InterFnReachability ||
10719 InterFnReachability->instructionCanReach(A, FnFirstInst, *RQI.To,
10720 RQI.ExclusionSet))
10721 return false;
10722 }
10723 return true;
10724 };
10725
10726 const auto *IntraFnReachability = A.getAAFor<AAIntraFnReachability>(
10727 *this, IRPosition::function(*RQI.From->getFunction()),
10728 DepClassTy::OPTIONAL);
10729
10730    // Determine the call-like instructions that we can reach from the
10731    // instruction.
10731 auto CheckCallBase = [&](Instruction &CBInst) {
10732      // There are usually fewer nodes in the call graph, so check
10733      // inter-function reachability first.
10734 if (CheckReachableCallBase(cast<CallBase>(&CBInst)))
10735 return true;
10736 return IntraFnReachability && !IntraFnReachability->isAssumedReachable(
10737 A, *RQI.From, CBInst, RQI.ExclusionSet);
10738 };
10739
10740 bool UsedExclusionSet = /* conservative */ true;
10741 bool UsedAssumedInformation = false;
10742 if (!A.checkForAllCallLikeInstructions(CheckCallBase, *this,
10743 UsedAssumedInformation,
10744 /* CheckBBLivenessOnly */ true))
10745 return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
10746 IsTemporaryRQI);
10747
10748 return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
10749 IsTemporaryRQI);
10750 }
10751
10752 void trackStatistics() const override {}
10753};
10754} // namespace
10755
10756template <typename AAType>
10757static std::optional<Constant *>
10758askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
10759 const IRPosition &IRP, Type &Ty) {
10760 if (!Ty.isIntegerTy())
10761 return nullptr;
10762
10763 // This will also pass the call base context.
10764 const auto *AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
10765 if (!AA)
10766 return nullptr;
10767
10768 std::optional<Constant *> COpt = AA->getAssumedConstant(A);
10769
10770 if (!COpt.has_value()) {
10771 A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10772 return std::nullopt;
10773 }
10774 if (auto *C = *COpt) {
10775 A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10776 return C;
10777 }
10778 return nullptr;
10779}
10780
10781Value *AA::getSingleValue(
10782    Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
10783    SmallVectorImpl<AA::ValueAndContext> &Values) {
10784 Type &Ty = *IRP.getAssociatedType();
10785 std::optional<Value *> V;
10786 for (auto &It : Values) {
10787 V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
10788 if (V.has_value() && !*V)
10789 break;
10790 }
10791 if (!V.has_value())
10792 return UndefValue::get(&Ty);
10793 return *V;
10794}
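// For example, if Values holds the single entry {i32 42, CtxI}, the combined
// lattice value is that constant and it is returned; an empty Values list
// yields undef, while conflicting entries collapse to nullptr.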
10795
10796namespace {
10797struct AAPotentialValuesImpl : AAPotentialValues {
10798 using StateType = PotentialLLVMValuesState;
10799
10800 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
10801 : AAPotentialValues(IRP, A) {}
10802
10803 /// See AbstractAttribute::initialize(..).
10804 void initialize(Attributor &A) override {
10805 if (A.hasSimplificationCallback(getIRPosition())) {
10806 indicatePessimisticFixpoint();
10807 return;
10808 }
10809 Value *Stripped = getAssociatedValue().stripPointerCasts();
10810 if (isa<Constant>(Stripped) && !isa<ConstantExpr>(Stripped)) {
10811 addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
10812 getAnchorScope());
10813 indicateOptimisticFixpoint();
10814 return;
10815 }
10816 AAPotentialValues::initialize(A);
10817 }
10818
10819 /// See AbstractAttribute::getAsStr().
10820 const std::string getAsStr(Attributor *A) const override {
10821    std::string Str;
10822    llvm::raw_string_ostream OS(Str);
10823 OS << getState();
10824 return Str;
10825 }
10826
10827 template <typename AAType>
10828 static std::optional<Value *> askOtherAA(Attributor &A,
10829 const AbstractAttribute &AA,
10830 const IRPosition &IRP, Type &Ty) {
10831 if (isa<Constant>(IRP.getAssociatedValue()))
10832 return &IRP.getAssociatedValue();
10833 std::optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
10834 if (!C)
10835 return std::nullopt;
10836 if (*C)
10837 if (auto *CC = AA::getWithType(**C, Ty))
10838 return CC;
10839 return nullptr;
10840 }
10841
10842 virtual void addValue(Attributor &A, StateType &State, Value &V,
10843 const Instruction *CtxI, AA::ValueScope S,
10844 Function *AnchorScope) const {
10845
10846 IRPosition ValIRP = IRPosition::value(V);
10847 if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
10848 for (const auto &U : CB->args()) {
10849 if (U.get() != &V)
10850 continue;
10851 ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
10852 break;
10853 }
10854 }
10855
10856 Value *VPtr = &V;
10857 if (ValIRP.getAssociatedType()->isIntegerTy()) {
10858 Type &Ty = *getAssociatedType();
10859 std::optional<Value *> SimpleV =
10860 askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
10861 if (SimpleV.has_value() && !*SimpleV) {
10862 auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
10863 *this, ValIRP, DepClassTy::OPTIONAL);
10864 if (PotentialConstantsAA && PotentialConstantsAA->isValidState()) {
10865 for (const auto &It : PotentialConstantsAA->getAssumedSet())
10866 State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
10867 if (PotentialConstantsAA->undefIsContained())
10868 State.unionAssumed({{*UndefValue::get(&Ty), nullptr}, S});
10869 return;
10870 }
10871 }
10872 if (!SimpleV.has_value())
10873 return;
10874
10875 if (*SimpleV)
10876 VPtr = *SimpleV;
10877 }
10878
10879 if (isa<ConstantInt>(VPtr))
10880 CtxI = nullptr;
10881    if (!AA::isValidInScope(*VPtr, AnchorScope))
10882      return;
10883
10884 State.unionAssumed({{*VPtr, CtxI}, S});
10885 }
10886
10887 /// Helper struct to tie a value+context pair together with the scope for
10888 /// which this is the simplified version.
10889  struct ItemInfo {
10890    AA::ValueAndContext I;
10891    AA::ValueScope S;
10892
10893 bool operator==(const ItemInfo &II) const {
10894 return II.I == I && II.S == S;
10895 };
10896 bool operator<(const ItemInfo &II) const {
10897 if (I == II.I)
10898 return S < II.S;
10899 return I < II.I;
10900 };
10901 };
10902
10903  bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
10904    SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
10905 for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
10906 if (!(CS & S))
10907 continue;
10908
10909      bool UsedAssumedInformation = false;
10910      SmallVector<AA::ValueAndContext> Values;
10911 if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
10912 UsedAssumedInformation))
10913 return false;
10914
10915 for (auto &It : Values)
10916 ValueScopeMap[It] += CS;
10917 }
10918 for (auto &It : ValueScopeMap)
10919 addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
10920 AA::ValueScope(It.second), getAnchorScope());
10921
10922 return true;
10923 }
10924
10925 void giveUpOnIntraprocedural(Attributor &A) {
10926 auto NewS = StateType::getBestState(getState());
10927 for (const auto &It : getAssumedSet()) {
10928 if (It.second == AA::Intraprocedural)
10929 continue;
10930 addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
10931 AA::Interprocedural, getAnchorScope());
10932 }
10933 assert(!undefIsContained() && "Undef should be an explicit value!");
10934 addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
10935 getAnchorScope());
10936 getState() = NewS;
10937 }
10938
10939 /// See AbstractState::indicatePessimisticFixpoint(...).
10940 ChangeStatus indicatePessimisticFixpoint() override {
10941 getState() = StateType::getBestState(getState());
10942    getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
10943    AAPotentialValues::indicateOptimisticFixpoint();
10944 return ChangeStatus::CHANGED;
10945 }
10946
10947 /// See AbstractAttribute::updateImpl(...).
10948 ChangeStatus updateImpl(Attributor &A) override {
10949 return indicatePessimisticFixpoint();
10950 }
10951
10952 /// See AbstractAttribute::manifest(...).
10953  ChangeStatus manifest(Attributor &A) override {
10954    SmallVector<AA::ValueAndContext> Values;
10955    for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
10956 Values.clear();
10957 if (!getAssumedSimplifiedValues(A, Values, S))
10958 continue;
10959 Value &OldV = getAssociatedValue();
10960 if (isa<UndefValue>(OldV))
10961 continue;
10962 Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
10963 if (!NewV || NewV == &OldV)
10964 continue;
10965 if (getCtxI() &&
10966 !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
10967 continue;
10968 if (A.changeAfterManifest(getIRPosition(), *NewV))
10969 return ChangeStatus::CHANGED;
10970    }
10971    return ChangeStatus::UNCHANGED;
10972 }
10973
10974  bool getAssumedSimplifiedValues(
10975      Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values,
10976 AA::ValueScope S, bool RecurseForSelectAndPHI = false) const override {
10977 if (!isValidState())
10978 return false;
10979 bool UsedAssumedInformation = false;
10980 for (const auto &It : getAssumedSet())
10981 if (It.second & S) {
10982 if (RecurseForSelectAndPHI && (isa<PHINode>(It.first.getValue()) ||
10983 isa<SelectInst>(It.first.getValue()))) {
10984 if (A.getAssumedSimplifiedValues(
10985 IRPosition::inst(*cast<Instruction>(It.first.getValue())),
10986 this, Values, S, UsedAssumedInformation))
10987 continue;
10988 }
10989 Values.push_back(It.first);
10990 }
10991 assert(!undefIsContained() && "Undef should be an explicit value!");
10992 return true;
10993 }
10994};
10995
10996struct AAPotentialValuesFloating : AAPotentialValuesImpl {
10997 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
10998 : AAPotentialValuesImpl(IRP, A) {}
10999
11000 /// See AbstractAttribute::updateImpl(...).
11001 ChangeStatus updateImpl(Attributor &A) override {
11002 auto AssumedBefore = getAssumed();
11003
11004 genericValueTraversal(A, &getAssociatedValue());
11005
11006 return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11007 : ChangeStatus::CHANGED;
11008 }
11009
11010 /// Helper struct to remember which AAIsDead instances we actually used.
11011 struct LivenessInfo {
11012 const AAIsDead *LivenessAA = nullptr;
11013 bool AnyDead = false;
11014 };
11015
11016 /// Check if \p Cmp is a comparison we can simplify.
11017 ///
11018 /// We handle multiple cases, one in which at least one operand is an
11019 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
11020 /// operand. Return true if successful, in that case Worklist will be updated.
11021 bool handleCmp(Attributor &A, Value &Cmp, Value *LHS, Value *RHS,
11022 CmpInst::Predicate Pred, ItemInfo II,
11023 SmallVectorImpl<ItemInfo> &Worklist) {
11024
11025 // Simplify the operands first.
11026 bool UsedAssumedInformation = false;
11027 SmallVector<AA::ValueAndContext> LHSValues, RHSValues;
11028    auto GetSimplifiedValues = [&](Value &V,
11029                                   SmallVectorImpl<AA::ValueAndContext> &Values) {
11030 if (!A.getAssumedSimplifiedValues(
11031 IRPosition::value(V, getCallBaseContext()), this, Values,
11032 AA::Intraprocedural, UsedAssumedInformation)) {
11033 Values.clear();
11034 Values.push_back(AA::ValueAndContext{V, II.I.getCtxI()});
11035 }
11036 return Values.empty();
11037 };
11038 if (GetSimplifiedValues(*LHS, LHSValues))
11039 return true;
11040 if (GetSimplifiedValues(*RHS, RHSValues))
11041 return true;
11042
11043 LLVMContext &Ctx = LHS->getContext();
11044
11045 InformationCache &InfoCache = A.getInfoCache();
11046 Instruction *CmpI = dyn_cast<Instruction>(&Cmp);
11047 Function *F = CmpI ? CmpI->getFunction() : nullptr;
11048    const auto *DT =
11049        F ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F)
11050 : nullptr;
11051 const auto *TLI =
11052 F ? A.getInfoCache().getTargetLibraryInfoForFunction(*F) : nullptr;
11053    auto *AC =
11054        F ? InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F)
11055 : nullptr;
11056
11057 const DataLayout &DL = A.getDataLayout();
11058 SimplifyQuery Q(DL, TLI, DT, AC, CmpI);
11059
11060 auto CheckPair = [&](Value &LHSV, Value &RHSV) {
11061 if (isa<UndefValue>(LHSV) || isa<UndefValue>(RHSV)) {
11062 addValue(A, getState(), *UndefValue::get(Cmp.getType()),
11063 /* CtxI */ nullptr, II.S, getAnchorScope());
11064 return true;
11065 }
11066
11067 // Handle the trivial case first in which we don't even need to think
11068 // about null or non-null.
11069      if (&LHSV == &RHSV &&
11070          (CmpInst::isTrueWhenEqual(Pred) || CmpInst::isFalseWhenEqual(Pred))) {
11071        Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
11072                                          CmpInst::isTrueWhenEqual(Pred));
11073 addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11074 getAnchorScope());
11075 return true;
11076 }
11077
11078 auto *TypedLHS = AA::getWithType(LHSV, *LHS->getType());
11079 auto *TypedRHS = AA::getWithType(RHSV, *RHS->getType());
11080 if (TypedLHS && TypedRHS) {
11081 Value *NewV = simplifyCmpInst(Pred, TypedLHS, TypedRHS, Q);
11082 if (NewV && NewV != &Cmp) {
11083 addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11084 getAnchorScope());
11085 return true;
11086 }
11087 }
11088
11089 // From now on we only handle equalities (==, !=).
11090 if (!CmpInst::isEquality(Pred))
11091 return false;
11092
11093 bool LHSIsNull = isa<ConstantPointerNull>(LHSV);
11094 bool RHSIsNull = isa<ConstantPointerNull>(RHSV);
11095 if (!LHSIsNull && !RHSIsNull)
11096 return false;
11097
11098      // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
11099      // on the non-nullptr operand; if we can assume it is non-null, we can
11100      // conclude the result of the comparison.
11101 assert((LHSIsNull || RHSIsNull) &&
11102 "Expected nullptr versus non-nullptr comparison at this point");
11103
11104      // The index of the operand that we assume is not null.
11105 unsigned PtrIdx = LHSIsNull;
11106 bool IsKnownNonNull;
11107 bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
11108 A, this, IRPosition::value(*(PtrIdx ? &RHSV : &LHSV)),
11109 DepClassTy::REQUIRED, IsKnownNonNull);
11110 if (!IsAssumedNonNull)
11111 return false;
11112
11113 // The new value depends on the predicate, true for != and false for ==.
11114 Constant *NewV =
11115 ConstantInt::get(Type::getInt1Ty(Ctx), Pred == CmpInst::ICMP_NE);
11116 addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11117 getAnchorScope());
11118 return true;
11119 };
11120
11121 for (auto &LHSValue : LHSValues)
11122 for (auto &RHSValue : RHSValues)
11123 if (!CheckPair(*LHSValue.getValue(), *RHSValue.getValue()))
11124 return false;
11125 return true;
11126 }
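  // For instance, `icmp eq ptr %p, null` where %p is assumed non-null via
  // AANonNull folds to `i1 false` above, and the corresponding `ne`
  // comparison folds to `i1 true`.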
11127
11128 bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
11129 SmallVectorImpl<ItemInfo> &Worklist) {
11130 const Instruction *CtxI = II.I.getCtxI();
11131 bool UsedAssumedInformation = false;
11132
11133 std::optional<Constant *> C =
11134 A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
11135 bool NoValueYet = !C.has_value();
11136 if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
11137 return true;
11138 if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
11139 if (CI->isZero())
11140 Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11141 else
11142 Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11143 } else if (&SI == &getAssociatedValue()) {
11144 // We could not simplify the condition, assume both values.
11145 Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11146 Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11147 } else {
11148 std::optional<Value *> SimpleV = A.getAssumedSimplified(
11149 IRPosition::inst(SI), *this, UsedAssumedInformation, II.S);
11150 if (!SimpleV.has_value())
11151 return true;
11152 if (*SimpleV) {
11153 addValue(A, getState(), **SimpleV, CtxI, II.S, getAnchorScope());
11154 return true;
11155 }
11156 return false;
11157 }
11158 return true;
11159 }
11160
11161 bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
11162 SmallVectorImpl<ItemInfo> &Worklist) {
11163 SmallSetVector<Value *, 4> PotentialCopies;
11164 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
11165 bool UsedAssumedInformation = false;
11166 if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
11167 PotentialValueOrigins, *this,
11168 UsedAssumedInformation,
11169 /* OnlyExact */ true)) {
11170 LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
11171 "loaded values for load instruction "
11172 << LI << "\n");
11173 return false;
11174 }
11175
11176 // Do not simplify loads that are only used in llvm.assume if we cannot also
11177 // remove all stores that may feed into the load. The reason is that the
11178 // assume is probably worth something as long as the stores are around.
11179 InformationCache &InfoCache = A.getInfoCache();
11180 if (InfoCache.isOnlyUsedByAssume(LI)) {
11181 if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
11182 if (!I || isa<AssumeInst>(I))
11183 return true;
11184 if (auto *SI = dyn_cast<StoreInst>(I))
11185 return A.isAssumedDead(SI->getOperandUse(0), this,
11186 /* LivenessAA */ nullptr,
11187 UsedAssumedInformation,
11188 /* CheckBBLivenessOnly */ false);
11189 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
11190 UsedAssumedInformation,
11191 /* CheckBBLivenessOnly */ false);
11192 })) {
11193 LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
11194 "and we cannot delete all the stores: "
11195 << LI << "\n");
11196 return false;
11197 }
11198 }
11199
11200 // Values have to be dynamically unique or we lose the fact that a
11201 // single llvm::Value might represent two runtime values (e.g.,
11202 // stack locations in different recursive calls).
11203 const Instruction *CtxI = II.I.getCtxI();
11204 bool ScopeIsLocal = (II.S & AA::Intraprocedural);
11205 bool AllLocal = ScopeIsLocal;
11206 bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
11207 AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
11208 return AA::isDynamicallyUnique(A, *this, *PC);
11209 });
11210 if (!DynamicallyUnique) {
11211 LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
11212 "values are dynamically unique: "
11213 << LI << "\n");
11214 return false;
11215 }
11216
11217 for (auto *PotentialCopy : PotentialCopies) {
11218 if (AllLocal) {
11219 Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
11220 } else {
11221 Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
11222 }
11223 }
11224 if (!AllLocal && ScopeIsLocal)
11225 addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
11226 return true;
11227 }
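  // A minimal sketch of the store-to-load forwarding above (hypothetical IR):
  //
  // ```
  //   store i32 42, ptr %p
  //   %v = load i32, ptr %p
  // ```
  //
  // The only potential copy for %v is the constant 42, which is trivially
  // dynamically unique, so it is pushed onto the worklist in place of the
  // load.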
11228
11229 bool handlePHINode(
11230 Attributor &A, PHINode &PHI, ItemInfo II,
11231 SmallVectorImpl<ItemInfo> &Worklist,
11232 SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11233 auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
11234 LivenessInfo &LI = LivenessAAs[&F];
11235 if (!LI.LivenessAA)
11236 LI.LivenessAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
11237 DepClassTy::NONE);
11238 return LI;
11239 };
11240
11241 if (&PHI == &getAssociatedValue()) {
11242 LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
11243 const auto *CI =
11244 A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
11245 *PHI.getFunction());
11246
11247 Cycle *C = nullptr;
11248 bool CyclePHI = mayBeInCycle(CI, &PHI, /* HeaderOnly */ true, &C);
11249 for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
11250 BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
11251 if (LI.LivenessAA &&
11252 LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
11253 LI.AnyDead = true;
11254 continue;
11255 }
11256 Value *V = PHI.getIncomingValue(u);
11257 if (V == &PHI)
11258 continue;
11259
11260 // If the incoming value is not the PHI but an instruction in the same
11261 // cycle we might have multiple versions of it flying around.
11262 if (CyclePHI && isa<Instruction>(V) &&
11263 (!C || C->contains(cast<Instruction>(V)->getParent())))
11264 return false;
11265
11266 Worklist.push_back({{*V, IncomingBB->getTerminator()}, II.S});
11267 }
11268 return true;
11269 }
11270
11271 bool UsedAssumedInformation = false;
11272 std::optional<Value *> SimpleV = A.getAssumedSimplified(
11273 IRPosition::inst(PHI), *this, UsedAssumedInformation, II.S);
11274 if (!SimpleV.has_value())
11275 return true;
11276 if (!(*SimpleV))
11277 return false;
11278 addValue(A, getState(), **SimpleV, &PHI, II.S, getAnchorScope());
11279 return true;
11280 }
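  // For illustration (hypothetical IR):
  //
  // ```
  //   %phi = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
  // ```
  //
  // If AAIsDead proves the edge from %bb1 dead, only %a is enqueued, and the
  // use of liveness information is remembered via LivenessInfo::AnyDead so a
  // dependence can be recorded later.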
11281
11282 /// Use the generic, non-optimistic InstSimplify functionality if we managed
11283 /// to simplify any operand of the instruction \p I. Return true if
11284 /// successful; in that case the Worklist will be updated.
11285 bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
11286 SmallVectorImpl<ItemInfo> &Worklist) {
11287 bool SomeSimplified = false;
11288 bool UsedAssumedInformation = false;
11289
11290 SmallVector<Value *, 8> NewOps(I.getNumOperands());
11291 int Idx = 0;
11292 for (Value *Op : I.operands()) {
11293 const auto &SimplifiedOp = A.getAssumedSimplified(
11294 IRPosition::value(*Op, getCallBaseContext()), *this,
11295 UsedAssumedInformation, AA::Intraprocedural);
11296 // If we are not sure about any operand we are not sure about the entire
11297 // instruction, we'll wait.
11298 if (!SimplifiedOp.has_value())
11299 return true;
11300
11301 if (*SimplifiedOp)
11302 NewOps[Idx] = *SimplifiedOp;
11303 else
11304 NewOps[Idx] = Op;
11305
11306 SomeSimplified |= (NewOps[Idx] != Op);
11307 ++Idx;
11308 }
11309
11310 // We won't bother with the InstSimplify interface if we didn't simplify any
11311 // operand ourselves.
11312 if (!SomeSimplified)
11313 return false;
11314
11315 InformationCache &InfoCache = A.getInfoCache();
11316 Function *F = I.getFunction();
11317 const auto *DT =
11319 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
11320 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
11321
11322 const DataLayout &DL = I.getDataLayout();
11323 SimplifyQuery Q(DL, TLI, DT, AC, &I);
11324 Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q);
11325 if (!NewV || NewV == &I)
11326 return false;
11327
11328 LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
11329 << *NewV << "\n");
11330 Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
11331 return true;
11332 }
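  // A sketch of the operand-based resimplification above (hypothetical IR):
  //
  // ```
  //   %x = ...            ; assumed to simplify to i32 0
  //   %a = add i32 %y, %x
  // ```
  //
  // With the assumed operand list {%y, 0}, simplifyInstructionWithOperands
  // can fold %a to %y; the IR itself is not rewritten here, %y merely enters
  // the worklist.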
11333
11334 bool simplifyInstruction(
11335 Attributor &A, Instruction &I, ItemInfo II,
11336 SmallVectorImpl<ItemInfo> &Worklist,
11337 SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11338 if (auto *CI = dyn_cast<CmpInst>(&I))
11339 return handleCmp(A, *CI, CI->getOperand(0), CI->getOperand(1),
11340 CI->getPredicate(), II, Worklist);
11341
11342 switch (I.getOpcode()) {
11343 case Instruction::Select:
11344 return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
11345 case Instruction::PHI:
11346 return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
11347 case Instruction::Load:
11348 return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
11349 default:
11350 return handleGenericInst(A, I, II, Worklist);
11351 };
11352 return false;
11353 }
11354
11355 void genericValueTraversal(Attributor &A, Value *InitialV) {
11356 SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
11357
11358 SmallSet<ItemInfo, 16> Visited;
11359 SmallVector<ItemInfo, 16> Worklist;
11360 Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
11361
11362 int Iteration = 0;
11363 do {
11364 ItemInfo II = Worklist.pop_back_val();
11365 Value *V = II.I.getValue();
11366 assert(V);
11367 const Instruction *CtxI = II.I.getCtxI();
11368 AA::ValueScope S = II.S;
11369
11370 // Check if we should process the current value. To prevent endless
11371 // recursion keep a record of the values we followed!
11372 if (!Visited.insert(II).second)
11373 continue;
11374
11375 // Make sure we limit the compile time for complex expressions.
11376 if (Iteration++ >= MaxPotentialValuesIterations) {
11377 LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
11378 << Iteration << "!\n");
11379 addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11380 continue;
11381 }
11382
11383 // Explicitly look through calls with a "returned" attribute if we do
11384 // not have a pointer, as stripPointerCasts only works on pointers.
11385 Value *NewV = nullptr;
11386 if (V->getType()->isPointerTy()) {
11387 NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
11388 } else {
11389 if (auto *CB = dyn_cast<CallBase>(V))
11390 if (auto *Callee =
11391 dyn_cast_if_present<Function>(CB->getCalledOperand())) {
11392 for (Argument &Arg : Callee->args())
11393 if (Arg.hasReturnedAttr()) {
11394 NewV = CB->getArgOperand(Arg.getArgNo());
11395 break;
11396 }
11397 }
11398 }
11399 if (NewV && NewV != V) {
11400 Worklist.push_back({{*NewV, CtxI}, S});
11401 continue;
11402 }
11403
11404 if (auto *I = dyn_cast<Instruction>(V)) {
11405 if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
11406 continue;
11407 }
11408
11409 if (V != InitialV || isa<Argument>(V))
11410 if (recurseForValue(A, IRPosition::value(*V), II.S))
11411 continue;
11412
11413 // If we haven't stripped anything we give up.
11414 if (V == InitialV && CtxI == getCtxI()) {
11415 indicatePessimisticFixpoint();
11416 return;
11417 }
11418
11419 addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11420 } while (!Worklist.empty());
11421
11422 // If we actually used liveness information, we have to record a
11423 // dependence.
11424 for (auto &It : LivenessAAs)
11425 if (It.second.AnyDead)
11426 A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
11427 }
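  // The explicit `returned` handling above looks through calls such as the
  // following hypothetical one, continuing the traversal at %x:
  //
  // ```
  //   declare i32 @passthrough(i32 returned %v)
  //   %r = call i32 @passthrough(i32 %x)
  // ```
  //
  // For pointer-typed values the same effect is achieved via
  // stripPointerCasts(), which already knows about the `returned` attribute.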
11428
11429 /// See AbstractAttribute::trackStatistics()
11430 void trackStatistics() const override {
11431 STATS_DECLTRACK_FLOATING_ATTR(potential_values)
11432 }
11433};
11434
11435struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
11436 using Base = AAPotentialValuesImpl;
11437 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
11438 : Base(IRP, A) {}
11439
11440 /// See AbstractAttribute::initialize(..).
11441 void initialize(Attributor &A) override {
11442 auto &Arg = cast<Argument>(getAssociatedValue());
11443 if (!A.isFunctionIPOAmendable(*Arg.getParent()))
11444 indicatePessimisticFixpoint();
11445 }
11446
11447 /// See AbstractAttribute::updateImpl(...).
11448 ChangeStatus updateImpl(Attributor &A) override {
11449 auto AssumedBefore = getAssumed();
11450
11451 unsigned ArgNo = getCalleeArgNo();
11452
11453 bool UsedAssumedInformation = false;
11454 SmallVector<AA::ValueAndContext> Values;
11455 auto CallSitePred = [&](AbstractCallSite ACS) {
11456 const auto CSArgIRP = IRPosition::callsite_argument(ACS, ArgNo);
11457 if (CSArgIRP.getPositionKind() == IRP_INVALID)
11458 return false;
11459
11460 if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
11461 AA::Interprocedural,
11462 UsedAssumedInformation))
11463 return false;
11464
11465 return isValidState();
11466 };
11467
11468 if (!A.checkForAllCallSites(CallSitePred, *this,
11469 /* RequireAllCallSites */ true,
11470 UsedAssumedInformation))
11471 return indicatePessimisticFixpoint();
11472
11473 Function *Fn = getAssociatedFunction();
11474 bool AnyNonLocal = false;
11475 for (auto &It : Values) {
11476 if (isa<Constant>(It.getValue())) {
11477 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11478 getAnchorScope());
11479 continue;
11480 }
11481 if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
11482 return indicatePessimisticFixpoint();
11483
11484 if (auto *Arg = dyn_cast<Argument>(It.getValue()))
11485 if (Arg->getParent() == Fn) {
11486 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11487 getAnchorScope());
11488 continue;
11489 }
11490 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
11491 getAnchorScope());
11492 AnyNonLocal = true;
11493 }
11494 assert(!undefIsContained() && "Undef should be an explicit value!");
11495 if (AnyNonLocal)
11496 giveUpOnIntraprocedural(A);
11497
11498 return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11499 : ChangeStatus::CHANGED;
11500 }
11501
11502 /// See AbstractAttribute::trackStatistics()
11503 void trackStatistics() const override {
11504 STATS_DECLTRACK_ARG_ATTR(potential_values)
11505 }
11506};
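// As an example of the call-site aggregation above, for a hypothetical
// internal function @foo with two callers:
//
// ```
//   call void @foo(i32 1)
//   call void @foo(i32 2)
// ```
//
// the argument's potential value set becomes {1, 2}. Any call-site value that
// is not dynamically unique forces a pessimistic fixpoint, and values that
// are neither constants nor arguments of @foo itself are recorded with
// interprocedural scope only.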
11507
11508struct AAPotentialValuesReturned : public AAPotentialValuesFloating {
11509 using Base = AAPotentialValuesFloating;
11510 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
11511 : Base(IRP, A) {}
11512
11513 /// See AbstractAttribute::initialize(..).
11514 void initialize(Attributor &A) override {
11515 Function *F = getAssociatedFunction();
11516 if (!F || F->isDeclaration() || F->getReturnType()->isVoidTy()) {
11517 indicatePessimisticFixpoint();
11518 return;
11519 }
11520
11521 for (Argument &Arg : F->args())
11522 if (Arg.hasReturnedAttr()) {
11523 addValue(A, getState(), Arg, nullptr, AA::AnyScope, F);
11524 ReturnedArg = &Arg;
11525 break;
11526 }
11527 if (!A.isFunctionIPOAmendable(*F) ||
11528 A.hasSimplificationCallback(getIRPosition())) {
11529 if (!ReturnedArg)
11530 indicatePessimisticFixpoint();
11531 else
11532 indicateOptimisticFixpoint();
11533 }
11534 }
11535
11536 /// See AbstractAttribute::updateImpl(...).
11537 ChangeStatus updateImpl(Attributor &A) override {
11538 auto AssumedBefore = getAssumed();
11539 bool UsedAssumedInformation = false;
11540
11542 Function *AnchorScope = getAnchorScope();
11543 auto HandleReturnedValue = [&](Value &V, Instruction *CtxI,
11544 bool AddValues) {
11545 for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
11546 Values.clear();
11547 if (!A.getAssumedSimplifiedValues(IRPosition::value(V), this, Values, S,
11548 UsedAssumedInformation,
11549 /* RecurseForSelectAndPHI */ true))
11550 return false;
11551 if (!AddValues)
11552 continue;
11553
11554 bool AllInterAreIntra = false;
11555 if (S == AA::Interprocedural)
11556 AllInterAreIntra =
11557 llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
11558 return AA::isValidInScope(*VAC.getValue(), AnchorScope);
11559 });
11560
11561 for (const AA::ValueAndContext &VAC : Values) {
11562 addValue(A, getState(), *VAC.getValue(),
11563 VAC.getCtxI() ? VAC.getCtxI() : CtxI,
11564 AllInterAreIntra ? AA::AnyScope : S, AnchorScope);
11565 }
11566 if (AllInterAreIntra)
11567 break;
11568 }
11569 return true;
11570 };
11571
11572 if (ReturnedArg) {
11573 HandleReturnedValue(*ReturnedArg, nullptr, true);
11574 } else {
11575 auto RetInstPred = [&](Instruction &RetI) {
11576 bool AddValues = true;
11577 if (isa<PHINode>(RetI.getOperand(0)) ||
11578 isa<SelectInst>(RetI.getOperand(0))) {
11579 addValue(A, getState(), *RetI.getOperand(0), &RetI, AA::AnyScope,
11580 AnchorScope);
11581 AddValues = false;
11582 }
11583 return HandleReturnedValue(*RetI.getOperand(0), &RetI, AddValues);
11584 };
11585
11586 if (!A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11587 UsedAssumedInformation,
11588 /* CheckBBLivenessOnly */ true))
11589 return indicatePessimisticFixpoint();
11590 }
11591
11592 return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11593 : ChangeStatus::CHANGED;
11594 }
11595
11596 ChangeStatus manifest(Attributor &A) override {
11597 if (ReturnedArg)
11598 return ChangeStatus::UNCHANGED;
11599 SmallVector<AA::ValueAndContext> Values;
11600 if (!getAssumedSimplifiedValues(A, Values, AA::ValueScope::Intraprocedural,
11601 /* RecurseForSelectAndPHI */ true))
11602 return ChangeStatus::UNCHANGED;
11603 Value *NewVal = getSingleValue(A, *this, getIRPosition(), Values);
11604 if (!NewVal)
11605 return ChangeStatus::UNCHANGED;
11606
11607 ChangeStatus Changed = ChangeStatus::UNCHANGED;
11608 if (auto *Arg = dyn_cast<Argument>(NewVal)) {
11609 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
11610 "Number of function with unique return");
11611 Changed |= A.manifestAttrs(
11612 IRPosition::argument(*Arg),
11613 {Attribute::get(Arg->getContext(), Attribute::Returned)});
11614 STATS_DECLTRACK_ARG_ATTR(returned);
11615 }
11616
11617 auto RetInstPred = [&](Instruction &RetI) {
11618 Value *RetOp = RetI.getOperand(0);
11619 if (isa<UndefValue>(RetOp) || RetOp == NewVal)
11620 return true;
11621 if (AA::isValidAtPosition({*NewVal, RetI}, A.getInfoCache()))
11622 if (A.changeUseAfterManifest(RetI.getOperandUse(0), *NewVal))
11623 Changed = ChangeStatus::CHANGED;
11624 return true;
11625 };
11626 bool UsedAssumedInformation = false;
11627 (void)A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11628 UsedAssumedInformation,
11629 /* CheckBBLivenessOnly */ true);
11630 return Changed;
11631 }
11632
11633 ChangeStatus indicatePessimisticFixpoint() override {
11634 return AAPotentialValues::indicatePessimisticFixpoint();
11635 }
11636
11637 /// See AbstractAttribute::trackStatistics()
11638 void trackStatistics() const override {
11639 STATS_DECLTRACK_FNRET_ATTR(potential_values) }
11640
11641 /// The argument with an existing `returned` attribute, if any.
11642 Argument *ReturnedArg = nullptr;
11643};
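// A sketch of the manifest step above (hypothetical IR): if every return
// yields the same argument, the argument is annotated and uses of the call
// results can later be forwarded:
//
// ```
//   define internal i32 @id(i32 returned %x) {
//     ret i32 %x
//   }
// ```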
11644
11645struct AAPotentialValuesFunction : AAPotentialValuesImpl {
11646 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
11647 : AAPotentialValuesImpl(IRP, A) {}
11648
11649 /// See AbstractAttribute::updateImpl(...).
11650 ChangeStatus updateImpl(Attributor &A) override {
11651 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
11652 "not be called");
11653 }
11654
11655 /// See AbstractAttribute::trackStatistics()
11656 void trackStatistics() const override {
11657 STATS_DECLTRACK_FN_ATTR(potential_values)
11658 }
11659};
11660
11661struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
11662 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
11663 : AAPotentialValuesFunction(IRP, A) {}
11664
11665 /// See AbstractAttribute::trackStatistics()
11666 void trackStatistics() const override {
11667 STATS_DECLTRACK_CS_ATTR(potential_values)
11668 }
11669};
11670
11671struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
11672 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
11673 : AAPotentialValuesImpl(IRP, A) {}
11674
11675 /// See AbstractAttribute::updateImpl(...).
11676 ChangeStatus updateImpl(Attributor &A) override {
11677 auto AssumedBefore = getAssumed();
11678
11679 Function *Callee = getAssociatedFunction();
11680 if (!Callee)
11681 return indicatePessimisticFixpoint();
11682
11683 bool UsedAssumedInformation = false;
11684 auto *CB = cast<CallBase>(getCtxI());
11685 if (CB->isMustTailCall() &&
11686 !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
11687 UsedAssumedInformation))
11688 return indicatePessimisticFixpoint();
11689
11690 Function *Caller = CB->getCaller();
11691
11692 auto AddScope = [&](AA::ValueScope S) {
11693 SmallVector<AA::ValueAndContext> Values;
11694 if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
11695 Values, S, UsedAssumedInformation))
11696 return false;
11697
11698 for (auto &It : Values) {
11699 Value *V = It.getValue();
11700 std::optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
11701 V, *CB, *this, UsedAssumedInformation);
11702 if (!CallerV.has_value()) {
11703 // Nothing to do as long as no value was determined.
11704 continue;
11705 }
11706 V = *CallerV ? *CallerV : V;
11707 if (*CallerV && AA::isDynamicallyUnique(A, *this, *V)) {
11708 if (recurseForValue(A, IRPosition::value(*V), S))
11709 continue;
11710 }
11711 if (S == AA::Intraprocedural && !AA::isValidInScope(*V, Caller)) {
11712 giveUpOnIntraprocedural(A);
11713 return true;
11714 }
11715 addValue(A, getState(), *V, CB, S, getAnchorScope());
11716 }
11717 return true;
11718 };
11719 if (!AddScope(AA::Intraprocedural))
11720 return indicatePessimisticFixpoint();
11721 if (!AddScope(AA::Interprocedural))
11722 return indicatePessimisticFixpoint();
11723 return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11724 : ChangeStatus::CHANGED;
11725 }
11726
11727 ChangeStatus indicatePessimisticFixpoint() override {
11728 return AAPotentialValues::indicatePessimisticFixpoint();
11729 }
11730
11731 /// See AbstractAttribute::trackStatistics()
11732 void trackStatistics() const override {
11733 STATS_DECLTRACK_CSRET_ATTR(potential_values)
11734 }
11735};
11736
11737struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
11738 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
11739 : AAPotentialValuesFloating(IRP, A) {}
11740
11741 /// See AbstractAttribute::trackStatistics()
11742 void trackStatistics() const override {
11743 STATS_DECLTRACK_CSARG_ATTR(potential_values)
11744 }
11745};
11746} // namespace
11747
11748/// ---------------------- Assumption Propagation ------------------------------
11749namespace {
11750struct AAAssumptionInfoImpl : public AAAssumptionInfo {
11751 AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
11752 const DenseSet<StringRef> &Known)
11753 : AAAssumptionInfo(IRP, A, Known) {}
11754
11755 /// See AbstractAttribute::manifest(...).
11756 ChangeStatus manifest(Attributor &A) override {
11757 // Don't manifest a universal set if it somehow made it here.
11758 if (getKnown().isUniversal())
11759 return ChangeStatus::UNCHANGED;
11760
11761 const IRPosition &IRP = getIRPosition();
11762 SmallVector<StringRef, 0> Set(getAssumed().getSet().begin(),
11763 getAssumed().getSet().end());
11764 llvm::sort(Set);
11765 return A.manifestAttrs(IRP,
11766 Attribute::get(IRP.getAnchorValue().getContext(),
11767 AssumptionAttrKey,
11768 llvm::join(Set, ",")),
11769 /*ForceReplace=*/true);
11770 }
11771
11772 bool hasAssumption(const StringRef Assumption) const override {
11773 return isValidState() && setContains(Assumption);
11774 }
11775
11776 /// See AbstractAttribute::getAsStr()
11777 const std::string getAsStr(Attributor *A) const override {
11778 const SetContents &Known = getKnown();
11779 const SetContents &Assumed = getAssumed();
11780
11781 SmallVector<StringRef, 0> Set(Known.getSet().begin(), Known.getSet().end());
11782 llvm::sort(Set);
11783 const std::string KnownStr = llvm::join(Set, ",");
11784
11785 std::string AssumedStr = "Universal";
11786 if (!Assumed.isUniversal()) {
11787 Set.assign(Assumed.getSet().begin(), Assumed.getSet().end());
11788 AssumedStr = llvm::join(Set, ",");
11789 }
11790 return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
11791 }
11792};
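// The manifested assumptions end up as a comma-separated string attribute
// keyed by AssumptionAttrKey ("llvm.assume"); e.g., with hypothetical
// assumption names:
//
// ```
//   attributes #0 = { "llvm.assume"="ompx_no_state,ompx_spmd" }
// ```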
11793
11794/// Propagates assumption information from parent functions to all of their
11795/// successors. An assumption can be propagated if the containing function
11796/// dominates the called function.
11797///
11798/// We start with a "known" set of assumptions already valid for the associated
11799/// function and an "assumed" set that initially contains all possible
11800/// assumptions. The assumed set is inter-procedurally updated by narrowing its
11801/// contents as concrete values are known. The concrete values are seeded by the
11802/// first nodes that are either entries into the call graph, or contain no
11803/// assumptions. Each node is updated as the intersection of the assumed state
11804/// with all of its predecessors.
11805struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
11806 AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
11807 : AAAssumptionInfoImpl(IRP, A,
11808 getAssumptions(*IRP.getAssociatedFunction())) {}
11809
11810 /// See AbstractAttribute::updateImpl(...).
11811 ChangeStatus updateImpl(Attributor &A) override {
11812 bool Changed = false;
11813
11814 auto CallSitePred = [&](AbstractCallSite ACS) {
11815 const auto *AssumptionAA = A.getAAFor<AAAssumptionInfo>(
11816 *this, IRPosition::callsite_function(*ACS.getInstruction()),
11817 DepClassTy::REQUIRED);
11818 if (!AssumptionAA)
11819 return false;
11820 // Get the set of assumptions shared by all of this function's callers.
11821 Changed |= getIntersection(AssumptionAA->getAssumed());
11822 return !getAssumed().empty() || !getKnown().empty();
11823 };
11824
11825 bool UsedAssumedInformation = false;
11826 // Get the intersection of all assumptions held by this node's predecessors.
11827 // If we don't know all the call sites then this is either an entry into the
11828 // call graph or an empty node. This node is known to only contain its own
11829 // assumptions and can be propagated to its successors.
11830 if (!A.checkForAllCallSites(CallSitePred, *this, true,
11831 UsedAssumedInformation))
11832 return indicatePessimisticFixpoint();
11833
11834 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11835 }
11836
11837 void trackStatistics() const override {}
11838};
11839
11840/// Assumption Info defined for call sites.
11841struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
11842
11843 AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
11844 : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
11845
11846 /// See AbstractAttribute::initialize(...).
11847 void initialize(Attributor &A) override {
11848 const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11849 A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11850 }
11851
11852 /// See AbstractAttribute::updateImpl(...).
11853 ChangeStatus updateImpl(Attributor &A) override {
11854 const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11855 auto *AssumptionAA =
11856 A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11857 if (!AssumptionAA)
11858 return indicatePessimisticFixpoint();
11859 bool Changed = getIntersection(AssumptionAA->getAssumed());
11860 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11861 }
11862
11863 /// See AbstractAttribute::trackStatistics()
11864 void trackStatistics() const override {}
11865
11866private:
11867 /// Helper to initialize the known set to all the assumptions this call and
11868 /// the callee contain.
11869 DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
11870 const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
11871 auto Assumptions = getAssumptions(CB);
11872 if (const Function *F = CB.getCaller())
11873 set_union(Assumptions, getAssumptions(*F));
11874 if (Function *F = IRP.getAssociatedFunction())
11875 set_union(Assumptions, getAssumptions(*F));
11876 return Assumptions;
11877 }
11878};
11879} // namespace
11880
11881AACallGraphNode *AACallEdgeIterator::operator*() const {
11882 return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
11883 A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
11884}
11885
11886void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
11887
11888/// ------------------------ UnderlyingObjects ---------------------------------
11889
11890namespace {
11891struct AAUnderlyingObjectsImpl
11892 : StateWrapper<BooleanState, AAUnderlyingObjects> {
11893 using BaseTy = StateWrapper<BooleanState, AAUnderlyingObjects>;
11894 AAUnderlyingObjectsImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
11895
11896 /// See AbstractAttribute::getAsStr().
11897 const std::string getAsStr(Attributor *A) const override {
11898 if (!isValidState())
11899 return "<invalid>";
11900 std::string Str;
11901 raw_string_ostream OS(Str);
11902 OS << "underlying objects: inter " << InterAssumedUnderlyingObjects.size()
11903 << " objects, intra " << IntraAssumedUnderlyingObjects.size()
11904 << " objects.\n";
11905 if (!InterAssumedUnderlyingObjects.empty()) {
11906 OS << "inter objects:\n";
11907 for (auto *Obj : InterAssumedUnderlyingObjects)
11908 OS << *Obj << '\n';
11909 }
11910 if (!IntraAssumedUnderlyingObjects.empty()) {
11911 OS << "intra objects:\n";
11912 for (auto *Obj : IntraAssumedUnderlyingObjects)
11913 OS << *Obj << '\n';
11914 }
11915 return Str;
11916 }
11917
11918 /// See AbstractAttribute::trackStatistics()
11919 void trackStatistics() const override {}
11920
11921 /// See AbstractAttribute::updateImpl(...).
11922 ChangeStatus updateImpl(Attributor &A) override {
11923 auto &Ptr = getAssociatedValue();
11924
11925 bool UsedAssumedInformation = false;
11926 auto DoUpdate = [&](SmallSetVector<Value *, 8> &UnderlyingObjects,
11927 AA::ValueScope Scope) {
11928 SmallPtrSet<Value *, 8> SeenObjects;
11929 SmallVector<AA::ValueAndContext> Values;
11930
11931 if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), *this, Values,
11932 Scope, UsedAssumedInformation))
11933 return UnderlyingObjects.insert(&Ptr);
11934
11935 bool Changed = false;
11936
11937 for (unsigned I = 0; I < Values.size(); ++I) {
11938 auto &VAC = Values[I];
11939 auto *Obj = VAC.getValue();
11940 Value *UO = getUnderlyingObject(Obj);
11941 if (!SeenObjects.insert(UO ? UO : Obj).second)
11942 continue;
11943 if (UO && UO != Obj) {
11944 if (isa<AllocaInst>(UO) || isa<GlobalValue>(UO)) {
11945 Changed |= UnderlyingObjects.insert(UO);
11946 continue;
11947 }
11948
11949 const auto *OtherAA = A.getAAFor<AAUnderlyingObjects>(
11950 *this, IRPosition::value(*UO), DepClassTy::OPTIONAL);
11951 auto Pred = [&](Value &V) {
11952 if (&V == UO)
11953 Changed |= UnderlyingObjects.insert(UO);
11954 else
11955 Values.emplace_back(V, nullptr);
11956 return true;
11957 };
11958
11959 if (!OtherAA || !OtherAA->forallUnderlyingObjects(Pred, Scope))
11960 llvm_unreachable(
11961 "The forall call should not return false at this position");
11962 UsedAssumedInformation |= !OtherAA->getState().isAtFixpoint();
11963 continue;
11964 }
11965
11966 if (isa<SelectInst>(Obj)) {
11967 Changed |= handleIndirect(A, *Obj, UnderlyingObjects, Scope,
11968 UsedAssumedInformation);
11969 continue;
11970 }
11971 if (auto *PHI = dyn_cast<PHINode>(Obj)) {
11972 // Explicitly look through PHIs as we do not care about dynamic
11973 // uniqueness.
11974 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
11975 Changed |=
11976 handleIndirect(A, *PHI->getIncomingValue(u), UnderlyingObjects,
11977 Scope, UsedAssumedInformation);
11978 }
11979 continue;
11980 }
11981
11982 Changed |= UnderlyingObjects.insert(Obj);
11983 }
11984
11985 return Changed;
11986 };
11987
11988 bool Changed = false;
11989 Changed |= DoUpdate(IntraAssumedUnderlyingObjects, AA::Intraprocedural);
11990 Changed |= DoUpdate(InterAssumedUnderlyingObjects, AA::Interprocedural);
11991 if (!UsedAssumedInformation)
11992 indicateOptimisticFixpoint();
11993 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11994 }
11995
11996 bool forallUnderlyingObjects(
11997 function_ref<bool(Value &)> Pred,
11998 AA::ValueScope Scope = AA::Interprocedural) const override {
11999 if (!isValidState())
12000 return Pred(getAssociatedValue());
12001
12002 auto &AssumedUnderlyingObjects = Scope == AA::Intraprocedural
12003 ? IntraAssumedUnderlyingObjects
12004 : InterAssumedUnderlyingObjects;
12005 for (Value *Obj : AssumedUnderlyingObjects)
12006 if (!Pred(*Obj))
12007 return false;
12008
12009 return true;
12010 }
12011
12012private:
12013 /// Handle the case where the value is not the actual underlying value, such
12014 /// as a phi node or a select instruction.
12015 bool handleIndirect(Attributor &A, Value &V,
12016 SmallSetVector<Value *, 8> &UnderlyingObjects,
12017 AA::ValueScope Scope, bool &UsedAssumedInformation) {
12018 bool Changed = false;
12019 const auto *AA = A.getAAFor<AAUnderlyingObjects>(
12020 *this, IRPosition::value(V), DepClassTy::OPTIONAL);
12021 auto Pred = [&](Value &V) {
12022 Changed |= UnderlyingObjects.insert(&V);
12023 return true;
12024 };
12025 if (!AA || !AA->forallUnderlyingObjects(Pred, Scope))
12026 llvm_unreachable(
12027 "The forall call should not return false at this position");
12028 UsedAssumedInformation |= !AA->getState().isAtFixpoint();
12029 return Changed;
12030 }
12031
12032 /// All the underlying objects collected so far via intra procedural scope.
12033 SmallSetVector<Value *, 8> IntraAssumedUnderlyingObjects;
12034 /// All the underlying objects collected so far via inter procedural scope.
12035 SmallSetVector<Value *, 8> InterAssumedUnderlyingObjects;
12036};
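// For illustration (hypothetical IR), the update above walks through selects
// and PHIs without requiring dynamic uniqueness:
//
// ```
//   %p = select i1 %c, ptr %a, ptr @g
// ```
//
// With %a an alloca, the assumed underlying objects of %p become {%a, @g} in
// both scopes; once no assumed information was used, the state is fixed
// optimistically.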
12037
12038struct AAUnderlyingObjectsFloating final : AAUnderlyingObjectsImpl {
12039 AAUnderlyingObjectsFloating(const IRPosition &IRP, Attributor &A)
12040 : AAUnderlyingObjectsImpl(IRP, A) {}
12041};
12042
12043struct AAUnderlyingObjectsArgument final : AAUnderlyingObjectsImpl {
12044 AAUnderlyingObjectsArgument(const IRPosition &IRP, Attributor &A)
12045 : AAUnderlyingObjectsImpl(IRP, A) {}
12046};
12047
12048struct AAUnderlyingObjectsCallSite final : AAUnderlyingObjectsImpl {
12049 AAUnderlyingObjectsCallSite(const IRPosition &IRP, Attributor &A)
12050 : AAUnderlyingObjectsImpl(IRP, A) {}
12051};
12052
12053struct AAUnderlyingObjectsCallSiteArgument final : AAUnderlyingObjectsImpl {
12054 AAUnderlyingObjectsCallSiteArgument(const IRPosition &IRP, Attributor &A)
12055 : AAUnderlyingObjectsImpl(IRP, A) {}
12056};
12057
12058struct AAUnderlyingObjectsReturned final : AAUnderlyingObjectsImpl {
12059 AAUnderlyingObjectsReturned(const IRPosition &IRP, Attributor &A)
12060 : AAUnderlyingObjectsImpl(IRP, A) {}
12061};
12062
12063struct AAUnderlyingObjectsCallSiteReturned final : AAUnderlyingObjectsImpl {
12064 AAUnderlyingObjectsCallSiteReturned(const IRPosition &IRP, Attributor &A)
12065 : AAUnderlyingObjectsImpl(IRP, A) {}
12066};
12067
12068struct AAUnderlyingObjectsFunction final : AAUnderlyingObjectsImpl {
12069 AAUnderlyingObjectsFunction(const IRPosition &IRP, Attributor &A)
12070 : AAUnderlyingObjectsImpl(IRP, A) {}
12071};
12072} // namespace
12073
12074/// ------------------------ Global Value Info -------------------------------
12075namespace {
12076struct AAGlobalValueInfoFloating : public AAGlobalValueInfo {
12077 AAGlobalValueInfoFloating(const IRPosition &IRP, Attributor &A)
12078 : AAGlobalValueInfo(IRP, A) {}
12079
12080 /// See AbstractAttribute::initialize(...).
12081 void initialize(Attributor &A) override {}
12082
12083 bool checkUse(Attributor &A, const Use &U, bool &Follow,
12084 SmallVectorImpl<const Value *> &Worklist) {
12085 Instruction *UInst = dyn_cast<Instruction>(U.getUser());
12086 if (!UInst) {
12087 Follow = true;
12088 return true;
12089 }
12090
12091 LLVM_DEBUG(dbgs() << "[AAGlobalValueInfo] Check use: " << *U.get() << " in "
12092 << *UInst << "\n");
12093
12094 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
12095 int Idx = &Cmp->getOperandUse(0) == &U;
12096 if (isa<Constant>(Cmp->getOperand(Idx)))
12097 return true;
12098 return U == &getAnchorValue();
12099 }
12100
12101 // Explicitly catch return instructions.
12102 if (isa<ReturnInst>(UInst)) {
12103 auto CallSitePred = [&](AbstractCallSite ACS) {
12104 Worklist.push_back(ACS.getInstruction());
12105 return true;
12106 };
12107 bool UsedAssumedInformation = false;
12108 // TODO: We should traverse the uses or add a "non-call-site" CB.
12109 if (!A.checkForAllCallSites(CallSitePred, *UInst->getFunction(),
12110 /*RequireAllCallSites=*/true, this,
12111 UsedAssumedInformation))
12112 return false;
12113 return true;
12114 }
12115
12116 // For now we only use special logic for call sites. However, the tracker
12117 // itself knows about a lot of other non-capturing cases already.
12118 auto *CB = dyn_cast<CallBase>(UInst);
12119 if (!CB)
12120 return false;
12121 // Direct calls are OK uses.
12122 if (CB->isCallee(&U))
12123 return true;
12124 // Non-argument uses are scary.
12125 if (!CB->isArgOperand(&U))
12126 return false;
12127 // TODO: Iterate callees.
12128 auto *Fn = dyn_cast<Function>(CB->getCalledOperand());
12129 if (!Fn || !A.isFunctionIPOAmendable(*Fn))
12130 return false;
12131
12132 unsigned ArgNo = CB->getArgOperandNo(&U);
12133 Worklist.push_back(Fn->getArg(ArgNo));
12134 return true;
12135 }
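  // As an illustration (hypothetical IR) of the call-site logic above:
  //
  // ```
  //   call void @g()            ; direct call, an acceptable use
  //   call void @use(ptr @g)    ; traversal continues at @use's argument
  // ```
  //
  // The argument case requires @use to be IPO-amendable so the argument's
  // uses can be followed in turn.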
12136
12137 ChangeStatus updateImpl(Attributor &A) override {
12138 unsigned NumUsesBefore = Uses.size();
12139
12140 SmallPtrSet<const Value *, 8> Visited;
12141 SmallVector<const Value *> Worklist;
12142 Worklist.push_back(&getAnchorValue());
12143
12144 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
12145 Uses.insert(&U);
12146 switch (DetermineUseCaptureKind(U, nullptr)) {
12147 case UseCaptureKind::NO_CAPTURE:
12148 return checkUse(A, U, Follow, Worklist);
12149 case UseCaptureKind::MAY_CAPTURE:
12150 return checkUse(A, U, Follow, Worklist);
12151 case UseCaptureKind::PASSTHROUGH:
12152 Follow = true;
12153 return true;
12154 }
12155 return true;
12156 };
12157 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
12158 Uses.insert(&OldU);
12159 return true;
12160 };
12161
12162 while (!Worklist.empty()) {
12163 const Value *V = Worklist.pop_back_val();
12164 if (!Visited.insert(V).second)
12165 continue;
12166 if (!A.checkForAllUses(UsePred, *this, *V,
12167 /* CheckBBLivenessOnly */ true,
12168 DepClassTy::OPTIONAL,
12169 /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
12170 return indicatePessimisticFixpoint();
12171 }
12172 }
12173
12174 return Uses.size() == NumUsesBefore ? ChangeStatus::UNCHANGED
12175 : ChangeStatus::CHANGED;
12176 }
12177
12178 bool isPotentialUse(const Use &U) const override {
12179 return !isValidState() || Uses.contains(&U);
12180 }
12181
12182 /// See AbstractAttribute::manifest(...).
12183 ChangeStatus manifest(Attributor &A) override {
12184 return ChangeStatus::UNCHANGED;
12185 }
12186
12187 /// See AbstractAttribute::getAsStr().
12188 const std::string getAsStr(Attributor *A) const override {
12189 return "[" + std::to_string(Uses.size()) + " uses]";
12190 }
12191
12192 void trackStatistics() const override {
12193 STATS_DECLTRACK_FLOATING_ATTR(GlobalValuesTracked);
12194 }
12195
12196private:
12197 /// Set of (transitive) uses of this GlobalValue.
12198 SmallPtrSet<const Use *, 8> Uses;
12199};
12200} // namespace
12201
12202/// ------------------------ Indirect Call Info -------------------------------
12203namespace {
12204struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo {
12205 AAIndirectCallInfoCallSite(const IRPosition &IRP, Attributor &A)
12206 : AAIndirectCallInfo(IRP, A) {}
12207
12208 /// See AbstractAttribute::initialize(...).
12209 void initialize(Attributor &A) override {
12210 auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees);
12211 if (!MD && !A.isClosedWorldModule())
12212 return;
12213
12214 if (MD) {
12215 for (const auto &Op : MD->operands())
12216 if (Function *Callee = mdconst::dyn_extract_or_null<Function>(Op))
12217 PotentialCallees.insert(Callee);
12218 } else if (A.isClosedWorldModule()) {
12219 ArrayRef<Function *> IndirectlyCallableFunctions =
12220 A.getInfoCache().getIndirectlyCallableFunctions(A);
12221 PotentialCallees.insert(IndirectlyCallableFunctions.begin(),
12222 IndirectlyCallableFunctions.end());
12223 }
12224
12225 if (PotentialCallees.empty())
12226 indicateOptimisticFixpoint();
12227 }
12228
12229 ChangeStatus updateImpl(Attributor &A) override {
12230 CallBase *CB = cast<CallBase>(getCtxI());
12231 const Use &CalleeUse = CB->getCalledOperandUse();
12232 Value *FP = CB->getCalledOperand();
12233
12234 SmallSetVector<Function *, 4> AssumedCalleesNow;
12235 bool AllCalleesKnownNow = AllCalleesKnown;
12236
12237 auto CheckPotentialCalleeUse = [&](Function &PotentialCallee,
12238 bool &UsedAssumedInformation) {
12239 const auto *GIAA = A.getAAFor<AAGlobalValueInfo>(
12240 *this, IRPosition::value(PotentialCallee), DepClassTy::OPTIONAL);
12241 if (!GIAA || GIAA->isPotentialUse(CalleeUse))
12242 return true;
12243 UsedAssumedInformation = !GIAA->isAtFixpoint();
12244 return false;
12245 };
12246
12247 auto AddPotentialCallees = [&]() {
12248 for (auto *PotentialCallee : PotentialCallees) {
12249 bool UsedAssumedInformation = false;
12250 if (CheckPotentialCalleeUse(*PotentialCallee, UsedAssumedInformation))
12251 AssumedCalleesNow.insert(PotentialCallee);
12252 }
12253 };
12254
12255 // Use simplification to find potential callees; if !callees was present,
12256 // fall back to that set if necessary.
12257 bool UsedAssumedInformation = false;
12258 SmallVector<AA::ValueAndContext> Values;
12259 if (!A.getAssumedSimplifiedValues(IRPosition::value(*FP), this, Values,
12260 AA::ValueScope::AnyScope,
12261 UsedAssumedInformation)) {
12262 if (PotentialCallees.empty())
12263 return indicatePessimisticFixpoint();
12264 AddPotentialCallees();
12265 }
12266
12267 // Try to find a reason for \p Fn not to be a potential callee. If none was
12268 // found, add it to the assumed callees set.
12269 auto CheckPotentialCallee = [&](Function &Fn) {
12270 if (!PotentialCallees.empty() && !PotentialCallees.count(&Fn))
12271 return false;
12272
12273 auto &CachedResult = FilterResults[&Fn];
12274 if (CachedResult.has_value())
12275 return CachedResult.value();
12276
12277 bool UsedAssumedInformation = false;
12278 if (!CheckPotentialCalleeUse(Fn, UsedAssumedInformation)) {
12279 if (!UsedAssumedInformation)
12280 CachedResult = false;
12281 return false;
12282 }
12283
12284 int NumFnArgs = Fn.arg_size();
12285 int NumCBArgs = CB->arg_size();
12286
12287 // Check if any excess argument (which we fill up with poison) is known to
12288 // be UB on undef.
12289 for (int I = NumCBArgs; I < NumFnArgs; ++I) {
12290 bool IsKnown = false;
12291 if (AA::hasAssumedIRAttr<Attribute::NoUndef>(
12292 A, this, IRPosition::argument(*Fn.getArg(I)),
12293 DepClassTy::OPTIONAL, IsKnown)) {
12294 if (IsKnown)
12295 CachedResult = false;
12296 return false;
12297 }
12298 }
12299
12300 CachedResult = true;
12301 return true;
12302 };
12303
12304 // Check simplification result, prune known UB callees, also restrict it to
12305 // the !callees set, if present.
12306 for (auto &VAC : Values) {
12307 if (isa<UndefValue>(VAC.getValue()))
12308 continue;
12309 if (isa<ConstantPointerNull>(VAC.getValue()) &&
12310 VAC.getValue()->getType()->getPointerAddressSpace() == 0)
12311 continue;
12312 // TODO: Check for known UB, e.g., poison + noundef.
12313 if (auto *VACFn = dyn_cast<Function>(VAC.getValue())) {
12314 if (CheckPotentialCallee(*VACFn))
12315 AssumedCalleesNow.insert(VACFn);
12316 continue;
12317 }
12318 if (!PotentialCallees.empty()) {
12319 AddPotentialCallees();
12320 break;
12321 }
12322 AllCalleesKnownNow = false;
12323 }
12324
12325 if (AssumedCalleesNow == AssumedCallees &&
12326 AllCalleesKnown == AllCalleesKnownNow)
12327 return ChangeStatus::UNCHANGED;
12328
12329 std::swap(AssumedCallees, AssumedCalleesNow);
12330 AllCalleesKnown = AllCalleesKnownNow;
12331 return ChangeStatus::CHANGED;
12332 }
12333
12334 /// See AbstractAttribute::manifest(...).
12335 ChangeStatus manifest(Attributor &A) override {
12336 // If we can't specialize at all, give up now.
12337 if (!AllCalleesKnown && AssumedCallees.empty())
12338 return ChangeStatus::UNCHANGED;
12339
12340 CallBase *CB = cast<CallBase>(getCtxI());
12341 bool UsedAssumedInformation = false;
12342 if (A.isAssumedDead(*CB, this, /*LivenessAA=*/nullptr,
12343 UsedAssumedInformation))
12344 return ChangeStatus::UNCHANGED;
12345
12346 ChangeStatus Changed = ChangeStatus::UNCHANGED;
12347 Value *FP = CB->getCalledOperand();
12348 if (FP->getType()->getPointerAddressSpace())
12349 FP = new AddrSpaceCastInst(FP, PointerType::get(FP->getType(), 0),
12350 FP->getName() + ".as0", CB->getIterator());
12351
12352 bool CBIsVoid = CB->getType()->isVoidTy();
12353 BasicBlock::iterator IP = CB->getIterator();
12354 FunctionType *CSFT = CB->getFunctionType();
12355 SmallVector<Value *> CSArgs(CB->args());
12356
12357 // If we know all callees and there are none, the call site is (effectively)
12358 // dead (or UB).
12359 if (AssumedCallees.empty()) {
12360 assert(AllCalleesKnown &&
12361 "Expected all callees to be known if there are none.");
12362 A.changeToUnreachableAfterManifest(CB);
12363 return ChangeStatus::CHANGED;
12364 }
12365
12366 // Special handling for the single callee case.
12367 if (AllCalleesKnown && AssumedCallees.size() == 1) {
12368 auto *NewCallee = AssumedCallees.front();
12369 if (isLegalToPromote(*CB, NewCallee)) {
12370 promoteCall(*CB, NewCallee, nullptr);
12371 NumIndirectCallsPromoted++;
12372 return ChangeStatus::CHANGED;
12373 }
12374 Instruction *NewCall =
12375 CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
12376 CB->getName(), CB->getIterator());
12377 if (!CBIsVoid)
12378 A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewCall);
12379 A.deleteAfterManifest(*CB);
12380 return ChangeStatus::CHANGED;
12381 }
12382
12383 // For each potential value we create a conditional
12384 //
12385 // ```
12386 // if (ptr == value) value(args);
12387 // else ...
12388 // ```
12389 //
12390 bool SpecializedForAnyCallees = false;
12391 bool SpecializedForAllCallees = AllCalleesKnown;
12392 ICmpInst *LastCmp = nullptr;
12393 SmallVector<Function *, 8> SkippedAssumedCallees;
12394 SmallVector<std::pair<CallInst *, Instruction *>> NewCalls;
12395 for (Function *NewCallee : AssumedCallees) {
12396 if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee,
12397 AssumedCallees.size())) {
12398 SkippedAssumedCallees.push_back(NewCallee);
12399 SpecializedForAllCallees = false;
12400 continue;
12401 }
12402 SpecializedForAnyCallees = true;
12403
12404 LastCmp = new ICmpInst(IP, llvm::CmpInst::ICMP_EQ, FP, NewCallee);
12405 Instruction *ThenTI =
12406 SplitBlockAndInsertIfThen(LastCmp, IP, /* Unreachable */ false);
12407 BasicBlock *CBBB = CB->getParent();
12408 A.registerManifestAddedBasicBlock(*ThenTI->getParent());
12409 A.registerManifestAddedBasicBlock(*IP->getParent());
12410 auto *SplitTI = cast<BranchInst>(LastCmp->getNextNode());
12411 BasicBlock *ElseBB;
12412 if (&*IP == CB) {
12413 ElseBB = BasicBlock::Create(ThenTI->getContext(), "",
12414 ThenTI->getFunction(), CBBB);
12415 A.registerManifestAddedBasicBlock(*ElseBB);
12416 IP = BranchInst::Create(CBBB, ElseBB)->getIterator();
12417 SplitTI->replaceUsesOfWith(CBBB, ElseBB);
12418 } else {
12419 ElseBB = IP->getParent();
12420 ThenTI->replaceUsesOfWith(ElseBB, CBBB);
12421 }
12422 CastInst *RetBC = nullptr;
12423 CallInst *NewCall = nullptr;
12424 if (isLegalToPromote(*CB, NewCallee)) {
12425 auto *CBClone = cast<CallBase>(CB->clone());
12426 CBClone->insertBefore(ThenTI);
12427 NewCall = &cast<CallInst>(promoteCall(*CBClone, NewCallee, &RetBC));
12428 NumIndirectCallsPromoted++;
12429 } else {
12430 NewCall = CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
12431 CB->getName(), ThenTI->getIterator());
12432 }
12433 NewCalls.push_back({NewCall, RetBC});
12434 }
12435
12436 auto AttachCalleeMetadata = [&](CallBase &IndirectCB) {
12437 if (!AllCalleesKnown)
12438 return ChangeStatus::UNCHANGED;
12439 MDBuilder MDB(IndirectCB.getContext());
12440 MDNode *Callees = MDB.createCallees(SkippedAssumedCallees);
12441 IndirectCB.setMetadata(LLVMContext::MD_callees, Callees);
12442 return ChangeStatus::CHANGED;
12443 };
12444
12445 if (!SpecializedForAnyCallees)
12446 return AttachCalleeMetadata(*CB);
12447
12448 // Check if we still need the fallback indirect call.
12449 if (SpecializedForAllCallees) {
12450 LastCmp->replaceAllUsesWith(ConstantInt::getTrue(IP->getContext()));
12451 LastCmp->eraseFromParent();
12452 new UnreachableInst(IP->getContext(), IP);
12453 IP->eraseFromParent();
12454 } else {
12455 auto *CBClone = cast<CallInst>(CB->clone());
12456 CBClone->setName(CB->getName());
12457 CBClone->insertBefore(*IP->getParent(), IP);
12458 NewCalls.push_back({CBClone, nullptr});
12459 AttachCalleeMetadata(*CBClone);
12460 }
12461
12462 // Check if we need a PHI to merge the results.
12463 if (!CBIsVoid) {
12464 auto *PHI = PHINode::Create(CB->getType(), NewCalls.size(),
12465 CB->getName() + ".phi",
12466 CB->getParent()->getFirstInsertionPt());
12467 for (auto &It : NewCalls) {
12468 CallBase *NewCall = It.first;
12469 Instruction *CallRet = It.second ? It.second : It.first;
12470 if (CallRet->getType() == CB->getType())
12471 PHI->addIncoming(CallRet, CallRet->getParent());
12472 else if (NewCall->getType()->isVoidTy())
12473 PHI->addIncoming(PoisonValue::get(CB->getType()),
12474 NewCall->getParent());
12475 else
12476 llvm_unreachable("Call return should match or be void!");
12477 }
12478 A.changeAfterManifest(IRPosition::callsite_returned(*CB), *PHI);
12479 }
12480
12481 A.deleteAfterManifest(*CB);
12482 Changed = ChangeStatus::CHANGED;
12483
12484 return Changed;
12485 }
12486
12487 /// See AbstractAttribute::getAsStr().
12488 const std::string getAsStr(Attributor *A) const override {
12489 return std::string(AllCalleesKnown ? "eliminate" : "specialize") +
12490 " indirect call site with " + std::to_string(AssumedCallees.size()) +
12491 " functions";
12492 }
12493
12494 void trackStatistics() const override {
12495 if (AllCalleesKnown) {
12496 STATS_DECLTRACK(
12497 Eliminated, CallSites,
12498 "Number of indirect call sites eliminated via specialization")
12499 } else {
12500 STATS_DECLTRACK(Specialized, CallSites,
12501 "Number of indirect call sites specialized")
12502 }
12503 }
12504
12505 bool foreachCallee(function_ref<bool(Function *)> CB) const override {
12506 return isValidState() && AllCalleesKnown && all_of(AssumedCallees, CB);
12507 }
12508
12509private:
12510 /// Map to remember filter results.
12511 DenseMap<Function *, std::optional<bool>> FilterResults;
12512
12513 /// If the !callees metadata was present, this set will contain all potential
12514 /// callees (superset).
12515 SmallSetVector<Function *, 4> PotentialCallees;
12516
12517 /// This set contains all currently assumed callees, which might grow over
12518 /// time.
12519 SmallSetVector<Function *, 4> AssumedCallees;
12520
12521 /// Flag to indicate if all possible callees are in the AssumedCallees set or
12522 /// if there could be others.
12523 bool AllCalleesKnown = true;
12524};
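// A before/after sketch of the specialization above (hypothetical IR with
// two known callees @f and @g):
//
// ```
//   call void %fp(), !callees !0            ; !0 = !{ptr @f, ptr @g}
// ```
//
// becomes, in effect,
//
// ```
//   %c = icmp eq ptr %fp, @f
//   br i1 %c, label %call.f, label %call.g  ; direct calls in both blocks
// ```
//
// with a PHI merging return values if needed; if not all callees are known,
// a fallback indirect call annotated with the skipped callees remains.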
12525} // namespace
12526
12527/// ------------------------ Address Space ------------------------------------
12528namespace {
12529
12530template <typename InstType>
12531static bool makeChange(Attributor &A, InstType *MemInst, const Use &U,
12532 Value *OriginalValue, PointerType *NewPtrTy,
12533 bool UseOriginalValue) {
12534 if (U.getOperandNo() != InstType::getPointerOperandIndex())
12535 return false;
12536
12537 if (MemInst->isVolatile()) {
12538 auto *TTI = A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(
12539 *MemInst->getFunction());
12540 unsigned NewAS = NewPtrTy->getPointerAddressSpace();
12541 if (!TTI || !TTI->hasVolatileVariant(MemInst, NewAS))
12542 return false;
12543 }
12544
12545 if (UseOriginalValue) {
12546 A.changeUseAfterManifest(const_cast<Use &>(U), *OriginalValue);
12547 return true;
12548 }
12549
12550 Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy);
12551 CastInst->insertBefore(MemInst);
12552 A.changeUseAfterManifest(const_cast<Use &>(U), *CastInst);
12553 return true;
12554}
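// A minimal sketch of the rewrite makeChange enables (hypothetical IR, flat
// address space 0, inferred address space 3):
//
// ```
//   %v = load i32, ptr %p
// ```
//
// becomes
//
// ```
//   %q = addrspacecast ptr %p to ptr addrspace(3)
//   %v = load i32, ptr addrspace(3) %q
// ```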
12555
12556struct AAAddressSpaceImpl : public AAAddressSpace {
12557 AAAddressSpaceImpl(const IRPosition &IRP, Attributor &A)
12558 : AAAddressSpace(IRP, A) {}
12559
12560 uint32_t getAddressSpace() const override {
12561 assert(isValidState() && "the AA is invalid");
12562 return AssumedAddressSpace;
12563 }
12564
12565 /// See AbstractAttribute::initialize(...).
12566 void initialize(Attributor &A) override {
12567 assert(getAssociatedType()->isPtrOrPtrVectorTy() &&
12568 "Associated value is not a pointer");
12569
12570 if (!A.getInfoCache().getFlatAddressSpace().has_value()) {
12571 indicatePessimisticFixpoint();
12572 return;
12573 }
12574
12575 unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value();
12576 unsigned AS = getAssociatedType()->getPointerAddressSpace();
12577 if (AS != FlatAS) {
12578 [[maybe_unused]] bool R = takeAddressSpace(AS);
12579 assert(R && "The take should happen");
12580 indicateOptimisticFixpoint();
12581 }
12582 }
12583
12584 ChangeStatus updateImpl(Attributor &A) override {
12585 unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value();
12586 uint32_t OldAddressSpace = AssumedAddressSpace;
12587
12588 auto CheckAddressSpace = [&](Value &Obj) {
12589 if (isa<UndefValue>(&Obj))
12590 return true;
12591 // If an argument in the flat address space only has addrspacecast uses, and
12592 // all those casts target the same destination address space, we take it.
12593 if (auto *Arg = dyn_cast<Argument>(&Obj)) {
12594 if (Arg->getType()->getPointerAddressSpace() == FlatAS) {
12595 unsigned CastAddrSpace = FlatAS;
12596 for (auto *U : Arg->users()) {
12597 auto *ASCI = dyn_cast<AddrSpaceCastInst>(U);
12598 if (!ASCI)
12599 return takeAddressSpace(Obj.getType()->getPointerAddressSpace());
12600 if (CastAddrSpace != FlatAS &&
12601 CastAddrSpace != ASCI->getDestAddressSpace())
12602 return false;
12603 CastAddrSpace = ASCI->getDestAddressSpace();
12604 }
12605 if (CastAddrSpace != FlatAS)
12606 return takeAddressSpace(CastAddrSpace);
12607 }
12608 }
12609 return takeAddressSpace(Obj.getType()->getPointerAddressSpace());
12610 };
12611
12612 auto *AUO = A.getOrCreateAAFor<AAUnderlyingObjects>(getIRPosition(), this,
12613 DepClassTy::REQUIRED);
12614 if (!AUO->forallUnderlyingObjects(CheckAddressSpace))
12615 return indicatePessimisticFixpoint();
12616
12617 return OldAddressSpace == AssumedAddressSpace ? ChangeStatus::UNCHANGED
12618 : ChangeStatus::CHANGED;
12619 }
12620
12621 /// See AbstractAttribute::manifest(...).
12622 ChangeStatus manifest(Attributor &A) override {
12623 unsigned NewAS = getAddressSpace();
12624
12625 if (NewAS == InvalidAddressSpace ||
12626 NewAS == getAssociatedType()->getPointerAddressSpace())
12627 return ChangeStatus::UNCHANGED;
12628
12629 unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value();
12630
12631 Value *AssociatedValue = &getAssociatedValue();
12632 Value *OriginalValue = peelAddrspacecast(AssociatedValue, FlatAS);
12633
12634 PointerType *NewPtrTy =
12635 PointerType::get(getAssociatedType()->getContext(), NewAS);
12636 bool UseOriginalValue =
12637 OriginalValue->getType()->getPointerAddressSpace() == NewAS;
12638
12639 bool Changed = false;
12640
12641 auto Pred = [&](const Use &U, bool &) {
12642 if (U.get() != AssociatedValue)
12643 return true;
12644 auto *Inst = dyn_cast<Instruction>(U.getUser());
12645 if (!Inst)
12646 return true;
12647 // This is a workaround to make sure we only change uses from the
12648 // corresponding CGSCC if the AA is run on a CGSCC instead of the module.
12649 if (!A.isRunOn(Inst->getFunction()))
12650 return true;
12651 if (auto *LI = dyn_cast<LoadInst>(Inst)) {
12652 Changed |=
12653 makeChange(A, LI, U, OriginalValue, NewPtrTy, UseOriginalValue);
12654 } else if (auto *SI = dyn_cast<StoreInst>(Inst)) {
12655 Changed |=
12656 makeChange(A, SI, U, OriginalValue, NewPtrTy, UseOriginalValue);
12657 } else if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
12658 Changed |=
12659 makeChange(A, RMW, U, OriginalValue, NewPtrTy, UseOriginalValue);
12660 } else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
12661 Changed |=
12662 makeChange(A, CmpX, U, OriginalValue, NewPtrTy, UseOriginalValue);
12663 }
12664 return true;
12665 };
12666
12667 // It doesn't matter if we can't check all uses as we can simply
12668 // conservatively ignore those that cannot be visited.
12669 (void)A.checkForAllUses(Pred, *this, getAssociatedValue(),
12670 /* CheckBBLivenessOnly */ true);
12671
12672 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
12673 }
12674
12675 /// See AbstractAttribute::getAsStr().
12676 const std::string getAsStr(Attributor *A) const override {
12677 if (!isValidState())
12678 return "addrspace(<invalid>)";
12679 return "addrspace(" +
12680 (AssumedAddressSpace == InvalidAddressSpace
12681 ? "none"
12682 : std::to_string(AssumedAddressSpace)) +
12683 ")";
12684 }
12685
12686private:
12687 uint32_t AssumedAddressSpace = InvalidAddressSpace;
12688
12689 bool takeAddressSpace(uint32_t AS) {
12690 if (AssumedAddressSpace == InvalidAddressSpace) {
12691 AssumedAddressSpace = AS;
12692 return true;
12693 }
12694 return AssumedAddressSpace == AS;
12695 }
12696
12697 static Value *peelAddrspacecast(Value *V, unsigned FlatAS) {
12698 if (auto *I = dyn_cast<AddrSpaceCastInst>(V)) {
12699 assert(I->getSrcAddressSpace() != FlatAS &&
12700 "there should not be flat AS -> non-flat AS");
12701 return I->getPointerOperand();
12702 }
12703 if (auto *C = dyn_cast<ConstantExpr>(V))
12704 if (C->getOpcode() == Instruction::AddrSpaceCast) {
12705 assert(C->getOperand(0)->getType()->getPointerAddressSpace() !=
12706 FlatAS &&
12707 "there should not be flat AS -> non-flat AS X");
12708 return C->getOperand(0);
12709 }
12710 return V;
12711 }
12712};
12713
12714struct AAAddressSpaceFloating final : AAAddressSpaceImpl {
12715 AAAddressSpaceFloating(const IRPosition &IRP, Attributor &A)
12716 : AAAddressSpaceImpl(IRP, A) {}
12717
12718 void trackStatistics() const override {
12719 STATS_DECLTRACK_FLOATING_ATTR(addrspace);
12720 }
12721};
12722
12723struct AAAddressSpaceReturned final : AAAddressSpaceImpl {
12724 AAAddressSpaceReturned(const IRPosition &IRP, Attributor &A)
12725 : AAAddressSpaceImpl(IRP, A) {}
12726
12727 /// See AbstractAttribute::initialize(...).
12728 void initialize(Attributor &A) override {
12729 // TODO: we don't rewrite function argument for now because it will need to
12730 // rewrite the function signature and all call sites.
12731 (void)indicatePessimisticFixpoint();
12732 }
12733
12734 void trackStatistics() const override {
12735 STATS_DECLTRACK_FNRET_ATTR(addrspace);
12736 }
12737};
12738
12739struct AAAddressSpaceCallSiteReturned final : AAAddressSpaceImpl {
12740 AAAddressSpaceCallSiteReturned(const IRPosition &IRP, Attributor &A)
12741 : AAAddressSpaceImpl(IRP, A) {}
12742
12743 void trackStatistics() const override {
12744 STATS_DECLTRACK_CSRET_ATTR(addrspace);
12745 }
12746};
12747
12748struct AAAddressSpaceArgument final : AAAddressSpaceImpl {
12749 AAAddressSpaceArgument(const IRPosition &IRP, Attributor &A)
12750 : AAAddressSpaceImpl(IRP, A) {}
12751
12752 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(addrspace); }
12753};
12754
12755struct AAAddressSpaceCallSiteArgument final : AAAddressSpaceImpl {
12756 AAAddressSpaceCallSiteArgument(const IRPosition &IRP, Attributor &A)
12757 : AAAddressSpaceImpl(IRP, A) {}
12758
12759 /// See AbstractAttribute::initialize(...).
12760 void initialize(Attributor &A) override {
12761 // TODO: we don't rewrite call site argument for now because it will need to
12762 // rewrite the function signature of the callee.
12763 (void)indicatePessimisticFixpoint();
12764 }
12765
12766 void trackStatistics() const override {
12767 STATS_DECLTRACK_CSARG_ATTR(addrspace);
12768 }
12769};
12770} // namespace
12771
12772/// ----------- Allocation Info ----------
12773namespace {
12774struct AAAllocationInfoImpl : public AAAllocationInfo {
12775 AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A)
12776 : AAAllocationInfo(IRP, A) {}
12777
12778 std::optional<TypeSize> getAllocatedSize() const override {
12779 assert(isValidState() && "the AA is invalid");
12780 return AssumedAllocatedSize;
12781 }
12782
12783 std::optional<TypeSize> findInitialAllocationSize(Instruction *I,
12784 const DataLayout &DL) {
12785
12786 // TODO: implement the case for malloc-like instructions
12787 switch (I->getOpcode()) {
12788 case Instruction::Alloca: {
12789 AllocaInst *AI = cast<AllocaInst>(I);
12790 return AI->getAllocationSize(DL);
12791 }
12792 default:
12793 return std::nullopt;
12794 }
12795 }
12796
12797 ChangeStatus updateImpl(Attributor &A) override {
12798
12799 const IRPosition &IRP = getIRPosition();
12800 Instruction *I = IRP.getCtxI();
12801
12802 // TODO: update the check for malloc-like calls
12803 if (!isa<AllocaInst>(I))
12804 return indicatePessimisticFixpoint();
12805
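    // Shrinking the allocation is only safe if the pointer does not escape;
    // require an (assumed) nocapture allocation.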
12806 bool IsKnownNoCapture;
12807 if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
12808 A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture))
12809 return indicatePessimisticFixpoint();
12810
12811 const AAPointerInfo *PI =
12812 A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
12813
12814 if (!PI)
12815 return indicatePessimisticFixpoint();
12816
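    // Require valid, precise pointer-use information; if the pointer can
    // reach a return, callers may observe the allocation, so we must not
    // shrink it.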
12817 if (!PI->getState().isValidState() || PI->reachesReturn())
12818 return indicatePessimisticFixpoint();
12819
12820 const DataLayout &DL = A.getDataLayout();
12821 const auto AllocationSize = findInitialAllocationSize(I, DL);
12822
12823 // If allocation size is nullopt, we give up.
12824 if (!AllocationSize)
12825 return indicatePessimisticFixpoint();
12826
12827 // For zero-sized allocations we give up,
12828 // since there is nothing left to shrink.
12829 if (*AllocationSize == 0)
12830 return indicatePessimisticFixpoint();
12831
12832 int64_t BinSize = PI->numOffsetBins();
12833
12834 // TODO: implement for multiple bins
12835 if (BinSize > 1)
12836 return indicatePessimisticFixpoint();
12837
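    // No offset bins at all: the allocation is never read or written, so it
    // can be shrunk to zero bytes.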
12838 if (BinSize == 0) {
12839 auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
12840 if (!changeAllocationSize(NewAllocationSize))
12841 return ChangeStatus::UNCHANGED;
12842 return ChangeStatus::CHANGED;
12843 }
12844
12845 // TODO: refactor this to be part of the multiple-bin case
12846 const auto &It = PI->begin();
12847
12848 // TODO: handle if Offset is not zero
12849 if (It->first.Offset != 0)
12850 return indicatePessimisticFixpoint();
12851
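    // The single bin covers bytes [0, SizeOfBin). Only shrink if that is
    // strictly smaller than the current allocation; the assumed size is
    // tracked in bits, hence the multiplication by 8 below.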
12852 uint64_t SizeOfBin = It->first.Offset + It->first.Size;
12853
12854 if (SizeOfBin >= *AllocationSize)
12855 return indicatePessimisticFixpoint();
12856
12857 auto NewAllocationSize =
12858 std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
12859
12860 if (!changeAllocationSize(NewAllocationSize))
12861 return ChangeStatus::UNCHANGED;
12862
12863 return ChangeStatus::CHANGED;
12864 }
12865
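  // As a rough sketch of the effect (IR names hypothetical): an
  //   %buf = alloca [64 x i8]
  // whose accesses all fall into bytes [0, 8) would be replaced by
  //   %buf2 = alloca i8, i32 8
  // with all uses of the old alloca redirected to the new one.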
12866 /// See AbstractAttribute::manifest(...).
12867 ChangeStatus manifest(Attributor &A) override {
12868
12869 assert(isValidState() &&
12870 "Manifest should only be called if the state is valid.");
12871
12872 Instruction *I = getIRPosition().getCtxI();
12873
12874 auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
12875
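    // Round the assumed size in bits up to whole bytes.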
12876 unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
12877
12878 switch (I->getOpcode()) {
12879 // TODO: add a case for malloc-like calls
12880 case Instruction::Alloca: {
12881
12882 AllocaInst *AI = cast<AllocaInst>(I);
12883
12884 Type *CharType = Type::getInt8Ty(I->getContext());
12885
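    // Build the replacement allocation as NumBytesToAllocate x i8, with the
    // element count materialized as an i32 constant, inserted immediately
    // after the original alloca.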
12886 auto *NumBytesToValue =
12887 ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));
12888
12889 BasicBlock::iterator insertPt = AI->getIterator();
12890 insertPt = std::next(insertPt);
12891 AllocaInst *NewAllocaInst =
12892 new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
12893 AI->getAlign(), AI->getName(), insertPt);
12894
12895 if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
12896 return ChangeStatus::CHANGED;
12897
12898 break;
12899 }
12900 default:
12901 break;
12902 }
12903
12904 return ChangeStatus::UNCHANGED;
12905 }
12906
12907 /// See AbstractAttribute::getAsStr().
12908 const std::string getAsStr(Attributor *A) const override {
12909 if (!isValidState())
12910 return "allocationinfo(<invalid>)";
12911 return "allocationinfo(" +
12912 (AssumedAllocatedSize == HasNoAllocationSize
12913 ? "none"
12914 : std::to_string(AssumedAllocatedSize->getFixedValue())) +
12915 ")";
12916 }
12917
12918private:
12919 std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
12920
12921 // Maintain the computed allocation size of the object.
12922 // Returns whether the assumed allocation size was modified.
12923 bool changeAllocationSize(std::optional<TypeSize> Size) {
12924 if (AssumedAllocatedSize == HasNoAllocationSize ||
12925 AssumedAllocatedSize != Size) {
12926 AssumedAllocatedSize = Size;
12927 return true;
12928 }
12929 return false;
12930 }
12931};
12932
12933struct AAAllocationInfoFloating : AAAllocationInfoImpl {
12934 AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A)
12935 : AAAllocationInfoImpl(IRP, A) {}
12936
12937 void trackStatistics() const override {
12938 STATS_DECLTRACK_FLOATING_ATTR(allocationinfo);
12939 }
12940};
12941
12942struct AAAllocationInfoReturned : AAAllocationInfoImpl {
12943 AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A)
12944 : AAAllocationInfoImpl(IRP, A) {}
12945
12946 /// See AbstractAttribute::initialize(...).
12947 void initialize(Attributor &A) override {
12948 // TODO: we don't rewrite the returned value for now because it would
12949 // require rewriting the function signature and all call sites.
12950 (void)indicatePessimisticFixpoint();
12951 }
12952
12953 void trackStatistics() const override {
12954 STATS_DECLTRACK_FNRET_ATTR(allocationinfo);
12955 }
12956};
12957
12958struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl {
12959 AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
12960 : AAAllocationInfoImpl(IRP, A) {}
12961
12962 void trackStatistics() const override {
12963 STATS_DECLTRACK_CSRET_ATTR(allocationinfo);
12964 }
12965};
12966
12967struct AAAllocationInfoArgument : AAAllocationInfoImpl {
12968 AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A)
12969 : AAAllocationInfoImpl(IRP, A) {}
12970
12971 void trackStatistics() const override {
12972 STATS_DECLTRACK_ARG_ATTR(allocationinfo);
12973 }
12974};
12975
12976struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl {
12977 AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
12978 : AAAllocationInfoImpl(IRP, A) {}
12979
12980 /// See AbstractAttribute::initialize(...).
12981 void initialize(Attributor &A) override {
12982
12983 (void)indicatePessimisticFixpoint();
12984 }
12985
12986 void trackStatistics() const override {
12987 STATS_DECLTRACK_CSARG_ATTR(allocationinfo);
12988 }
12989};
12990} // namespace
12991
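// Unique ID definitions. The address of each ID member identifies its
// abstract attribute class when the Attributor looks up or creates AAs.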
12992const char AANoUnwind::ID = 0;
12993const char AANoSync::ID = 0;
12994const char AANoFree::ID = 0;
12995const char AANonNull::ID = 0;
12996const char AAMustProgress::ID = 0;
12997const char AANoRecurse::ID = 0;
12998const char AANonConvergent::ID = 0;
12999const char AAWillReturn::ID = 0;
13000const char AAUndefinedBehavior::ID = 0;
13001const char AANoAlias::ID = 0;
13002const char AAIntraFnReachability::ID = 0;
13003const char AANoReturn::ID = 0;
13004const char AAIsDead::ID = 0;
13005const char AADereferenceable::ID = 0;
13006const char AAAlign::ID = 0;
13007const char AAInstanceInfo::ID = 0;
13008const char AANoCapture::ID = 0;
13009const char AAValueSimplify::ID = 0;
13010const char AAHeapToStack::ID = 0;
13011const char AAPrivatizablePtr::ID = 0;
13012const char AAMemoryBehavior::ID = 0;
13013const char AAMemoryLocation::ID = 0;
13014const char AAValueConstantRange::ID = 0;
13015const char AAPotentialConstantValues::ID = 0;
13016const char AAPotentialValues::ID = 0;
13017const char AANoUndef::ID = 0;
13018const char AANoFPClass::ID = 0;
13019const char AACallEdges::ID = 0;
13020const char AAInterFnReachability::ID = 0;
13021const char AAPointerInfo::ID = 0;
13022const char AAAssumptionInfo::ID = 0;
13023const char AAUnderlyingObjects::ID = 0;
13024const char AAAddressSpace::ID = 0;
13025const char AAAllocationInfo::ID = 0;
13026const char AAIndirectCallInfo::ID = 0;
13027const char AAGlobalValueInfo::ID = 0;
13028const char AADenormalFPMath::ID = 0;
13029
13030// Macro magic to create the static generator function for attributes that
13031// follow the naming scheme.
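//
// For example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// defines AANoUnwind::createForPosition(IRP, A): it switches over the
// position kind, allocates an AANoUnwindFunction or AANoUnwindCallSite in
// the Attributor's bump allocator, and rejects all other position kinds.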
13032
13033#define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
13034 case IRPosition::PK: \
13035 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
13036
13037#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
13038 case IRPosition::PK: \
13039 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \
13040 ++NumAAs; \
13041 break;
13042
13043#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
13044 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13045 CLASS *AA = nullptr; \
13046 switch (IRP.getPositionKind()) { \
13047 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
13048 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
13049 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
13050 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
13051 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
13052 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
13053 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
13054 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
13055 } \
13056 return *AA; \
13057 }
13058
13059#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
13060 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13061 CLASS *AA = nullptr; \
13062 switch (IRP.getPositionKind()) { \
13063 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
13064 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
13065 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
13066 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
13067 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
13068 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
13069 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
13070 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
13071 } \
13072 return *AA; \
13073 }
13074
13075#define CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(POS, SUFFIX, CLASS) \
13076 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13077 CLASS *AA = nullptr; \
13078 switch (IRP.getPositionKind()) { \
13079 SWITCH_PK_CREATE(CLASS, IRP, POS, SUFFIX) \
13080 default: \
13081 llvm_unreachable("Cannot create " #CLASS " for position other than " #POS \
13082 " position!"); \
13083 } \
13084 return *AA; \
13085 }
13086
13087#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
13088 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13089 CLASS *AA = nullptr; \
13090 switch (IRP.getPositionKind()) { \
13091 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
13092 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
13093 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
13094 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
13095 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
13096 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
13097 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
13098 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
13099 } \
13100 return *AA; \
13101 }
13102
13103#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
13104 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13105 CLASS *AA = nullptr; \
13106 switch (IRP.getPositionKind()) { \
13107 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
13108 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
13109 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
13110 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
13111 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
13112 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
13113 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
13114 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
13115 } \
13116 return *AA; \
13117 }
13118
13119#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
13120 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
13121 CLASS *AA = nullptr; \
13122 switch (IRP.getPositionKind()) { \
13123 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
13124 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
13125 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
13126 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
13127 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
13128 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
13129 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
13130 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
13131 } \
13132 return *AA; \
13133 }
13134
13180#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
13181#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
13182#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
13183#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
13184#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
13185#undef CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION
13186#undef SWITCH_PK_CREATE
13187#undef SWITCH_PK_INV
#define Success
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
#define STATS_DECLTRACK(NAME, TYPE, MSG)
static std::optional< Constant * > askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA, const IRPosition &IRP, Type &Ty)
static cl::opt< unsigned, true > MaxPotentialValues("attributor-max-potential-values", cl::Hidden, cl::desc("Maximum number of potential values to be " "tracked for each position."), cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), cl::init(7))
static const Value * getPointerOperand(const Instruction *I, bool AllowVolatile)
Get pointer operand of memory accessing instruction.
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA, StateType &S, const IRPosition::CallBaseContext *CBContext=nullptr)
Clamp the information known for all returned values of a function (identified by QueryingAA) into S.
#define STATS_DECLTRACK_FN_ATTR(NAME)
#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
static cl::opt< int > MaxPotentialValuesIterations("attributor-max-potential-values-iterations", cl::Hidden, cl::desc("Maximum number of iterations we keep dismantling potential values."), cl::init(64))
#define STATS_DECLTRACK_CS_ATTR(NAME)
#define PIPE_OPERATOR(CLASS)
#define DefineKeys(ToTy)
static bool mayBeInCycle(const CycleInfo *CI, const Instruction *I, bool HeaderOnly, Cycle **CPtr=nullptr)
#define STATS_DECLTRACK_ARG_ATTR(NAME)
static const Value * stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val, const DataLayout &DL, APInt &Offset, bool GetMinOffset, bool AllowNonInbounds, bool UseAssumed=false)
#define STATS_DECLTRACK_CSRET_ATTR(NAME)
static cl::opt< bool > ManifestInternal("attributor-manifest-internal", cl::Hidden, cl::desc("Manifest Attributor internal string attributes."), cl::init(false))
static Value * constructPointer(Value *Ptr, int64_t Offset, IRBuilder< NoFolder > &IRB)
Helper function to create a pointer based on Ptr, and advanced by Offset bytes.
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
#define BUILD_STAT_NAME(NAME, TYPE)
static bool isDenselyPacked(Type *Ty, const DataLayout &DL)
Checks if a type could have padding bytes.
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
static const Value * getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA, const Value *Ptr, int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds=false)
#define STATS_DECLTRACK_FNRET_ATTR(NAME)
#define STATS_DECLTRACK_CSARG_ATTR(NAME)
#define CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(POS, SUFFIX, CLASS)
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
static cl::opt< int > MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), cl::Hidden)
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)
#define STATS_DECL(NAME, TYPE, MSG)
basic Basic Alias true
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static bool isReachableImpl(SmallVectorImpl< BasicBlock * > &Worklist, const StopSetT &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT, const LoopInfo *LI)
Definition: CFG.cpp:134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares an analysis pass that computes CycleInfo for LLVM IR, specialized from GenericCycl...
DXIL Resource Access
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Performs the initial survey of the specified function
Given that RA is a live value
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines DenseMapInfo traits for DenseMap.
T Content
uint32_t Index
uint64_t Size
#define Check(C,...)
Hexagon Common GEP
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
#define T
#define T1
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
static StringRef getName(Value *V)
Basic Register Allocator
static cl::opt< RegAllocEvictionAdvisorAnalysis::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development, "development", "for training")))
Remove Loads Into Fake Uses
This builds on the llvm/ADT/GraphTraits.h file to find the strongly connected components (SCCs) of a ...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool IsDead
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This file contains some functions that are useful when dealing with strings.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
@ Floating
This pass exposes codegen information to IR-level passes.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Value * RHS
Value * LHS
static unsigned getSize(unsigned Kind)
AACallGraphNode * operator*() const
A manager for alias analyses.
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias.
Class for arbitrary precision integers.
Definition: APInt.h:78
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1542
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
bool isDirectCall() const
Return true if this ACS represents a direct call.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:104
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
bool hasPointeeInMemoryValueAttr() const
Return true if this argument has the byval, sret, inalloca, preallocated, or byref attribute.
Definition: Function.cpp:182
bool hasReturnedAttr() const
Return true if this argument has the returned attribute.
Definition: Function.cpp:307
bool hasByValAttr() const
Return true if this argument has the byval attribute.
Definition: Function.cpp:144
const Function * getParent() const
Definition: Argument.h:43
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:49
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:157
iterator begin() const
Definition: ArrayRef.h:156
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:95
static Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
Definition: Attributes.cpp:244
FPClassTest getNoFPClass() const
Return the FPClassTest for nofpclass.
Definition: Attributes.cpp:490
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:364
MemoryEffects getMemoryEffects() const
Returns memory effects.
Definition: Attributes.cpp:484
static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
Definition: Attributes.cpp:250
static Attribute getWithNoFPClass(LLVMContext &Context, FPClassTest Mask)
Definition: Attributes.cpp:286
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
static bool isEnumAttrKind(AttrKind Kind)
Definition: Attributes.h:99
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:281
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
Definition: Attributes.cpp:234
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
const Instruction & front() const
Definition: BasicBlock.h:471
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
Conditional or Unconditional Branch instruction.
unsigned getNumSuccessors() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1349
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool isCallee(Value::const_user_iterator UI) const
Determine whether the passed iterator points to the callee operand's Use.
Definition: InstrTypes.h:1360
Value * getCalledOperand() const
Definition: InstrTypes.h:1342
const Use & getCalledOperandUse() const
Definition: InstrTypes.h:1344
const Use & getArgOperandUse(unsigned i) const
Wrappers for getting the Use of a call argument.
Definition: InstrTypes.h:1305
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1294
bool isBundleOperand(unsigned Idx) const
Return true if the operand at index Idx is a bundle operand.
Definition: InstrTypes.h:1984
bool isConvergent() const
Determine if the invoke is convergent.
Definition: InstrTypes.h:1935
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1207
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1285
unsigned getArgOperandNo(const Use *U) const
Given a use for a arg operand, get the arg operand number that corresponds to it.
Definition: InstrTypes.h:1325
unsigned arg_size() const
Definition: InstrTypes.h:1292
bool isArgOperand(const Use *U) const
Definition: InstrTypes.h:1314
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:608
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
Type * getDestTy() const
Return the destination type, as a convenience.
Definition: InstrTypes.h:615
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:913
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:946
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:940
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
static ConstantAsMetadata * get(Constant *C)
Definition: Metadata.h:528
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1108
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
Definition: Constants.cpp:2554
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
This class represents a range of values.
Definition: ConstantRange.h:47
const APInt & getLower() const
Return the lower value for this range.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
bool isSingleElement() const
Return true if this set contains exactly one member.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
const APInt & getUpper() const
Return the upper value for this range.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
This is an important base class in LLVM.
Definition: Constant.h:42
Analysis pass which computes a CycleInfo.
Definition: CycleAnalysis.h:46
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
unsigned size() const
Definition: DenseMap.h:99
iterator begin()
Definition: DenseMap.h:75
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:152
iterator end()
Definition: DenseMap.h:84
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:147
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
An instruction for ordering other memory operations.
Definition: Instructions.h:424
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
iterator_range< arg_iterator > args()
Definition: Function.h:892
const Function & getFunction() const
Definition: Function.h:171
size_t arg_size() const
Definition: Function.h:901
Argument * getArg(unsigned i) const
Definition: Function.h:886
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
A possibly irreducible generalization of a Loop.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:296
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2002
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition: IRBuilder.h:488
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
bool isLifetimeStartOrEnd() const LLVM_READONLY
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
bool mayReadOrWriteMemory() const
Return true if this instruction may read or write memory.
Definition: Instruction.h:759
bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
Definition: Instruction.h:277
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:468
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Analysis to compute lazy value information.
This pass computes, caches, and vends lazy value constraint information.
Definition: LazyValueInfo.h:32
ConstantRange getConstantRange(Value *V, Instruction *CxtI, bool UndefAllowed)
Return the ConstantRange constraint that is known to hold for the specified value at the specified in...
An instruction for reading from memory.
Definition: Instructions.h:176
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Metadata node.
Definition: Metadata.h:1069
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1430
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1543
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:79
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
ModRefInfo getModRef(Location Loc) const
Get ModRefInfo for the given Location.
Definition: ModRef.h:165
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
static MemoryEffectsBase unknown()
Create MemoryEffectsBase that can read and write any memory.
Definition: ModRef.h:112
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Evaluate the size and offset of an object pointed to by a Value*.
static SizeOffsetValue unknown()
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents an analyzed expression in the program.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition: SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:401
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:567
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:596
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition: DataLayout.h:601
Class to represent struct types.
Definition: DerivedTypes.h:218
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:365
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:366
Multiway switch.
Analysis pass providing the TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
bool areTypesABICompatible(const Function *Caller, const Function *Callee, const ArrayRef< Type * > &Types) const
bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getIntegerBitWidth() const
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:267
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
'undef' values are things that do not have specified contents.
Definition: Constants.h:1412
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1859
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
Value * get() const
Definition: Use.h:66
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
const Use & getOperandUse(unsigned i) const
Definition: User.h:241
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:164
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static constexpr uint64_t MaximumAlignment
Definition: Value.h:811
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
Enumerate the SCCs of a directed graph in reverse topological order of the SCC DAG.
Definition: SCCIterator.h:49
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:653
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:648
raw_ostream & operator<<(raw_ostream &OS, const RangeTy &R)
Definition: Attributor.h:319
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:339
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:290
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:835
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:231
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:599
bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
Definition: Attributor.cpp:200
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:180
@ Intraprocedural
Definition: Attributor.h:181
@ Interprocedural
Definition: Attributor.h:182
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:280
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:816
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:205
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:589
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:316
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Unsupported
This operation is completely unsupported on the target.
@ Undef
Value of the register doesn't matter.
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1600
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition: LLVMContext.h:54
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
unsigned combineHashValue(unsigned a, unsigned b)
Simplistic combination of 32-bit hash values into 32-bit hash values.
Definition: DenseMapInfo.h:39
ElementType
The element type of an SRV or UAV resource.
Definition: DXILABI.h:58
constexpr double e
Definition: MathExtras.h:47
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:361
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
Definition: DynamicAPInt.h:390
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
bool isLegalToPromote(const CallBase &CB, Function *Callee, const char **FailureReason=nullptr)
Return true if the given indirect call site can be made to call Callee.
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
auto pred_end(const MachineBasicBlock *BB)
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:256
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1529
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
UseCaptureKind DetermineUseCaptureKind(const Use &U, llvm::function_ref< bool(Value *, const DataLayout &)> IsDereferenceableOrNull)
Determine what kind of capture behaviour U may exhibit.
Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
scc_iterator< T > scc_begin(const T &G)
Construct the begin iterator for a deduced graph type T.
Definition: SCCIterator.h:233
PotentialValuesState< std::pair< AA::ValueAndContext, AA::ValueScope > > PotentialLLVMValuesState
Definition: Attributor.h:5223
@ NONE
Definition: Attributor.h:6476
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
auto map_range(ContainerTy &&C, FuncTy F)
Definition: STLExtras.h:377
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:340
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition: ModRef.h:268
@ None
Definition: CodeGenData.h:106
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1187
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:250
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition: Local.cpp:425
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Convert the instruction operands from referencing the current values into those specified by VM.
Definition: ValueMapper.h:263
CallBase & promoteCall(CallBase &CB, Function *Callee, CastInst **RetBitCast=nullptr)
Promote the given indirect call site to unconditionally call Callee.
bool hasAssumption(const Function &F, const KnownAssumptionString &AssumptionStr)
Return true if F has the assumption AssumptionStr attached.
Definition: Assumptions.cpp:70
RetainedKnowledge getKnowledgeFromUse(const Use *U, ArrayRef< Attribute::AttrKind > AttrKinds)
Return a valid Knowledge associated to the Use U if its Attribute kind is in AttrKinds.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Other
Any other memory.
ChangeStatus clampStateAndIndicateChange< DerefState >(DerefState &S, const DerefState &R)
PotentialValuesState< APInt > PotentialConstantIntValuesState
Definition: Attributor.h:5221
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R)
Helper function to clamp a state S of type StateType with the information in R and indicate/return if...
Definition: Attributor.h:3464
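For illustration, a self-contained sketch using the simple BooleanState; in this file the pattern usually appears at the end of an AA's updateImpl:

  #include "llvm/Transforms/IPO/Attributor.h"

  using namespace llvm;

  ChangeStatus demoClamp() {
    BooleanState S;                        // starts in the best (assumed) state
    BooleanState R;
    (void)R.indicatePessimisticFixpoint(); // R's assumed collapses to its known
    // S loses assumed information, so this returns ChangeStatus::CHANGED.
    return clampStateAndIndicateChange(S, R);
  }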
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
auto pred_begin(const MachineBasicBlock *BB)
ChangeStatus
Definition: Attributor.h:489
std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
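A minimal sketch (the wrapper is hypothetical; CB and TLI come from the surrounding pass):

  #include "llvm/Analysis/MemoryBuiltins.h"
  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include <optional>

  using namespace llvm;

  // Size in bytes of an allocation call like malloc(42), if it is known.
  std::optional<uint64_t> allocSizeInBytes(const CallBase *CB,
                                           const TargetLibraryInfo *TLI) {
    if (std::optional<APInt> Size = getAllocSize(CB, TLI))
      return Size->getZExtValue();
    return std::nullopt;
  }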
DenseSet< StringRef > getAssumptions(const Function &F)
Return the set of all assumptions for the function F.
Definition: Assumptions.cpp:86
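A minimal sketch of querying assumptions; "ompx_aligned_barrier" is one known assumption string used elsewhere in this file, shown here only as an example:

  #include "llvm/IR/Assumptions.h"
  #include "llvm/IR/Function.h"

  using namespace llvm;

  bool hasAlignedBarrierAssumption(const Function &F) {
    return hasAssumption(F, KnownAssumptionString("ompx_aligned_barrier"));
  }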
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
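For illustration:

  #include "llvm/Support/Alignment.h"

  llvm::Align A0 = llvm::assumeAligned(0); // 0 is treated as Align(1)
  llvm::Align A8 = llvm::assumeAligned(8); // Align(8)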
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
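A minimal sketch that guards an instruction with a condition (Cond and I are hypothetical values from the surrounding transformation):

  #include "llvm/IR/Instructions.h"
  #include "llvm/Transforms/Utils/BasicBlockUtils.h"

  using namespace llvm;

  // Returns the terminator of the new "then" block; I starts the tail block.
  Instruction *guardWith(Value *Cond, Instruction *I) {
    return SplitBlockAndInsertIfThen(Cond, I->getIterator(),
                                     /*Unreachable=*/false);
  }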
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
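For illustration, a sketch of how one AA queries another with one of these dependence classes (the wrapper is hypothetical; REQUIRED means the querying AA is invalidated whenever AAAlign is):

  #include "llvm/Transforms/IPO/Attributor.h"

  using namespace llvm;

  Align knownAlignOf(Attributor &A, const AbstractAttribute &QueryingAA,
                     Value &V) {
    const auto *AlignAA = A.getAAFor<AAAlign>(
        QueryingAA, IRPosition::value(V), DepClassTy::REQUIRED);
    return AlignAA ? AlignAA->getKnownAlign() : Align(1);
  }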
Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI)
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition: bit.h:327
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
constexpr StringRef AssumptionAttrKey
The key we use for assumption attributes.
Definition: Assumptions.h:28
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
A type to track pointer/struct usage and accesses for AAPointerInfo.
AAPointerInfo::const_bin_iterator end() const
ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I, std::optional< Value * > Content, AAPointerInfo::AccessKind Kind, Type *Ty, Instruction *RemoteI=nullptr)
Add a new Access to the state, covering the given Ranges.
DenseMap< const Instruction *, SmallVector< unsigned > > RemoteIMap
AAPointerInfo::const_bin_iterator begin() const
AAPointerInfo::OffsetInfo ReturnedOffsets
Flag to determine if the underlying pointer reaches a return statement in the associated function...
bool forallInterferingAccesses(Instruction &I, function_ref< bool(const AAPointerInfo::Access &, bool)> CB, AA::RangeTy &Range) const
See AAPointerInfo::forallInterferingAccesses.
State(State &&SIS)=default
const AAPointerInfo::Access & getAccess(unsigned Index) const
bool forallInterferingAccesses(AA::RangeTy Range, function_ref< bool(const AAPointerInfo::Access &, bool)> CB) const
See AAPointerInfo::forallInterferingAccesses.
SmallVector< AAPointerInfo::Access > AccessList
bool isAtFixpoint() const override
See AbstractState::isAtFixpoint().
static State getWorstState(const State &SIS)
Return the worst possible representable state.
AAPointerInfo::OffsetBinsTy OffsetBins
ChangeStatus indicateOptimisticFixpoint() override
See AbstractState::indicateOptimisticFixpoint().
State & operator=(const State &R)
ChangeStatus indicatePessimisticFixpoint() override
See AbstractState::indicatePessimisticFixpoint().
static State getBestState(const State &SIS)
Return the best possible representable state.
bool isValidState() const override
See AbstractState::isValidState().
------------------- AAIntraFnReachability Attribute -------------------
ReachabilityQueryInfo(const ReachabilityQueryInfo &RQI)
unsigned Hash
Precomputed hash for this RQI.
ReachabilityQueryInfo(const Instruction *From, const ToTy *To)
ReachabilityQueryInfo(Attributor &A, const Instruction &From, const ToTy &To, const AA::InstExclusionSetTy *ES, bool MakeUnique)
Constructor replacement to ensure unique and stable sets are used for the cache.
An abstract interface for address space information.
Definition: Attributor.h:6286
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6322
An abstract interface for all align attributes.
Definition: Attributor.h:4272
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4303
Align getKnownAlign() const
Return known alignment.
Definition: Attributor.h:4286
static const char ID
Definition: Attributor.h:6361
An abstract attribute for getting assumption information.
Definition: Attributor.h:6212
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6240
An abstract state for querying live call edges.
Definition: Attributor.h:5487
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5530
An abstract Attribute for specializing "dynamic" components of "denormal-fp-math" and "denormal-fp-ma...
Definition: Attributor.h:6447
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6469
An abstract interface for all dereferenceable attribute.
Definition: Attributor.h:4218
uint32_t getKnownDereferenceableBytes() const
Return known dereferenceable bytes.
Definition: Attributor.h:4242
uint32_t getAssumedDereferenceableBytes() const
Return assumed dereferenceable bytes.
Definition: Attributor.h:4237
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4263
An abstract interface for llvm::GlobalValue information interference.
Definition: Attributor.h:6366
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6400
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4565
An abstract interface for indirect call information interference.
Definition: Attributor.h:6405
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6441
An abstract interface to track if a value leaves its defining function instance.
Definition: Attributor.h:4310
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4343
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5683
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5718
bool canReach(Attributor &A, const Function &Fn) const
Return true if the function represented by this position can reach Fn.
Definition: Attributor.h:5689
virtual bool instructionCanReach(Attributor &A, const Instruction &Inst, const Function &Fn, const AA::InstExclusionSetTy *ExclusionSet=nullptr) const =0
Can Inst reach Fn.
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3816
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3844
virtual bool isAssumedReachable(Attributor &A, const Instruction &From, const Instruction &To, const AA::InstExclusionSetTy *ExclusionSet=nullptr) const =0
Returns true if the 'From' instruction is assumed to reach the 'To' instruction.
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3976
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4066
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4630
bool isAssumedReadOnly() const
Return true if we assume that the underlying value is not accessed (=written) in its respective scope...
Definition: Attributor.h:4669
bool isKnownReadNone() const
Return true if we know that the underlying value is not read or accessed in its respective scope.
Definition: Attributor.h:4657
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4696
bool isAssumedReadNone() const
Return true if we assume that the underlying value is not read or accessed in its respective scope.
Definition: Attributor.h:4661
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4705
static std::string getMemoryLocationsAsStr(MemoryLocationsKind MLK)
Return the locations encoded by MLK as a readable string.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4881
StateType::base_t MemoryLocationsKind
Definition: Attributor.h:4706
An abstract interface for all nonnull attributes.
Definition: Attributor.h:3592
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3627
An abstract interface for all noalias attributes.
Definition: Attributor.h:3851
static bool isImpliedByIR(Attributor &A, const IRPosition &IRP, Attribute::AttrKind ImpliedAttributeKind, bool IgnoreSubsumingPositions=false)
See IRAttribute::isImpliedByIR.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3890
An abstract interface for all nocapture attributes.
Definition: Attributor.h:4351
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4424
@ NO_CAPTURE_MAYBE_RETURNED
If we do not capture the value in memory or through integers we can only communicate it back as a der...
Definition: Attributor.h:4381
bool isAssumedNoCaptureMaybeReturned() const
Return true if we assume that the underlying value is not captured in its respective scope but we all...
Definition: Attributor.h:4405
static bool isImpliedByIR(Attributor &A, const IRPosition &IRP, Attribute::AttrKind ImpliedAttributeKind, bool IgnoreSubsumingPositions=false)
See IRAttribute::isImpliedByIR.
static void determineFunctionCaptureCapabilities(const IRPosition &IRP, const Function &F, BitIntegerState &State)
Update State according to the capture capabilities of F for position IRP.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5441
An AbstractAttribute for nofree.
Definition: Attributor.h:3897
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3940
An abstract attribute for norecurse.
Definition: Attributor.h:3685
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3709
An AbstractAttribute for noreturn.
Definition: Attributor.h:3947
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3971
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3585
static bool isAlignedBarrier(const CallBase &CB, bool ExecutedAligned)
Helper function to determine if CB is an aligned (GPU) barrier.
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific to intrinsics that are potentially volatile.
An abstract interface for all noundef attributes.
Definition: Attributor.h:5355
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5390
static bool isImpliedByIR(Attributor &A, const IRPosition &IRP, Attribute::AttrKind ImpliedAttributeKind, bool IgnoreSubsumingPositions=false)
See IRAttribute::isImpliedByIR.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3502
An abstract Attribute for determining the necessity of the convergent attribute.
Definition: Attributor.h:5723
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5751
bool isAssumedNotConvergent() const
Return true if "non-convergent" is assumed.
Definition: Attributor.h:5733
An abstract interface for all nonnull attributes.
Definition: Attributor.h:3634
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3678
static bool isImpliedByIR(Attributor &A, const IRPosition &IRP, Attribute::AttrKind ImpliedAttributeKind, bool IgnoreSubsumingPositions=false)
See AbstractAttribute::isImpliedByIR(...).
An access description.
Definition: Attributor.h:5988
A helper containing a list of offsets computed for a Use.
Definition: Attributor.h:5792
A container for a list of ranges.
Definition: Attributor.h:5837
static void set_difference(const RangeList &L, const RangeList &R, RangeList &D)
Copy ranges from L that are not in R, into D.
Definition: Attributor.h:5873
An abstract interface for struct information.
Definition: Attributor.h:5755
virtual bool reachesReturn() const =0
virtual const_bin_iterator begin() const =0
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6204
virtual int64_t numOffsetBins() const =0
An abstract interface for potential values analysis.
Definition: Attributor.h:5245
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5304
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5341
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract interface for privatizability.
Definition: Attributor.h:4579
virtual std::optional< Type * > getPrivatizableType() const =0
Return the type we can choose for a private copy of the underlying value.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4621
An abstract attribute for undefined behavior.
Definition: Attributor.h:3778
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3811
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:6244
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6274
An abstract interface for range value analysis.
Definition: Attributor.h:4886
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4949
An abstract interface for value simplify abstract attribute.
Definition: Attributor.h:4503
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4525
An abstract attribute for willreturn.
Definition: Attributor.h:3716
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3773
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition: Attributor.h:237
static constexpr int64_t Unknown
Definition: Attributor.h:316
static RangeTy getUnknown()
Definition: Attributor.h:243
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3284
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
Definition: Attributor.h:3368
virtual StateType & getState()=0
Return the internal abstract state for inspection.
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2604
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything but the instruction.
static unsigned getHashValue(const Access &A)
static Access getTombstoneKey()
static bool isEqual(const Access &LHS, const Access &RHS)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
Definition: Attributor.h:2212
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2235
const Argument & getReplacedArg() const
Definition: Attributor.h:2242
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2221
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1516
std::function< std::optional< Value * >(const IRPosition &, const AbstractAttribute *, bool &)> SimplifictionCallbackTy
Register CB as a simplification callback.
Definition: Attributor.h:2017
Specialization of the integer state for a bit-wise encoding.
Definition: Attributor.h:2745
BitIntegerState & removeAssumedBits(base_t BitsEncoding)
Remove the bits in BitsEncoding from the "assumed bits" if not known.
Definition: Attributor.h:2770
BitIntegerState & addKnownBits(base_t Bits)
Add the bits in BitsEncoding to the "known bits".
Definition: Attributor.h:2762
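For illustration, a tiny lattice over three bits (the bit meanings are made up):

  #include "llvm/Transforms/IPO/Attributor.h"

  using namespace llvm;

  void demoBits() {
    BitIntegerState<uint8_t, /*BestState=*/7, /*WorstState=*/0> S;
    S.addKnownBits(1);      // bit 0 becomes known (and stays assumed)
    S.removeAssumedBits(2); // bit 1 is no longer assumed
    bool B0 = S.isKnown(1); // true
    bool B1 = S.isAssumed(2); // false
    (void)B0; (void)B1;
  }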
Simple wrapper for a single bit (boolean) state.
Definition: Attributor.h:2888
Represent subnormal handling kind for floating point instruction inputs and outputs.
static constexpr DenormalMode getDefault()
Return the assumed default mode for a function without denormal-fp-math.
static constexpr DenormalMode getInvalid()
static unsigned getHashValue(const Access &A)
static bool isEqual(const Access &LHS, const Access &RHS)
static bool isEqual(const AA::RangeTy &A, const AA::RangeTy B)
static unsigned getHashValue(const AA::RangeTy &Range)
static ReachabilityQueryInfo< ToTy > * getEmptyKey()
static ReachabilityQueryInfo< ToTy > * getTombstoneKey()
static bool isEqual(const ReachabilityQueryInfo< ToTy > *LHS, const ReachabilityQueryInfo< ToTy > *RHS)
static unsigned getHashValue(const ReachabilityQueryInfo< ToTy > *RQI)
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:52
State for dereferenceable attribute.
Definition: Attributor.h:4072
IncIntegerState DerefBytesState
State representing the dereferenceable bytes.
Definition: Attributor.h:4088
ChangeStatus manifest(Attributor &A) override
See AbstractAttribute::manifest(...).
Definition: Attributor.h:3221
Helper to describe and deal with positions in the LLVM-IR.
Definition: Attributor.h:586
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition: Attributor.h:717
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition: Attributor.h:654
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition: Attributor.h:636
Argument * getAssociatedArgument() const
Return the associated argument, if any.
Definition: Attributor.cpp:995
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition: Attributor.h:610
int getCalleeArgNo() const
Return the callee argument number of the associated value if it is an argument or call site argument,...
Definition: Attributor.h:804
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition: Attributor.h:622
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition: Attributor.h:659
@ IRP_ARGUMENT
An attribute for a function argument.
Definition: Attributor.h:600
@ IRP_RETURNED
An attribute for the function return value.
Definition: Attributor.h:596
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition: Attributor.h:599
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition: Attributor.h:597
@ IRP_FUNCTION
An attribute for a function (scope).
Definition: Attributor.h:598
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition: Attributor.h:601
@ IRP_INVALID
An invalid position.
Definition: Attributor.h:593
Instruction * getCtxI() const
Return the context instruction, if any.
Definition: Attributor.h:770
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition: Attributor.h:643
Type * getAssociatedType() const
Return the type this abstract attribute is associated with.
Definition: Attributor.h:793
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition: Attributor.h:629
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition: Attributor.h:932
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition: Attributor.h:784
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition: Attributor.h:703
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argume...
Definition: Attributor.h:813
static const IRPosition function_scope(const IRPosition &IRP, const CallBaseContext *CBContext=nullptr)
Create a position with function scope matching the "context" of IRP.
Definition: Attributor.h:682
Kind getPositionKind() const
Return the associated position kind.
Definition: Attributor.h:882
bool isArgumentPosition() const
Return true if the position is an argument or call site argument.
Definition: Attributor.h:914
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition: Attributor.h:649
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition: Attributor.h:758
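For illustration, the common IRPosition factory functions side by side (F and CB are hypothetical inputs):

  #include "llvm/Transforms/IPO/Attributor.h"

  using namespace llvm;

  void demoPositions(Function &F, CallBase &CB) {
    IRPosition FnPos = IRPosition::function(F);              // function scope
    IRPosition RetPos = IRPosition::returned(F);             // F's return value
    IRPosition CSRet = IRPosition::callsite_returned(CB);    // CB's result
    IRPosition CSArg = IRPosition::callsite_argument(CB, 0); // CB's first arg
    (void)FnPos; (void)RetPos; (void)CSRet; (void)CSArg;
  }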
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Data structure to hold cached (LLVM-IR) information.
Definition: Attributor.h:1203
bool isOnlyUsedByAssume(const Instruction &I) const
Definition: Attributor.h:1297
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
Definition: Attributor.h:1307
State for an integer range.
Definition: Attributor.h:2930
ConstantRange getKnown() const
Return the known state encoding.
Definition: Attributor.h:2986
ConstantRange getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:2989
bool isValidState() const override
See AbstractState::isValidState() NOTE: For now we simply pretend that the worst possible state is in...
Definition: Attributor.h:2663
bool isAtFixpoint() const override
See AbstractState::isAtFixpoint()
Definition: Attributor.h:2666
ChangeStatus indicateOptimisticFixpoint() override
See AbstractState::indicateOptimisticFixpoint(...)
Definition: Attributor.h:2669
base_t getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:2684
static constexpr base_t getWorstState()
Return the worst possible representable state.
Definition: Attributor.h:2656
ChangeStatus indicatePessimisticFixpoint() override
See AbstractState::indicatePessimisticFixpoint(...)
Definition: Attributor.h:2675
Helper that allows inserting a new assumption string into the known assumption set by creating a (stati...
Definition: Assumptions.h:36
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A "must be executed context" for a given program point PP is the set of instructions,...
Definition: MustExecute.h:385
iterator & end()
Return a universal end iterator.
Definition: MustExecute.h:433
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
Definition: MustExecute.h:469
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
Definition: MustExecute.h:419
bool checkForAllContext(const Instruction *PP, function_ref< bool(const Instruction *)> Pred)
Definition: MustExecute.h:455
Various options to control the behavior of getObjectSize.
A class for a set state.
Definition: Attributor.h:4960
static unsigned MaxPotentialValues
Maximum number of potential values to be tracked.
Definition: Attributor.h:5013
void unionAssumed(const MemberTy &C)
Union assumed set with the passed value.
Definition: Attributor.h:5030
const SetTy & getAssumedSet() const
Return this set.
Definition: Attributor.h:4990
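For illustration, a sketch using the PotentialConstantIntValuesState alias from above (values are made up):

  #include "llvm/ADT/APInt.h"
  #include "llvm/Transforms/IPO/Attributor.h"

  using namespace llvm;

  void demoPotentialValues() {
    PotentialConstantIntValuesState S;
    S.unionAssumed(APInt(32, 1));
    S.unionAssumed(APInt(32, 2));
    // The assumed set now holds {1, 2}, unless MaxPotentialValues forced
    // the state to become invalid.
    (void)S.getAssumedSet().size();
  }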
Represent one information held inside an operand bundle of an llvm.assume.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Helper to tie an abstract state implementation to an abstract attribute.
Definition: Attributor.h:3173
StateType & getState() override
See AbstractAttribute::getState(...).
Definition: Attributor.h:3181
bool unionAssumed(std::optional< Value * > Other)
Merge Other into the currently assumed simplified value.