Bug Summary

File: llvm/lib/Transforms/IPO/AttributorAttributes.cpp
Warning: line 5031, column 9
Value stored to 'HasChanged' is never read

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AttributorAttributes.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/IPO -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/IPO -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
1//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// See the Attributor.h file comment and the class descriptions in that file for
10// more information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/IPO/Attributor.h"
15
16#include "llvm/ADT/SCCIterator.h"
17#include "llvm/ADT/SmallPtrSet.h"
18#include "llvm/ADT/Statistic.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Analysis/AssumeBundleQueries.h"
21#include "llvm/Analysis/AssumptionCache.h"
22#include "llvm/Analysis/CaptureTracking.h"
23#include "llvm/Analysis/LazyValueInfo.h"
24#include "llvm/Analysis/MemoryBuiltins.h"
25#include "llvm/Analysis/ScalarEvolution.h"
26#include "llvm/Analysis/TargetTransformInfo.h"
27#include "llvm/Analysis/ValueTracking.h"
28#include "llvm/IR/IRBuilder.h"
29#include "llvm/IR/Instruction.h"
30#include "llvm/IR/IntrinsicInst.h"
31#include "llvm/IR/NoFolder.h"
32#include "llvm/Support/CommandLine.h"
33#include "llvm/Transforms/IPO/ArgumentPromotion.h"
34#include "llvm/Transforms/Utils/Local.h"
35
36#include <cassert>
37
38using namespace llvm;
39
40#define DEBUG_TYPE "attributor"
41
42static cl::opt<bool> ManifestInternal(
43 "attributor-manifest-internal", cl::Hidden,
44 cl::desc("Manifest Attributor internal string attributes."),
45 cl::init(false));
46
47static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48 cl::Hidden);
49
50template <>
51unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52
53static cl::opt<unsigned, true> MaxPotentialValues(
54 "attributor-max-potential-values", cl::Hidden,
55 cl::desc("Maximum number of potential values to be "
56 "tracked for each position."),
57 cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58 cl::init(7));
59
60STATISTIC(NumAAs, "Number of abstract attributes created");
61
62// Some helper macros to deal with statistics tracking.
63//
64// Usage:
65// For simple IR attribute tracking overload trackStatistics in the abstract
66// attribute and choose the right STATS_DECLTRACK_********* macro,
67// e.g.,:
68// void trackStatistics() const override {
69// STATS_DECLTRACK_ARG_ATTR(returned)
70// }
71// If there is a single "increment" side one can use the macro
72// STATS_DECLTRACK with a custom message. If there are multiple increment
73// sides, STATS_DECL and STATS_TRACK can also be used separately.
74//
75#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
76 ("Number of " #TYPE " marked '" #NAME "'")
77#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79#define STATS_DECL(NAME, TYPE, MSG) \
80 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82#define STATS_DECLTRACK(NAME, TYPE, MSG) \
83 { \
84 STATS_DECL(NAME, TYPE, MSG) \
85 STATS_TRACK(NAME, TYPE) \
86 }
87#define STATS_DECLTRACK_ARG_ATTR(NAME) \
88 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
90 STATS_DECLTRACK(NAME, CSArguments, \
91 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92#define STATS_DECLTRACK_FN_ATTR(NAME) \
93 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94#define STATS_DECLTRACK_CS_ATTR(NAME) \
95 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
97 STATS_DECLTRACK(NAME, FunctionReturn, \
98 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
100 STATS_DECLTRACK(NAME, CSReturn, \
101 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
103 STATS_DECLTRACK(NAME, Floating, \
104 ("Number of floating values known to be '" #NAME "'"))
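// Note (illustrative sketch, not part of the original file): a use such as
// STATS_DECLTRACK_FN_ATTR(nounwind) expands, roughly, to
//   { STATISTIC(NumIRFunction_nounwind,
//               "Number of functions marked 'nounwind'");
//     ++(NumIRFunction_nounwind); }
// i.e. a uniquely named llvm::Statistic is declared in place and incremented
// every time the enclosing trackStatistics() override is executed.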
105
106// Specialization of the operator<< for abstract attribute subclasses. This
107// disambiguates situations where multiple operators are applicable.
108namespace llvm {
109#define PIPE_OPERATOR(CLASS) \
110 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
111 return OS << static_cast<const AbstractAttribute &>(AA); \
112 }
113
114PIPE_OPERATOR(AAIsDead)
115PIPE_OPERATOR(AANoUnwind)
116PIPE_OPERATOR(AANoSync)
117PIPE_OPERATOR(AANoRecurse)
118PIPE_OPERATOR(AAWillReturn)
119PIPE_OPERATOR(AANoReturn)
120PIPE_OPERATOR(AAReturnedValues)
121PIPE_OPERATOR(AANonNull)
122PIPE_OPERATOR(AANoAlias)
123PIPE_OPERATOR(AADereferenceable)
124PIPE_OPERATOR(AAAlign)
125PIPE_OPERATOR(AANoCapture)
126PIPE_OPERATOR(AAValueSimplify)
127PIPE_OPERATOR(AANoFree)
128PIPE_OPERATOR(AAHeapToStack)
129PIPE_OPERATOR(AAReachability)
130PIPE_OPERATOR(AAMemoryBehavior)
131PIPE_OPERATOR(AAMemoryLocation)
132PIPE_OPERATOR(AAValueConstantRange)
133PIPE_OPERATOR(AAPrivatizablePtr)
134PIPE_OPERATOR(AAUndefinedBehavior)
135PIPE_OPERATOR(AAPotentialValues)
136PIPE_OPERATOR(AANoUndef)
137
138#undef PIPE_OPERATOR
139} // namespace llvm
140
141namespace {
142
143static Optional<ConstantInt *>
144getAssumedConstantInt(Attributor &A, const Value &V,
145 const AbstractAttribute &AA,
146 bool &UsedAssumedInformation) {
147 Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148 if (C.hasValue())
149 return dyn_cast_or_null<ConstantInt>(C.getValue());
150 return llvm::None;
151}
152
153/// Get pointer operand of memory accessing instruction. If \p I is
154/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
155/// is set to false and the instruction is volatile, return nullptr.
156static const Value *getPointerOperand(const Instruction *I,
157 bool AllowVolatile) {
158 if (!AllowVolatile && I->isVolatile())
159 return nullptr;
160
161 if (auto *LI = dyn_cast<LoadInst>(I)) {
162 return LI->getPointerOperand();
163 }
164
165 if (auto *SI = dyn_cast<StoreInst>(I)) {
166 return SI->getPointerOperand();
167 }
168
169 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
170 return CXI->getPointerOperand();
171 }
172
173 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
174 return RMWI->getPointerOperand();
175 }
176
177 return nullptr;
178}
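// Usage sketch (assumption, not part of the original file): for a plain load
// such as `%v = load i32, i32* %p`, getPointerOperand(LI, /* AllowVolatile */
// false) returns %p; for a volatile load it returns nullptr unless
// AllowVolatile is true, and for any non-memory instruction it always
// returns nullptr.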
179
180/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
181/// advanced by \p Offset bytes. To aid later analysis the method tries to build
182/// getelement pointer instructions that traverse the natural type of \p Ptr if
183/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
184/// through a cast to i8*.
185///
186/// TODO: This could probably live somewhere more prominently if it doesn't
187/// already exist.
188static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
189 int64_t Offset, IRBuilder<NoFolder> &IRB,
190 const DataLayout &DL) {
191 assert(Offset >= 0 && "Negative offset not supported yet!");
192 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
193 << "-bytes as " << *ResTy << "\n");
194
195 if (Offset) {
196 SmallVector<Value *, 4> Indices;
197 std::string GEPName = Ptr->getName().str() + ".0";
198
199 // Add 0 index to look through the pointer.
200 assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
201 "Offset out of bounds");
202 Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
203
204 Type *Ty = PtrElemTy;
205 do {
206 auto *STy = dyn_cast<StructType>(Ty);
207 if (!STy)
208 // Non-aggregate type, we cast and make byte-wise progress now.
209 break;
210
211 const StructLayout *SL = DL.getStructLayout(STy);
212 if (int64_t(SL->getSizeInBytes()) < Offset)
213 break;
214
215 uint64_t Idx = SL->getElementContainingOffset(Offset);
216 assert(Idx < STy->getNumElements() && "Offset calculation error!");
217 uint64_t Rem = Offset - SL->getElementOffset(Idx);
218 Ty = STy->getElementType(Idx);
219
220 LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
221 << " Idx: " << Idx << " Rem: " << Rem << "\n");
222
223 GEPName += "." + std::to_string(Idx);
224 Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
225 Offset = Rem;
226 } while (Offset);
227
228 // Create a GEP for the indices collected above.
229 Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
230
231 // If an offset is left we use byte-wise adjustment.
232 if (Offset) {
233 Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
234 Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
235 GEPName + ".b" + Twine(Offset));
236 }
237 }
238
239 // Ensure the result has the requested type.
240 Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
241
242 LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
243 return Ptr;
244}
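// Worked example as a sketch (not part of the original file): with
// PtrElemTy = { i32, [4 x i8] } and Offset = 5, the struct layout places
// element 1 at byte 4, so the loop above emits GEP indices [0, 1] and leaves
// a remainder of 1 byte; that remainder is applied through a bitcast to i8*
// plus a byte-wise GEP of 1, before the final cast of the result to ResTy.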
245
246/// Recursively visit all values that might become \p IRP at some point. This
247/// will be done by looking through cast instructions, selects, phis, and calls
248/// with the "returned" attribute. Once we cannot look through the value any
249/// further, the callback \p VisitValueCB is invoked and passed the current
250/// value, the \p State, and a flag to indicate if we stripped anything.
251/// Stripped means that we unpacked the value associated with \p IRP at least
252/// once. Note that the value used for the callback may still be the value
253/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
254/// we will never visit more values than specified by \p MaxValues.
255template <typename AAType, typename StateTy>
256static bool genericValueTraversal(
257 Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
258 function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
259 VisitValueCB,
260 const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
261 function_ref<Value *(Value *)> StripCB = nullptr) {
262
263 const AAIsDead *LivenessAA = nullptr;
264 if (IRP.getAnchorScope())
265 LivenessAA = &A.getAAFor<AAIsDead>(
266 QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
267 DepClassTy::NONE);
268 bool AnyDead = false;
269
270 using Item = std::pair<Value *, const Instruction *>;
271 SmallSet<Item, 16> Visited;
272 SmallVector<Item, 16> Worklist;
273 Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
274
275 int Iteration = 0;
276 do {
277 Item I = Worklist.pop_back_val();
278 Value *V = I.first;
279 CtxI = I.second;
280 if (StripCB)
281 V = StripCB(V);
282
283 // Check if we should process the current value. To prevent endless
284 // recursion keep a record of the values we followed!
285 if (!Visited.insert(I).second)
286 continue;
287
288 // Make sure we limit the compile time for complex expressions.
289 if (Iteration++ >= MaxValues)
290 return false;
291
292 // Explicitly look through calls with a "returned" attribute if we do
293 // not have a pointer as stripPointerCasts only works on them.
294 Value *NewV = nullptr;
295 if (V->getType()->isPointerTy()) {
296 NewV = V->stripPointerCasts();
297 } else {
298 auto *CB = dyn_cast<CallBase>(V);
299 if (CB && CB->getCalledFunction()) {
300 for (Argument &Arg : CB->getCalledFunction()->args())
301 if (Arg.hasReturnedAttr()) {
302 NewV = CB->getArgOperand(Arg.getArgNo());
303 break;
304 }
305 }
306 }
307 if (NewV && NewV != V) {
308 Worklist.push_back({NewV, CtxI});
309 continue;
310 }
311
312 // Look through select instructions, visit both potential values.
313 if (auto *SI = dyn_cast<SelectInst>(V)) {
314 Worklist.push_back({SI->getTrueValue(), CtxI});
315 Worklist.push_back({SI->getFalseValue(), CtxI});
316 continue;
317 }
318
319 // Look through phi nodes, visit all live operands.
320 if (auto *PHI = dyn_cast<PHINode>(V)) {
321 assert(LivenessAA &&
322 "Expected liveness in the presence of instructions!");
323 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
324 BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
325 if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
326 LivenessAA,
327 /* CheckBBLivenessOnly */ true)) {
328 AnyDead = true;
329 continue;
330 }
331 Worklist.push_back(
332 {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
333 }
334 continue;
335 }
336
337 if (UseValueSimplify && !isa<Constant>(V)) {
338 bool UsedAssumedInformation = false;
339 Optional<Constant *> C =
340 A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
341 if (!C.hasValue())
342 continue;
343 if (Value *NewV = C.getValue()) {
344 Worklist.push_back({NewV, CtxI});
345 continue;
346 }
347 }
348
349 // Once a leaf is reached we inform the user through the callback.
350 if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
351 return false;
352 } while (!Worklist.empty());
353
354 // If we actually used liveness information, we have to record a dependence.
355 if (AnyDead)
356 A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
357
358 // All values have been visited.
359 return true;
360}
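// Caller sketch (illustrative only; the surrounding names are hypothetical):
// a typical visitor folds every leaf value the traversal cannot look through
// into a state, e.g.
//   auto VisitValueCB = [&](Value &V, const Instruction *CtxI, StateTy &S,
//                           bool Stripped) -> bool {
//     // Update S based on V; returning false aborts the traversal.
//     return true;
//   };
//   bool Complete = genericValueTraversal<AAType, StateTy>(
//       A, getIRPosition(), *this, S, VisitValueCB, getCtxI());
// A false result means the walk was aborted or exceeded MaxValues, so the
// caller should fall back to a pessimistic answer.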
361
362const Value *stripAndAccumulateMinimalOffsets(
363 Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
364 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
365 bool UseAssumed = false) {
366
367 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
368 const IRPosition &Pos = IRPosition::value(V);
369 // Only track dependence if we are going to use the assumed info.
370 const AAValueConstantRange &ValueConstantRangeAA =
371 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
372 UseAssumed ? DepClassTy::OPTIONAL
373 : DepClassTy::NONE);
374 ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
375 : ValueConstantRangeAA.getKnown();
376 // We can only use the lower part of the range because the upper part can
377 // be higher than what the value can really be.
378 ROffset = Range.getSignedMin();
379 return true;
380 };
381
382 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
383 AttributorAnalysis);
384}
385
386static const Value *getMinimalBaseOfAccsesPointerOperand(
387 Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
388 int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
389 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
390 if (!Ptr)
391 return nullptr;
392 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
393 const Value *Base = stripAndAccumulateMinimalOffsets(
394 A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
395
396 BytesOffset = OffsetAPInt.getSExtValue();
397 return Base;
398}
399
400static const Value *
401getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
402 const DataLayout &DL,
403 bool AllowNonInbounds = false) {
404 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
405 if (!Ptr)
406 return nullptr;
407
408 return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
409 AllowNonInbounds);
410}
411
412/// Helper function to clamp a state \p S of type \p StateType with the
413/// information in \p R and indicate/return if \p S did change (as in, an update is
414/// required to be run again).
415template <typename StateType>
416ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
417 auto Assumed = S.getAssumed();
418 S ^= R;
419 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
420 : ChangeStatus::CHANGED;
421}
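// Usage sketch (assumption, not from the original file): S is the state of
// the attribute being updated and R the state of a queried attribute;
// `S ^= R` folds R's assumed information into S, and the returned
// ChangeStatus tells the Attributor whether another update round is needed:
//   ChangeStatus C =
//       clampStateAndIndicateChange(getState(), OtherAA.getState());
//   // C == ChangeStatus::CHANGED iff the assumed bits of getState() changed.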
422
423/// Clamp the information known for all returned values of a function
424/// (identified by \p QueryingAA) into \p S.
425template <typename AAType, typename StateType = typename AAType::StateType>
426static void clampReturnedValueStates(
427 Attributor &A, const AAType &QueryingAA, StateType &S,
428 const IRPosition::CallBaseContext *CBContext = nullptr) {
429 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
430 << QueryingAA << " into " << S << "\n");
431
432 assert((QueryingAA.getIRPosition().getPositionKind() ==
433 IRPosition::IRP_RETURNED ||
434 QueryingAA.getIRPosition().getPositionKind() ==
435 IRPosition::IRP_CALL_SITE_RETURNED) &&
436 "Can only clamp returned value states for a function returned or call "
437 "site returned position!");
438
439 // Use an optional state as there might not be any return values and we want
440 // to join (IntegerState::operator&) the state of all there are.
441 Optional<StateType> T;
442
443 // Callback for each possibly returned value.
444 auto CheckReturnValue = [&](Value &RV) -> bool {
445 const IRPosition &RVPos = IRPosition::value(RV, CBContext);
446 const AAType &AA =
447 A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
448 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
449 << " @ " << RVPos << "\n");
450 const StateType &AAS = AA.getState();
451 if (T.hasValue())
452 *T &= AAS;
453 else
454 T = AAS;
455 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
456 << "\n");
457 return T->isValidState();
458 };
459
460 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
461 S.indicatePessimisticFixpoint();
462 else if (T.hasValue())
463 S ^= *T;
464}
465
466/// Helper class for generic deduction: return value -> returned position.
467template <typename AAType, typename BaseType,
468 typename StateType = typename BaseType::StateType,
469 bool PropagateCallBaseContext = false>
470struct AAReturnedFromReturnedValues : public BaseType {
471 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
472 : BaseType(IRP, A) {}
473
474 /// See AbstractAttribute::updateImpl(...).
475 ChangeStatus updateImpl(Attributor &A) override {
476 StateType S(StateType::getBestState(this->getState()));
477 clampReturnedValueStates<AAType, StateType>(
478 A, *this, S,
479 PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
480 // TODO: If we know we visited all returned values, thus none are assumed
481 // dead, we can take the known information from the state T.
482 return clampStateAndIndicateChange<StateType>(this->getState(), S);
483 }
484};
485
486/// Clamp the information known at all call sites for a given argument
487/// (identified by \p QueryingAA) into \p S.
488template <typename AAType, typename StateType = typename AAType::StateType>
489static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
490 StateType &S) {
491 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
492 << QueryingAA << " into " << S << "\n");
493
494 assert(QueryingAA.getIRPosition().getPositionKind() ==
495 IRPosition::IRP_ARGUMENT &&
496 "Can only clamp call site argument states for an argument position!");
497
498 // Use an optional state as there might not be any return values and we want
499 // to join (IntegerState::operator&) the state of all there are.
500 Optional<StateType> T;
501
502 // The argument number which is also the call site argument number.
503 unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
504
505 auto CallSiteCheck = [&](AbstractCallSite ACS) {
506 const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
507 // Check if a corresponding argument was found or if it is not associated
508 // (which can happen for callback calls).
509 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
510 return false;
511
512 const AAType &AA =
513 A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
514 LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
515 << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
516 const StateType &AAS = AA.getState();
517 if (T.hasValue())
518 *T &= AAS;
519 else
520 T = AAS;
521 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
522 << "\n");
523 return T->isValidState();
524 };
525
526 bool AllCallSitesKnown;
527 if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
528 AllCallSitesKnown))
529 S.indicatePessimisticFixpoint();
530 else if (T.hasValue())
531 S ^= *T;
532}
533
534/// This function is the bridge between argument position and the call base
535/// context.
536template <typename AAType, typename BaseType,
537 typename StateType = typename AAType::StateType>
538bool getArgumentStateFromCallBaseContext(Attributor &A,
539 BaseType &QueryingAttribute,
540 IRPosition &Pos, StateType &State) {
541 assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
542 "Expected an 'argument' position !");
543 const CallBase *CBContext = Pos.getCallBaseContext();
544 if (!CBContext)
545 return false;
546
547 int ArgNo = Pos.getCallSiteArgNo();
548 assert(ArgNo >= 0 && "Invalid Arg No!");
549
550 const auto &AA = A.getAAFor<AAType>(
551 QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
552 DepClassTy::REQUIRED);
553 const StateType &CBArgumentState =
554 static_cast<const StateType &>(AA.getState());
555
555
556 LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument"
557 << "Position:" << Pos << "CB Arg state:" << CBArgumentState
558 << "\n");
559
560 // NOTE: If we want to do call site grouping it should happen here.
561 State ^= CBArgumentState;
562 return true;
563}
564
565/// Helper class for generic deduction: call site argument -> argument position.
566template <typename AAType, typename BaseType,
567 typename StateType = typename AAType::StateType,
568 bool BridgeCallBaseContext = false>
569struct AAArgumentFromCallSiteArguments : public BaseType {
570 AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
571 : BaseType(IRP, A) {}
572
573 /// See AbstractAttribute::updateImpl(...).
574 ChangeStatus updateImpl(Attributor &A) override {
575 StateType S = StateType::getBestState(this->getState());
576
577 if (BridgeCallBaseContext) {
578 bool Success =
579 getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
580 A, *this, this->getIRPosition(), S);
581 if (Success)
582 return clampStateAndIndicateChange<StateType>(this->getState(), S);
583 }
584 clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
585
586 // TODO: If we know we visited all incoming values, thus none are assumed
587 // dead, we can take the known information from the state T.
588 return clampStateAndIndicateChange<StateType>(this->getState(), S);
589 }
590};
591
592/// Helper class for generic replication: function returned -> cs returned.
593template <typename AAType, typename BaseType,
594 typename StateType = typename BaseType::StateType,
595 bool IntroduceCallBaseContext = false>
596struct AACallSiteReturnedFromReturned : public BaseType {
597 AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
598 : BaseType(IRP, A) {}
599
600 /// See AbstractAttribute::updateImpl(...).
601 ChangeStatus updateImpl(Attributor &A) override {
602 assert(this->getIRPosition().getPositionKind() ==
603 IRPosition::IRP_CALL_SITE_RETURNED &&
604 "Can only wrap function returned positions for call site returned "
605 "positions!");
606 auto &S = this->getState();
607
608 const Function *AssociatedFunction =
609 this->getIRPosition().getAssociatedFunction();
610 if (!AssociatedFunction)
611 return S.indicatePessimisticFixpoint();
612
613 CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
614 if (IntroduceCallBaseContext)
615 LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
616 << CBContext << "\n");
617
618 IRPosition FnPos = IRPosition::returned(
619 *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
620 const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
621 return clampStateAndIndicateChange(S, AA.getState());
622 }
623};
624
625/// Helper function to accumulate uses.
626template <class AAType, typename StateType = typename AAType::StateType>
627static void followUsesInContext(AAType &AA, Attributor &A,
628 MustBeExecutedContextExplorer &Explorer,
629 const Instruction *CtxI,
630 SetVector<const Use *> &Uses,
631 StateType &State) {
632 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
633 for (unsigned u = 0; u < Uses.size(); ++u) {
634 const Use *U = Uses[u];
635 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
636 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
637 if (Found && AA.followUseInMBEC(A, U, UserI, State))
638 for (const Use &Us : UserI->uses())
639 Uses.insert(&Us);
640 }
641 }
642}
643
644/// Use the must-be-executed-context around \p I to add information into \p S.
645/// The AAType class is required to have a `followUseInMBEC` method with the
646/// following signature and behaviour:
647///
648/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
649/// U - Underlying use.
650/// I - The user of the \p U.
651/// Returns true if the value should be tracked transitively.
652///
653template <class AAType, typename StateType = typename AAType::StateType>
654static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
655 Instruction &CtxI) {
656
657 // Container for (transitive) uses of the associated value.
658 SetVector<const Use *> Uses;
659 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
660 Uses.insert(&U);
661
662 MustBeExecutedContextExplorer &Explorer =
663 A.getInfoCache().getMustBeExecutedContextExplorer();
664
665 followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
666
667 if (S.isAtFixpoint())
668 return;
669
670 SmallVector<const BranchInst *, 4> BrInsts;
671 auto Pred = [&](const Instruction *I) {
672 if (const BranchInst *Br = dyn_cast<BranchInst>(I))
673 if (Br->isConditional())
674 BrInsts.push_back(Br);
675 return true;
676 };
677
678 // Here, accumulate conditional branch instructions in the context. We
679 // explore the child paths and collect the known states. The disjunction of
680 // those states can be merged to its own state. Let ParentState_i be a state
681 // to indicate the known information for an i-th branch instruction in the
682 // context. ChildStates are created for its successors respectively.
683 //
684 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
685 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
686 // ...
687 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
688 //
689 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
690 //
691 // FIXME: Currently, recursive branches are not handled. For example, we
692 // can't deduce that ptr must be dereferenced in below function.
693 //
694 // void f(int a, int c, int *ptr) {
695 // if(a)
696 // if (b) {
697 // *ptr = 0;
698 // } else {
699 // *ptr = 1;
700 // }
701 // else {
702 // if (b) {
703 // *ptr = 0;
704 // } else {
705 // *ptr = 1;
706 // }
707 // }
708 // }
709
710 Explorer.checkForAllContext(&CtxI, Pred);
711 for (const BranchInst *Br : BrInsts) {
712 StateType ParentState;
713
714 // The known state of the parent state is a conjunction of children's
715 // known states so it is initialized with a best state.
716 ParentState.indicateOptimisticFixpoint();
717
718 for (const BasicBlock *BB : Br->successors()) {
719 StateType ChildState;
720
721 size_t BeforeSize = Uses.size();
722 followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
723
724 // Erase uses which only appear in the child.
725 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
726 It = Uses.erase(It);
727
728 ParentState &= ChildState;
729 }
730
731 // Use only known state.
732 S += ParentState;
733 }
734}
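// Illustrative summary (not part of the original file): for a conditional
// branch `br i1 %c, label %T, label %F` found in the must-be-executed
// context, the loop above computes a child state for %T and for %F by
// following uses from the first instruction of each successor, takes their
// conjunction as the parent state (information that holds on every path),
// and adds only that known part to S. Uses discovered solely inside one
// child are erased again so they do not leak into the other successor.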
735
736/// -----------------------NoUnwind Function Attribute--------------------------
737
738struct AANoUnwindImpl : AANoUnwind {
739 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
740
741 const std::string getAsStr() const override {
742 return getAssumed() ? "nounwind" : "may-unwind";
743 }
744
745 /// See AbstractAttribute::updateImpl(...).
746 ChangeStatus updateImpl(Attributor &A) override {
747 auto Opcodes = {
748 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
749 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
750 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
751
752 auto CheckForNoUnwind = [&](Instruction &I) {
753 if (!I.mayThrow())
754 return true;
755
756 if (const auto *CB = dyn_cast<CallBase>(&I)) {
757 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
758 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
759 return NoUnwindAA.isAssumedNoUnwind();
760 }
761 return false;
762 };
763
764 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
765 return indicatePessimisticFixpoint();
766
767 return ChangeStatus::UNCHANGED;
768 }
769};
770
771struct AANoUnwindFunction final : public AANoUnwindImpl {
772 AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
773 : AANoUnwindImpl(IRP, A) {}
774
775 /// See AbstractAttribute::trackStatistics()
776 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
777};
778
779/// NoUnwind attribute deduction for a call site.
780struct AANoUnwindCallSite final : AANoUnwindImpl {
781 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
782 : AANoUnwindImpl(IRP, A) {}
783
784 /// See AbstractAttribute::initialize(...).
785 void initialize(Attributor &A) override {
786 AANoUnwindImpl::initialize(A);
787 Function *F = getAssociatedFunction();
788 if (!F || F->isDeclaration())
789 indicatePessimisticFixpoint();
790 }
791
792 /// See AbstractAttribute::updateImpl(...).
793 ChangeStatus updateImpl(Attributor &A) override {
794 // TODO: Once we have call site specific value information we can provide
795 // call site specific liveness information and then it makes
796 // sense to specialize attributes for call site arguments instead of
797 // redirecting requests to the callee argument.
798 Function *F = getAssociatedFunction();
799 const IRPosition &FnPos = IRPosition::function(*F);
800 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
801 return clampStateAndIndicateChange(getState(), FnAA.getState());
802 }
803
804 /// See AbstractAttribute::trackStatistics()
805 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
806};
807
808/// --------------------- Function Return Values -------------------------------
809
810/// "Attribute" that collects all potential returned values and the return
811/// instructions that they arise from.
812///
813/// If there is a unique returned value R, the manifest method will:
814/// - mark R with the "returned" attribute, if R is an argument.
815class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
816
817 /// Mapping of values potentially returned by the associated function to the
818 /// return instructions that might return them.
819 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
820
821 /// Mapping to remember the number of returned values for a call site such
822 /// that we can avoid updates if nothing changed.
823 DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
824
825 /// Set of unresolved calls returned by the associated function.
826 SmallSetVector<CallBase *, 4> UnresolvedCalls;
827
828 /// State flags
829 ///
830 ///{
831 bool IsFixed = false;
832 bool IsValidState = true;
833 ///}
834
835public:
836 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
837 : AAReturnedValues(IRP, A) {}
838
839 /// See AbstractAttribute::initialize(...).
840 void initialize(Attributor &A) override {
841 // Reset the state.
842 IsFixed = false;
843 IsValidState = true;
844 ReturnedValues.clear();
845
846 Function *F = getAssociatedFunction();
847 if (!F || F->isDeclaration()) {
848 indicatePessimisticFixpoint();
849 return;
850 }
851 assert(!F->getReturnType()->isVoidTy() &&
852 "Did not expect a void return type!");
853
854 // The map from instruction opcodes to those instructions in the function.
855 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
856
857 // Look through all arguments, if one is marked as returned we are done.
858 for (Argument &Arg : F->args()) {
859 if (Arg.hasReturnedAttr()) {
860 auto &ReturnInstSet = ReturnedValues[&Arg];
861 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
862 for (Instruction *RI : *Insts)
863 ReturnInstSet.insert(cast<ReturnInst>(RI));
864
865 indicateOptimisticFixpoint();
866 return;
867 }
868 }
869
870 if (!A.isFunctionIPOAmendable(*F))
871 indicatePessimisticFixpoint();
872 }
873
874 /// See AbstractAttribute::manifest(...).
875 ChangeStatus manifest(Attributor &A) override;
876
877 /// See AbstractAttribute::getState(...).
878 AbstractState &getState() override { return *this; }
879
880 /// See AbstractAttribute::getState(...).
881 const AbstractState &getState() const override { return *this; }
882
883 /// See AbstractAttribute::updateImpl(Attributor &A).
884 ChangeStatus updateImpl(Attributor &A) override;
885
886 llvm::iterator_range<iterator> returned_values() override {
887 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
888 }
889
890 llvm::iterator_range<const_iterator> returned_values() const override {
891 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
892 }
893
894 const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
895 return UnresolvedCalls;
896 }
897
898 /// Return the number of potential return values, -1 if unknown.
899 size_t getNumReturnValues() const override {
900 return isValidState() ? ReturnedValues.size() : -1;
901 }
902
903 /// Return an assumed unique return value if a single candidate is found. If
904 /// there cannot be one, return a nullptr. If it is not clear yet, return the
905 /// Optional::NoneType.
906 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
907
908 /// See AbstractState::checkForAllReturnedValues(...).
909 bool checkForAllReturnedValuesAndReturnInsts(
910 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
911 const override;
912
913 /// Pretty print the attribute similar to the IR representation.
914 const std::string getAsStr() const override;
915
916 /// See AbstractState::isAtFixpoint().
917 bool isAtFixpoint() const override { return IsFixed; }
918
919 /// See AbstractState::isValidState().
920 bool isValidState() const override { return IsValidState; }
921
922 /// See AbstractState::indicateOptimisticFixpoint(...).
923 ChangeStatus indicateOptimisticFixpoint() override {
924 IsFixed = true;
925 return ChangeStatus::UNCHANGED;
926 }
927
928 ChangeStatus indicatePessimisticFixpoint() override {
929 IsFixed = true;
930 IsValidState = false;
931 return ChangeStatus::CHANGED;
932 }
933};
934
935ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
936 ChangeStatus Changed = ChangeStatus::UNCHANGED;
937
938 // Bookkeeping.
939  assert(isValidState());
940  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
941                  "Number of function with known return values");
942
943 // Check if we have an assumed unique return value that we could manifest.
944 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
945
946 if (!UniqueRV.hasValue() || !UniqueRV.getValue())
947 return Changed;
948
949 // Bookkeeping.
950  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
951                  "Number of function with unique return");
952
953 // Callback to replace the uses of CB with the constant C.
954 auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
955 if (CB.use_empty())
956 return ChangeStatus::UNCHANGED;
957 if (A.changeValueAfterManifest(CB, C))
958 return ChangeStatus::CHANGED;
959 return ChangeStatus::UNCHANGED;
960 };
961
962 // If the assumed unique return value is an argument, annotate it.
963 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
964 if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
965 getAssociatedFunction()->getReturnType())) {
966 getIRPosition() = IRPosition::argument(*UniqueRVArg);
967 Changed = IRAttribute::manifest(A);
968 }
969 } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
970 // We can replace the returned value with the unique returned constant.
971 Value &AnchorValue = getAnchorValue();
972 if (Function *F = dyn_cast<Function>(&AnchorValue)) {
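      // Replace the uses of every direct call site of F with the constant,
      // casting it first if the call site type differs from the constant type.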
973 for (const Use &U : F->uses())
974 if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
975 if (CB->isCallee(&U)) {
976 Constant *RVCCast =
977 CB->getType() == RVC->getType()
978 ? RVC
979 : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
980 Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
981 }
982 } else {
983      assert(isa<CallBase>(AnchorValue) &&
984             "Expected a function or call base anchor!");
985 Constant *RVCCast =
986 AnchorValue.getType() == RVC->getType()
987 ? RVC
988 : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
989 Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
990 }
991 if (Changed == ChangeStatus::CHANGED)
992      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
993                      "Number of function returns replaced by constant return");
994 }
995
996 return Changed;
997}
998
999const std::string AAReturnedValuesImpl::getAsStr() const {
1000 return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1001 (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1002 ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1003}
1004
1005Optional<Value *>
1006AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1007 // If checkForAllReturnedValues provides a unique value, ignoring potential
1008 // undef values that can also be present, it is assumed to be the actual
1009 // return value and forwarded to the caller of this method. If there are
1010 // multiple, a nullptr is returned indicating there cannot be a unique
1011 // returned value.
1012 Optional<Value *> UniqueRV;
1013
1014 auto Pred = [&](Value &RV) -> bool {
1015 // If we found a second returned value and neither the current nor the saved
1016 // one is an undef, there is no unique returned value. Undefs are special
1017 // since we can pretend they have any value.
1018 if (UniqueRV.hasValue() && UniqueRV != &RV &&
1019 !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1020 UniqueRV = nullptr;
1021 return false;
1022 }
1023
1024 // Do not overwrite a value with an undef.
1025 if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1026 UniqueRV = &RV;
1027
1028 return true;
1029 };
1030
1031 if (!A.checkForAllReturnedValues(Pred, *this))
1032 UniqueRV = nullptr;
1033
1034 return UniqueRV;
1035}
1036
1037bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1038 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1039 const {
1040 if (!isValidState())
1041 return false;
1042
1043 // Check all returned values but ignore call sites as long as we have not
1044 // encountered an overdefined one during an update.
1045 for (auto &It : ReturnedValues) {
1046 Value *RV = It.first;
1047
1048 CallBase *CB = dyn_cast<CallBase>(RV);
1049 if (CB && !UnresolvedCalls.count(CB))
1050 continue;
1051
1052 if (!Pred(*RV, It.second))
1053 return false;
1054 }
1055
1056 return true;
1057}
1058
1059ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1060 size_t NumUnresolvedCalls = UnresolvedCalls.size();
1061 bool Changed = false;
1062
1063 // State used in the value traversals starting in returned values.
1064 struct RVState {
1065 // The map in which we collect return values -> return instrs.
1066 decltype(ReturnedValues) &RetValsMap;
1067 // The flag to indicate a change.
1068 bool &Changed;
1069 // The return instrs we come from.
1070 SmallSetVector<ReturnInst *, 4> RetInsts;
1071 };
1072
1073 // Callback for a leaf value returned by the associated function.
1074 auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1075 bool) -> bool {
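    // Remember the previous size of the return-instruction set for this value;
    // if the insertion below grows it, we learned something new this iteration.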
1076 auto Size = RVS.RetValsMap[&Val].size();
1077 RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1078 bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1079 RVS.Changed |= Inserted;
1080    LLVM_DEBUG({
1081      if (Inserted)
1082        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1083               << " => " << RVS.RetInsts.size() << "\n";
1084    });
1085 return true;
1086 };
1087
1088 // Helper method to invoke the generic value traversal.
1089 auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1090 const Instruction *CtxI) {
1091 IRPosition RetValPos = IRPosition::value(RV);
1092 return genericValueTraversal<AAReturnedValues, RVState>(
1093 A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1094 /* UseValueSimplify */ false);
1095 };
1096
1097  // Callback for all "return instructions" live in the associated function.
1098 auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1099 ReturnInst &Ret = cast<ReturnInst>(I);
1100 RVState RVS({ReturnedValues, Changed, {}});
1101 RVS.RetInsts.insert(&Ret);
1102 return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1103 };
1104
1105  // Start by discovering returned values from all live return instructions in
1106 // the associated function.
1107 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1108 return indicatePessimisticFixpoint();
1109
1110 // Once returned values "directly" present in the code are handled we try to
1111 // resolve returned calls. To avoid modifications to the ReturnedValues map
1112  // while we iterate over it, we keep a record of potential new entries in a copy
1113 // map, NewRVsMap.
1114 decltype(ReturnedValues) NewRVsMap;
1115
1116 auto HandleReturnValue = [&](Value *RV,
1117 SmallSetVector<ReturnInst *, 4> &RIs) {
1118    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1119                      << RIs.size() << " RIs\n");
1120 CallBase *CB = dyn_cast<CallBase>(RV);
1121 if (!CB || UnresolvedCalls.count(CB))
1122 return;
1123
1124 if (!CB->getCalledFunction()) {
1125      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1126                        << "\n");
1127 UnresolvedCalls.insert(CB);
1128 return;
1129 }
1130
1131 // TODO: use the function scope once we have call site AAReturnedValues.
1132 const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1133 *this, IRPosition::function(*CB->getCalledFunction()),
1134 DepClassTy::REQUIRED);
1135    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1136                      << RetValAA << "\n");
1137
1138    // Skip dead ends, that is, if we do not know anything about the returned
1139 // call we mark it as unresolved and it will stay that way.
1140 if (!RetValAA.getState().isValidState()) {
1141      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1142                        << "\n");
1143 UnresolvedCalls.insert(CB);
1144 return;
1145 }
1146
1147 // Do not try to learn partial information. If the callee has unresolved
1148 // return values we will treat the call as unresolved/opaque.
1149 auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1150 if (!RetValAAUnresolvedCalls.empty()) {
1151 UnresolvedCalls.insert(CB);
1152 return;
1153 }
1154
1155    // Now check if we can track transitively returned values. If possible, that
1156    // is, if all return values can be represented in the current scope, do so.
1157 bool Unresolved = false;
1158 for (auto &RetValAAIt : RetValAA.returned_values()) {
1159 Value *RetVal = RetValAAIt.first;
1160 if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1161 isa<Constant>(RetVal))
1162 continue;
1163 // Anything that did not fit in the above categories cannot be resolved,
1164 // mark the call as unresolved.
1165      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1166                           "cannot be translated: "
1167                        << *RetVal << "\n");
1168 UnresolvedCalls.insert(CB);
1169 Unresolved = true;
1170 break;
1171 }
1172
1173 if (Unresolved)
1174 return;
1175
1176 // Now track transitively returned values.
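    // Remember how many returned values we saw for this callee last time so we
    // can skip the rework below if nothing has changed since then.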
1177 unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1178 if (NumRetAA == RetValAA.getNumReturnValues()) {
1179      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1180                           "changed since it was seen last\n");
1181 return;
1182 }
1183 NumRetAA = RetValAA.getNumReturnValues();
1184
1185 for (auto &RetValAAIt : RetValAA.returned_values()) {
1186 Value *RetVal = RetValAAIt.first;
1187 if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1188 // Arguments are mapped to call site operands and we begin the traversal
1189 // again.
1190 bool Unused = false;
1191 RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1192 VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1193 continue;
1194 }
1195 if (isa<CallBase>(RetVal)) {
1196 // Call sites are resolved by the callee attribute over time, no need to
1197 // do anything for us.
1198 continue;
1199 }
1200 if (isa<Constant>(RetVal)) {
1201 // Constants are valid everywhere, we can simply take them.
1202 NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1203 continue;
1204 }
1205 }
1206 };
1207
1208 for (auto &It : ReturnedValues)
1209 HandleReturnValue(It.first, It.second);
1210
1211 // Because processing the new information can again lead to new return values
1212 // we have to be careful and iterate until this iteration is complete. The
1213 // idea is that we are in a stable state at the end of an update. All return
1214 // values have been handled and properly categorized. We might not update
1215 // again if we have not requested a non-fix attribute so we cannot "wait" for
1216 // the next update to analyze a new return value.
1217 while (!NewRVsMap.empty()) {
1218 auto It = std::move(NewRVsMap.back());
1219 NewRVsMap.pop_back();
1220
1221    assert(!It.second.empty() && "Entry does not add anything.");
1222 auto &ReturnInsts = ReturnedValues[It.first];
1223 for (ReturnInst *RI : It.second)
1224 if (ReturnInsts.insert(RI)) {
1225        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1226                          << *It.first << " => " << *RI << "\n");
1227 HandleReturnValue(It.first, ReturnInsts);
1228 Changed = true;
1229 }
1230 }
1231
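  // A change in the set of unresolved calls is also progress worth reporting.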
1232 Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1233 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1234}
1235
1236struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1237 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1238 : AAReturnedValuesImpl(IRP, A) {}
1239
1240 /// See AbstractAttribute::trackStatistics()
1241  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1242};
1243
1244/// Returned values information for a call site.
1245struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1246 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1247 : AAReturnedValuesImpl(IRP, A) {}
1248
1249 /// See AbstractAttribute::initialize(...).
1250 void initialize(Attributor &A) override {
1251 // TODO: Once we have call site specific value information we can provide
1252 // call site specific liveness information and then it makes
1253 // sense to specialize attributes for call sites instead of
1254 // redirecting requests to the callee.
1255    llvm_unreachable("Abstract attributes for returned values are not "
1256                     "supported for call sites yet!");
1257 }
1258
1259 /// See AbstractAttribute::updateImpl(...).
1260 ChangeStatus updateImpl(Attributor &A) override {
1261 return indicatePessimisticFixpoint();
1262 }
1263
1264 /// See AbstractAttribute::trackStatistics()
1265 void trackStatistics() const override {}
1266};
1267
1268/// ------------------------ NoSync Function Attribute -------------------------
1269
1270struct AANoSyncImpl : AANoSync {
1271 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1272
1273 const std::string getAsStr() const override {
1274 return getAssumed() ? "nosync" : "may-sync";
1275 }
1276
1277 /// See AbstractAttribute::updateImpl(...).
1278 ChangeStatus updateImpl(Attributor &A) override;
1279
1280  /// Helper function used to determine whether an instruction is a non-relaxed
1281  /// atomic, i.e., an atomic instruction whose ordering is stronger than
1282  /// unordered or monotonic.
1283 static bool isNonRelaxedAtomic(Instruction *I);
1284
1285  /// Helper function specific to intrinsics which are potentially volatile.
1286 static bool isNoSyncIntrinsic(Instruction *I);
1287};
1288
1289bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1290 if (!I->isAtomic())
1291 return false;
1292
1293 if (auto *FI = dyn_cast<FenceInst>(I))
1294 // All legal orderings for fence are stronger than monotonic.
1295 return FI->getSyncScopeID() != SyncScope::SingleThread;
1296 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1297 // Unordered is not a legal ordering for cmpxchg.
1298 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1299 AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1300 }
1301
1302 AtomicOrdering Ordering;
1303 switch (I->getOpcode()) {
1304 case Instruction::AtomicRMW:
1305 Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1306 break;
1307 case Instruction::Store:
1308 Ordering = cast<StoreInst>(I)->getOrdering();
1309 break;
1310 case Instruction::Load:
1311 Ordering = cast<LoadInst>(I)->getOrdering();
1312 break;
1313 default:
1314    llvm_unreachable(
1315        "New atomic operations need to be known in the attributor.");
1316 }
1317
1318 return (Ordering != AtomicOrdering::Unordered &&
1319 Ordering != AtomicOrdering::Monotonic);
1320}
1321
1322/// Return true if this intrinsic is nosync. This is only used for intrinsics
1323/// which would be nosync except that they have a volatile flag. All other
1324/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1325bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1326 if (auto *MI = dyn_cast<MemIntrinsic>(I))
1327 return !MI->isVolatile();
1328 return false;
1329}
1330
1331ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332
1333 auto CheckRWInstForNoSync = [&](Instruction &I) {
1334 /// We are looking for volatile instructions or Non-Relaxed atomics.
1335
1336 if (const auto *CB = dyn_cast<CallBase>(&I)) {
1337 if (CB->hasFnAttr(Attribute::NoSync))
1338 return true;
1339
1340 if (isNoSyncIntrinsic(&I))
1341 return true;
1342
1343 const auto &NoSyncAA = A.getAAFor<AANoSync>(
1344 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1345 return NoSyncAA.isAssumedNoSync();
1346 }
1347
1348 if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1349 return true;
1350
1351 return false;
1352 };
1353
1354 auto CheckForNoSync = [&](Instruction &I) {
1355 // At this point we handled all read/write effects and they are all
1356 // nosync, so they can be skipped.
1357 if (I.mayReadOrWriteMemory())
1358 return true;
1359
1360 // non-convergent and readnone imply nosync.
1361 return !cast<CallBase>(I).isConvergent();
1362 };
1363
1364 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1365 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1366 return indicatePessimisticFixpoint();
1367
1368 return ChangeStatus::UNCHANGED;
1369}
1370
1371struct AANoSyncFunction final : public AANoSyncImpl {
1372 AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1373 : AANoSyncImpl(IRP, A) {}
1374
1375 /// See AbstractAttribute::trackStatistics()
1376  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1377};
1378
1379/// NoSync attribute deduction for a call site.
1380struct AANoSyncCallSite final : AANoSyncImpl {
1381 AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1382 : AANoSyncImpl(IRP, A) {}
1383
1384 /// See AbstractAttribute::initialize(...).
1385 void initialize(Attributor &A) override {
1386 AANoSyncImpl::initialize(A);
1387 Function *F = getAssociatedFunction();
1388 if (!F || F->isDeclaration())
1389 indicatePessimisticFixpoint();
1390 }
1391
1392 /// See AbstractAttribute::updateImpl(...).
1393 ChangeStatus updateImpl(Attributor &A) override {
1394 // TODO: Once we have call site specific value information we can provide
1395 // call site specific liveness information and then it makes
1396 // sense to specialize attributes for call sites arguments instead of
1397 // redirecting requests to the callee argument.
1398 Function *F = getAssociatedFunction();
1399 const IRPosition &FnPos = IRPosition::function(*F);
1400 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1401 return clampStateAndIndicateChange(getState(), FnAA.getState());
1402 }
1403
1404 /// See AbstractAttribute::trackStatistics()
1405  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1406};
1407
1408/// ------------------------ No-Free Attributes ----------------------------
1409
1410struct AANoFreeImpl : public AANoFree {
1411 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1412
1413 /// See AbstractAttribute::updateImpl(...).
1414 ChangeStatus updateImpl(Attributor &A) override {
1415 auto CheckForNoFree = [&](Instruction &I) {
1416 const auto &CB = cast<CallBase>(I);
1417 if (CB.hasFnAttr(Attribute::NoFree))
1418 return true;
1419
1420 const auto &NoFreeAA = A.getAAFor<AANoFree>(
1421 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1422 return NoFreeAA.isAssumedNoFree();
1423 };
1424
1425 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1426 return indicatePessimisticFixpoint();
1427 return ChangeStatus::UNCHANGED;
1428 }
1429
1430 /// See AbstractAttribute::getAsStr().
1431 const std::string getAsStr() const override {
1432 return getAssumed() ? "nofree" : "may-free";
1433 }
1434};
1435
1436struct AANoFreeFunction final : public AANoFreeImpl {
1437 AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1438 : AANoFreeImpl(IRP, A) {}
1439
1440 /// See AbstractAttribute::trackStatistics()
1441  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1442};
1443
1444/// NoFree attribute deduction for a call site.
1445struct AANoFreeCallSite final : AANoFreeImpl {
1446 AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1447 : AANoFreeImpl(IRP, A) {}
1448
1449 /// See AbstractAttribute::initialize(...).
1450 void initialize(Attributor &A) override {
1451 AANoFreeImpl::initialize(A);
1452 Function *F = getAssociatedFunction();
1453 if (!F || F->isDeclaration())
1454 indicatePessimisticFixpoint();
1455 }
1456
1457 /// See AbstractAttribute::updateImpl(...).
1458 ChangeStatus updateImpl(Attributor &A) override {
1459 // TODO: Once we have call site specific value information we can provide
1460 // call site specific liveness information and then it makes
1461 // sense to specialize attributes for call sites arguments instead of
1462 // redirecting requests to the callee argument.
1463 Function *F = getAssociatedFunction();
1464 const IRPosition &FnPos = IRPosition::function(*F);
1465 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1466 return clampStateAndIndicateChange(getState(), FnAA.getState());
1467 }
1468
1469 /// See AbstractAttribute::trackStatistics()
1470  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1471};
1472
1473/// NoFree attribute for floating values.
1474struct AANoFreeFloating : AANoFreeImpl {
1475 AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1476 : AANoFreeImpl(IRP, A) {}
1477
1478 /// See AbstractAttribute::trackStatistics()
1479  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
1480
1481  /// See AbstractAttribute::updateImpl(...).
1482 ChangeStatus updateImpl(Attributor &A) override {
1483 const IRPosition &IRP = getIRPosition();
1484
1485 const auto &NoFreeAA = A.getAAFor<AANoFree>(
1486 *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1487 if (NoFreeAA.isAssumedNoFree())
1488 return ChangeStatus::UNCHANGED;
1489
1490 Value &AssociatedValue = getIRPosition().getAssociatedValue();
1491 auto Pred = [&](const Use &U, bool &Follow) -> bool {
1492 Instruction *UserI = cast<Instruction>(U.getUser());
1493 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1494 if (CB->isBundleOperand(&U))
1495 return false;
1496 if (!CB->isArgOperand(&U))
1497 return true;
1498 unsigned ArgNo = CB->getArgOperandNo(&U);
1499
1500 const auto &NoFreeArg = A.getAAFor<AANoFree>(
1501 *this, IRPosition::callsite_argument(*CB, ArgNo),
1502 DepClassTy::REQUIRED);
1503 return NoFreeArg.isAssumedNoFree();
1504 }
1505
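      // These instructions only forward the pointer, so follow their uses too.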
1506 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1507 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1508 Follow = true;
1509 return true;
1510 }
1511 if (isa<ReturnInst>(UserI))
1512 return true;
1513
1514 // Unknown user.
1515 return false;
1516 };
1517 if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1518 return indicatePessimisticFixpoint();
1519
1520 return ChangeStatus::UNCHANGED;
1521 }
1522};
1523
1524/// NoFree attribute for a function argument.
1525struct AANoFreeArgument final : AANoFreeFloating {
1526 AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1527 : AANoFreeFloating(IRP, A) {}
1528
1529 /// See AbstractAttribute::trackStatistics()
1530  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1531};
1532
1533/// NoFree attribute for call site arguments.
1534struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1535 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1536 : AANoFreeFloating(IRP, A) {}
1537
1538 /// See AbstractAttribute::updateImpl(...).
1539 ChangeStatus updateImpl(Attributor &A) override {
1540 // TODO: Once we have call site specific value information we can provide
1541 // call site specific liveness information and then it makes
1542 // sense to specialize attributes for call sites arguments instead of
1543 // redirecting requests to the callee argument.
1544 Argument *Arg = getAssociatedArgument();
1545 if (!Arg)
1546 return indicatePessimisticFixpoint();
1547 const IRPosition &ArgPos = IRPosition::argument(*Arg);
1548 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1549 return clampStateAndIndicateChange(getState(), ArgAA.getState());
1550 }
1551
1552 /// See AbstractAttribute::trackStatistics()
1553  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) };
1554};
1555
1556/// NoFree attribute for function return value.
1557struct AANoFreeReturned final : AANoFreeFloating {
1558 AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1559 : AANoFreeFloating(IRP, A) {
1560    llvm_unreachable("NoFree is not applicable to function returns!");
1561 }
1562
1563 /// See AbstractAttribute::initialize(...).
1564 void initialize(Attributor &A) override {
1565    llvm_unreachable("NoFree is not applicable to function returns!");
1566 }
1567
1568 /// See AbstractAttribute::updateImpl(...).
1569 ChangeStatus updateImpl(Attributor &A) override {
1570    llvm_unreachable("NoFree is not applicable to function returns!");
1571 }
1572
1573 /// See AbstractAttribute::trackStatistics()
1574 void trackStatistics() const override {}
1575};
1576
1577/// NoFree attribute deduction for a call site return value.
1578struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1579 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1580 : AANoFreeFloating(IRP, A) {}
1581
1582 ChangeStatus manifest(Attributor &A) override {
1583 return ChangeStatus::UNCHANGED;
1584 }
1585 /// See AbstractAttribute::trackStatistics()
1586  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1587};
1588
1589/// ------------------------ NonNull Argument Attribute ------------------------
1590static int64_t getKnownNonNullAndDerefBytesForUse(
1591 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1592 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1593 TrackUse = false;
1594
1595 const Value *UseV = U->get();
1596 if (!UseV->getType()->isPointerTy())
1597 return 0;
1598
1599 // We need to follow common pointer manipulation uses to the accesses they
1600 // feed into. We can try to be smart to avoid looking through things we do not
1601 // like for now, e.g., non-inbounds GEPs.
1602 if (isa<CastInst>(I)) {
1603 TrackUse = true;
1604 return 0;
1605 }
1606
1607 if (isa<GetElementPtrInst>(I)) {
1608 TrackUse = true;
1609 return 0;
1610 }
1611
1612 Type *PtrTy = UseV->getType();
1613 const Function *F = I->getFunction();
1614 bool NullPointerIsDefined =
1615 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1616 const DataLayout &DL = A.getInfoCache().getDL();
1617 if (const auto *CB = dyn_cast<CallBase>(I)) {
1618 if (CB->isBundleOperand(U)) {
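      // Operand bundles can carry retained nonnull/dereferenceable knowledge
      // (typically from llvm.assume); reuse it directly if present.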
1619 if (RetainedKnowledge RK = getKnowledgeFromUse(
1620 U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1621 IsNonNull |=
1622 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1623 return RK.ArgValue;
1624 }
1625 return 0;
1626 }
1627
1628 if (CB->isCallee(U)) {
1629 IsNonNull |= !NullPointerIsDefined;
1630 return 0;
1631 }
1632
1633 unsigned ArgNo = CB->getArgOperandNo(U);
1634 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1635 // As long as we only use known information there is no need to track
1636 // dependences here.
1637 auto &DerefAA =
1638 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1639 IsNonNull |= DerefAA.isKnownNonNull();
1640 return DerefAA.getKnownDereferenceableBytes();
1641 }
1642
1643 int64_t Offset;
1644 const Value *Base =
1645 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1646 if (Base) {
1647 if (Base == &AssociatedValue &&
1648 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1649 int64_t DerefBytes =
1650 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1651
1652 IsNonNull |= !NullPointerIsDefined;
1653 return std::max(int64_t(0), DerefBytes);
1654 }
1655 }
1656
1657 /// Corner case when an offset is 0.
1658 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1659 /*AllowNonInbounds*/ true);
1660 if (Base) {
1661 if (Offset == 0 && Base == &AssociatedValue &&
1662 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1663 int64_t DerefBytes =
1664 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1665 IsNonNull |= !NullPointerIsDefined;
1666 return std::max(int64_t(0), DerefBytes);
1667 }
1668 }
1669
1670 return 0;
1671}
1672
1673struct AANonNullImpl : AANonNull {
1674 AANonNullImpl(const IRPosition &IRP, Attributor &A)
1675 : AANonNull(IRP, A),
1676 NullIsDefined(NullPointerIsDefined(
1677 getAnchorScope(),
1678 getAssociatedValue().getType()->getPointerAddressSpace())) {}
1679
1680 /// See AbstractAttribute::initialize(...).
1681 void initialize(Attributor &A) override {
1682 Value &V = getAssociatedValue();
1683 if (!NullIsDefined &&
1684 hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1685 /* IgnoreSubsumingPositions */ false, &A)) {
1686 indicateOptimisticFixpoint();
1687 return;
1688 }
1689
1690 if (isa<ConstantPointerNull>(V)) {
1691 indicatePessimisticFixpoint();
1692 return;
1693 }
1694
1695 AANonNull::initialize(A);
1696
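    // If the value is known to be dereferenceable and cannot be null, nonnull
    // is trivially known.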
1697 bool CanBeNull, CanBeFreed;
1698 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1699 CanBeFreed)) {
1700 if (!CanBeNull) {
1701 indicateOptimisticFixpoint();
1702 return;
1703 }
1704 }
1705
1706 if (isa<GlobalValue>(&getAssociatedValue())) {
1707 indicatePessimisticFixpoint();
1708 return;
1709 }
1710
1711 if (Instruction *CtxI = getCtxI())
1712 followUsesInMBEC(*this, A, getState(), *CtxI);
1713 }
1714
1715 /// See followUsesInMBEC
1716 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1717 AANonNull::StateType &State) {
1718 bool IsNonNull = false;
1719 bool TrackUse = false;
1720 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1721 IsNonNull, TrackUse);
1722 State.setKnown(IsNonNull);
1723 return TrackUse;
1724 }
1725
1726 /// See AbstractAttribute::getAsStr().
1727 const std::string getAsStr() const override {
1728 return getAssumed() ? "nonnull" : "may-null";
1729 }
1730
1731 /// Flag to determine if the underlying value can be null and still allow
1732 /// valid accesses.
1733 const bool NullIsDefined;
1734};
1735
1736/// NonNull attribute for a floating value.
1737struct AANonNullFloating : public AANonNullImpl {
1738 AANonNullFloating(const IRPosition &IRP, Attributor &A)
1739 : AANonNullImpl(IRP, A) {}
1740
1741 /// See AbstractAttribute::updateImpl(...).
1742 ChangeStatus updateImpl(Attributor &A) override {
1743 const DataLayout &DL = A.getDataLayout();
1744
1745 DominatorTree *DT = nullptr;
1746 AssumptionCache *AC = nullptr;
1747 InformationCache &InfoCache = A.getInfoCache();
1748 if (const Function *Fn = getAnchorScope()) {
1749 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1750 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1751 }
1752
1753 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1754 AANonNull::StateType &T, bool Stripped) -> bool {
1755 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1756 DepClassTy::REQUIRED);
1757 if (!Stripped && this == &AA) {
1758 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1759 T.indicatePessimisticFixpoint();
1760 } else {
1761 // Use abstract attribute information.
1762 const AANonNull::StateType &NS = AA.getState();
1763 T ^= NS;
1764 }
1765 return T.isValidState();
1766 };
1767
1768 StateType T;
1769 if (!genericValueTraversal<AANonNull, StateType>(
1770 A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1771 return indicatePessimisticFixpoint();
1772
1773 return clampStateAndIndicateChange(getState(), T);
1774 }
1775
1776 /// See AbstractAttribute::trackStatistics()
1777  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1778};
1779
1780/// NonNull attribute for function return value.
1781struct AANonNullReturned final
1782 : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1783 AANonNullReturned(const IRPosition &IRP, Attributor &A)
1784 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1785
1786 /// See AbstractAttribute::getAsStr().
1787 const std::string getAsStr() const override {
1788 return getAssumed() ? "nonnull" : "may-null";
1789 }
1790
1791 /// See AbstractAttribute::trackStatistics()
1792  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1793};
1794
1795/// NonNull attribute for function argument.
1796struct AANonNullArgument final
1797 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1798 AANonNullArgument(const IRPosition &IRP, Attributor &A)
1799 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1800
1801 /// See AbstractAttribute::trackStatistics()
1802  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1803};
1804
1805struct AANonNullCallSiteArgument final : AANonNullFloating {
1806 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1807 : AANonNullFloating(IRP, A) {}
1808
1809 /// See AbstractAttribute::trackStatistics()
1810  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1811};
1812
1813/// NonNull attribute for a call site return position.
1814struct AANonNullCallSiteReturned final
1815 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1816 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1817 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1818
1819 /// See AbstractAttribute::trackStatistics()
1820  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1821};
1822
1823/// ------------------------ No-Recurse Attributes ----------------------------
1824
1825struct AANoRecurseImpl : public AANoRecurse {
1826 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1827
1828 /// See AbstractAttribute::getAsStr()
1829 const std::string getAsStr() const override {
1830 return getAssumed() ? "norecurse" : "may-recurse";
1831 }
1832};
1833
1834struct AANoRecurseFunction final : AANoRecurseImpl {
1835 AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1836 : AANoRecurseImpl(IRP, A) {}
1837
1838 /// See AbstractAttribute::initialize(...).
1839 void initialize(Attributor &A) override {
1840 AANoRecurseImpl::initialize(A);
1841 if (const Function *F = getAnchorScope())
1842 if (A.getInfoCache().getSccSize(*F) != 1)
1843 indicatePessimisticFixpoint();
1844 }
1845
1846 /// See AbstractAttribute::updateImpl(...).
1847 ChangeStatus updateImpl(Attributor &A) override {
1848
1849 // If all live call sites are known to be no-recurse, we are as well.
1850 auto CallSitePred = [&](AbstractCallSite ACS) {
1851 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1852 *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1853 DepClassTy::NONE);
1854 return NoRecurseAA.isKnownNoRecurse();
1855 };
1856 bool AllCallSitesKnown;
1857 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1858 // If we know all call sites and all are known no-recurse, we are done.
1859 // If all known call sites, which might not be all that exist, are known
1860 // to be no-recurse, we are not done but we can continue to assume
1861 // no-recurse. If one of the call sites we have not visited will become
1862 // live, another update is triggered.
1863 if (AllCallSitesKnown)
1864 indicateOptimisticFixpoint();
1865 return ChangeStatus::UNCHANGED;
1866 }
1867
1868 // If the above check does not hold anymore we look at the calls.
1869 auto CheckForNoRecurse = [&](Instruction &I) {
1870 const auto &CB = cast<CallBase>(I);
1871 if (CB.hasFnAttr(Attribute::NoRecurse))
1872 return true;
1873
1874 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1875 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1876 if (!NoRecurseAA.isAssumedNoRecurse())
1877 return false;
1878
1879 // Recursion to the same function
1880 if (CB.getCalledFunction() == getAnchorScope())
1881 return false;
1882
1883 return true;
1884 };
1885
1886 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1887 return indicatePessimisticFixpoint();
1888 return ChangeStatus::UNCHANGED;
1889 }
1890
1891  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1892};
1893
1894/// NoRecurse attribute deduction for a call site.
1895struct AANoRecurseCallSite final : AANoRecurseImpl {
1896 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1897 : AANoRecurseImpl(IRP, A) {}
1898
1899 /// See AbstractAttribute::initialize(...).
1900 void initialize(Attributor &A) override {
1901 AANoRecurseImpl::initialize(A);
1902 Function *F = getAssociatedFunction();
1903 if (!F || F->isDeclaration())
1904 indicatePessimisticFixpoint();
1905 }
1906
1907 /// See AbstractAttribute::updateImpl(...).
1908 ChangeStatus updateImpl(Attributor &A) override {
1909 // TODO: Once we have call site specific value information we can provide
1910 // call site specific liveness information and then it makes
1911 // sense to specialize attributes for call sites arguments instead of
1912 // redirecting requests to the callee argument.
1913 Function *F = getAssociatedFunction();
1914 const IRPosition &FnPos = IRPosition::function(*F);
1915 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1916 return clampStateAndIndicateChange(getState(), FnAA.getState());
1917 }
1918
1919 /// See AbstractAttribute::trackStatistics()
1920  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1921};
1922
1923/// -------------------- Undefined-Behavior Attributes ------------------------
1924
1925struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1926 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1927 : AAUndefinedBehavior(IRP, A) {}
1928
1929 /// See AbstractAttribute::updateImpl(...).
1930 // through a pointer (i.e. also branches etc.)
1931 ChangeStatus updateImpl(Attributor &A) override {
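    // Record the current set sizes so we can tell at the end whether this
    // update discovered any new (no-)UB instructions.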
1932 const size_t UBPrevSize = KnownUBInsts.size();
1933 const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1934
1935 auto InspectMemAccessInstForUB = [&](Instruction &I) {
1936 // Skip instructions that are already saved.
1937 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1938 return true;
1939
1940 // If we reach here, we know we have an instruction
1941 // that accesses memory through a pointer operand,
1942 // for which getPointerOperand() should give it to us.
1943 const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1944      assert(PtrOp &&
1945             "Expected pointer operand of memory accessing instruction");
1946
1947 // Either we stopped and the appropriate action was taken,
1948 // or we got back a simplified value to continue.
1949 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1950 if (!SimplifiedPtrOp.hasValue())
1951 return true;
1952 const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1953
1954 // A memory access through a pointer is considered UB
1955 // only if the pointer has constant null value.
1956 // TODO: Expand it to not only check constant values.
1957 if (!isa<ConstantPointerNull>(PtrOpVal)) {
1958 AssumedNoUBInsts.insert(&I);
1959 return true;
1960 }
1961 const Type *PtrTy = PtrOpVal->getType();
1962
1963 // Because we only consider instructions inside functions,
1964 // assume that a parent function exists.
1965 const Function *F = I.getFunction();
1966
1967 // A memory access using constant null pointer is only considered UB
1968 // if null pointer is _not_ defined for the target platform.
1969 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1970 AssumedNoUBInsts.insert(&I);
1971 else
1972 KnownUBInsts.insert(&I);
1973 return true;
1974 };
1975
1976 auto InspectBrInstForUB = [&](Instruction &I) {
1977 // A conditional branch instruction is considered UB if it has `undef`
1978 // condition.
1979
1980 // Skip instructions that are already saved.
1981 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1982 return true;
1983
1984 // We know we have a branch instruction.
1985 auto BrInst = cast<BranchInst>(&I);
1986
1987 // Unconditional branches are never considered UB.
1988 if (BrInst->isUnconditional())
1989 return true;
1990
1991 // Either we stopped and the appropriate action was taken,
1992 // or we got back a simplified value to continue.
1993 Optional<Value *> SimplifiedCond =
1994 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1995 if (!SimplifiedCond.hasValue())
1996 return true;
1997 AssumedNoUBInsts.insert(&I);
1998 return true;
1999 };
2000
2001 auto InspectCallSiteForUB = [&](Instruction &I) {
2002      // Check whether a call site always causes UB or not.
2003
2004 // Skip instructions that are already saved.
2005 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2006 return true;
2007
2008 // Check nonnull and noundef argument attribute violation for each
2009 // callsite.
2010 CallBase &CB = cast<CallBase>(I);
2011 Function *Callee = CB.getCalledFunction();
2012 if (!Callee)
2013 return true;
2014 for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2015        // If the current argument is known to be simplified to a null pointer and the
2016 // corresponding argument position is known to have nonnull attribute,
2017 // the argument is poison. Furthermore, if the argument is poison and
2018        // the position is known to have the noundef attribute, this call site is
2019 // considered UB.
2020 if (idx >= Callee->arg_size())
2021 break;
2022 Value *ArgVal = CB.getArgOperand(idx);
2023 if (!ArgVal)
2024 continue;
2025 // Here, we handle three cases.
2026 // (1) Not having a value means it is dead. (we can replace the value
2027 // with undef)
2028        // (2) Simplified to undef. The argument violates the noundef attribute.
2029        // (3) Simplified to null pointer where known to be nonnull.
2030        //     The argument is a poison value and violates the noundef attribute.
2031 IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2032 auto &NoUndefAA =
2033 A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2034 if (!NoUndefAA.isKnownNoUndef())
2035 continue;
2036 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2037 *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2038 if (!ValueSimplifyAA.isKnown())
2039 continue;
2040 Optional<Value *> SimplifiedVal =
2041 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2042 if (!SimplifiedVal.hasValue() ||
2043 isa<UndefValue>(*SimplifiedVal.getValue())) {
2044 KnownUBInsts.insert(&I);
2045 continue;
2046 }
2047 if (!ArgVal->getType()->isPointerTy() ||
2048 !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2049 continue;
2050 auto &NonNullAA =
2051 A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2052 if (NonNullAA.isKnownNonNull())
2053 KnownUBInsts.insert(&I);
2054 }
2055 return true;
2056 };
2057
2058 auto InspectReturnInstForUB =
2059 [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2060          // Check if a return instruction always causes UB or not.
2061 // Note: It is guaranteed that the returned position of the anchor
2062 // scope has noundef attribute when this is called.
2063 // We also ensure the return position is not "assumed dead"
2064 // because the returned value was then potentially simplified to
2065 // `undef` in AAReturnedValues without removing the `noundef`
2066 // attribute yet.
2067
2068          // When the returned position has the noundef attribute, UB occurs in the
2069 // following cases.
2070 // (1) Returned value is known to be undef.
2071 // (2) The value is known to be a null pointer and the returned
2072 // position has nonnull attribute (because the returned value is
2073 // poison).
2074 bool FoundUB = false;
2075 if (isa<UndefValue>(V)) {
2076 FoundUB = true;
2077 } else {
2078 if (isa<ConstantPointerNull>(V)) {
2079 auto &NonNullAA = A.getAAFor<AANonNull>(
2080 *this, IRPosition::returned(*getAnchorScope()),
2081 DepClassTy::NONE);
2082 if (NonNullAA.isKnownNonNull())
2083 FoundUB = true;
2084 }
2085 }
2086
2087 if (FoundUB)
2088 for (ReturnInst *RI : RetInsts)
2089 KnownUBInsts.insert(RI);
2090 return true;
2091 };
2092
2093 A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2094 {Instruction::Load, Instruction::Store,
2095 Instruction::AtomicCmpXchg,
2096 Instruction::AtomicRMW},
2097 /* CheckBBLivenessOnly */ true);
2098 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2099 /* CheckBBLivenessOnly */ true);
2100 A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2101
2102    // If the returned position of the anchor scope has the noundef attribute,
2103    // check all return instructions.
2104 if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2105 const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2106 if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2107 auto &RetPosNoUndefAA =
2108 A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2109 if (RetPosNoUndefAA.isKnownNoUndef())
2110 A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2111 *this);
2112 }
2113 }
2114
2115 if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2116 UBPrevSize != KnownUBInsts.size())
2117 return ChangeStatus::CHANGED;
2118 return ChangeStatus::UNCHANGED;
2119 }
2120
2121 bool isKnownToCauseUB(Instruction *I) const override {
2122 return KnownUBInsts.count(I);
2123 }
2124
2125 bool isAssumedToCauseUB(Instruction *I) const override {
2126    // In simple words, if an instruction is not in the set assumed to _not_
2127    // cause UB, then it is assumed to cause UB (that includes those
2128    // in the KnownUBInsts set). The rest of the boilerplate
2129    // is there to ensure that it is one of the instructions we test
2130    // for UB.
2131
2132 switch (I->getOpcode()) {
2133 case Instruction::Load:
2134 case Instruction::Store:
2135 case Instruction::AtomicCmpXchg:
2136 case Instruction::AtomicRMW:
2137 return !AssumedNoUBInsts.count(I);
2138 case Instruction::Br: {
2139 auto BrInst = cast<BranchInst>(I);
2140 if (BrInst->isUnconditional())
2141 return false;
2142 return !AssumedNoUBInsts.count(I);
2143 } break;
2144 default:
2145 return false;
2146 }
2147 return false;
2148 }
2149
2150 ChangeStatus manifest(Attributor &A) override {
2151 if (KnownUBInsts.empty())
2152 return ChangeStatus::UNCHANGED;
2153 for (Instruction *I : KnownUBInsts)
2154 A.changeToUnreachableAfterManifest(I);
2155 return ChangeStatus::CHANGED;
2156 }
2157
2158 /// See AbstractAttribute::getAsStr()
2159 const std::string getAsStr() const override {
2160 return getAssumed() ? "undefined-behavior" : "no-ub";
2161 }
2162
2163 /// Note: The correctness of this analysis depends on the fact that the
2164 /// following 2 sets will stop changing after some point.
2165 /// "Change" here means that their size changes.
2166 /// The size of each set is monotonically increasing
2167 /// (we only add items to them) and it is upper bounded by the number of
2168 /// instructions in the processed function (we can never save more
2169 /// elements in either set than this number). Hence, at some point,
2170 /// they will stop increasing.
2171 /// Consequently, at some point, both sets will have stopped
2172 /// changing, effectively making the analysis reach a fixpoint.
2173
2174 /// Note: These 2 sets are disjoint and an instruction can be considered
2175 /// one of 3 things:
2176 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2177 /// the KnownUBInsts set.
2178 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2179 /// has a reason to assume it).
2180 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2181 /// could not find a reason to assume or prove that it can cause UB,
2182 /// hence it assumes it doesn't. We have a set for these instructions
2183 /// so that we don't reprocess them in every update.
2184 /// Note however that instructions in this set may cause UB.
2185
2186protected:
2187 /// A set of all live instructions _known_ to cause UB.
2188 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2189
2190private:
2191 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2192 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2193
2194 // Should be called during updates: if we're processing an instruction
2195 // \p I that depends on a value \p V, one of the following has to happen:
2196 // - If the value is assumed, then stop.
2197 // - If the value is known but undef, then consider it UB.
2198 // - Otherwise, do specific processing with the simplified value.
2199 // We return None in the first 2 cases to signify that an appropriate
2200 // action was taken and the caller should stop.
2201 // Otherwise, we return the simplified value that the caller should
2202 // use for specific processing.
2203 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2204 Instruction *I) {
2205 const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2206 *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2207 Optional<Value *> SimplifiedV =
2208 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2209 if (!ValueSimplifyAA.isKnown()) {
2210 // Don't depend on assumed values.
2211 return llvm::None;
2212 }
2213 if (!SimplifiedV.hasValue()) {
2214 // If it is known (which we tested above) but it doesn't have a value,
2215 // then we can assume `undef` and hence the instruction is UB.
2216 KnownUBInsts.insert(I);
2217 return llvm::None;
2218 }
2219 Value *Val = SimplifiedV.getValue();
2220 if (isa<UndefValue>(Val)) {
2221 KnownUBInsts.insert(I);
2222 return llvm::None;
2223 }
2224 return Val;
2225 }
2226};
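The fixpoint note above can be made concrete with a small standalone sketch. This is an editorial illustration, not part of AttributorAttributes.cpp: all names here (InstId, classifyOnce, runToFixpoint-style loop) are hypothetical. The point is only that two sets which can only grow, and are bounded by the number of processed instructions, force a size-based change check to eventually report UNCHANGED.

#include <cstdio>
#include <set>
#include <vector>

using InstId = int;

enum class ChangeSketch { CHANGED, UNCHANGED };

// One update round: pretend instruction i is proven UB when i % 3 == 0 and
// assumed not-UB otherwise. Both sets grow monotonically.
static ChangeSketch classifyOnce(const std::vector<InstId> &Insts,
                                 std::set<InstId> &KnownUB,
                                 std::set<InstId> &AssumedNoUB) {
  size_t UBBefore = KnownUB.size(), NoUBBefore = AssumedNoUB.size();
  for (InstId I : Insts)
    (I % 3 == 0 ? KnownUB : AssumedNoUB).insert(I);
  return (KnownUB.size() != UBBefore || AssumedNoUB.size() != NoUBBefore)
             ? ChangeSketch::CHANGED
             : ChangeSketch::UNCHANGED;
}

int main() {
  std::vector<InstId> Insts{1, 2, 3, 4, 5, 6};
  std::set<InstId> KnownUB, AssumedNoUB;
  int Rounds = 0;
  while (classifyOnce(Insts, KnownUB, AssumedNoUB) == ChangeSketch::CHANGED)
    ++Rounds;
  // Reaches the fixpoint after a single changing round here.
  std::printf("rounds=%d known=%zu assumed-no-ub=%zu\n", Rounds,
              KnownUB.size(), AssumedNoUB.size());
}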
2227
2228struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2229 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2230 : AAUndefinedBehaviorImpl(IRP, A) {}
2231
2232 /// See AbstractAttribute::trackStatistics()
2233 void trackStatistics() const override {
2234 STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2235            "Number of instructions known to have UB");
2236 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2237 KnownUBInsts.size();
2238 }
2239};
2240
2241/// ------------------------ Will-Return Attributes ----------------------------
2242
2243// Helper function that checks whether a function has any cycle which we don't
2244// know to be bounded.
2245// Loops with a maximum trip count are considered bounded; any other cycle is not.
2246static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2247 ScalarEvolution *SE =
2248 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2249 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2250 // If either SCEV or LoopInfo is not available for the function then we
2251 // assume any cycle to be an unbounded cycle.
2252 // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2253 // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2254 if (!SE || !LI) {
2255 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2256 if (SCCI.hasCycle())
2257 return true;
2258 return false;
2259 }
2260
2261 // If there's irreducible control, the function may contain non-loop cycles.
2262 if (mayContainIrreducibleControl(F, LI))
2263 return true;
2264
2265 // Any loop that does not have a max trip count is considered an unbounded cycle.
2266 for (auto *L : LI->getLoopsInPreorder()) {
2267 if (!SE->getSmallConstantMaxTripCount(L))
2268 return true;
2269 }
2270 return false;
2271}
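A minimal standalone sketch of the bounded/unbounded classification described above, assuming the per-loop maximum trip counts are already known and using 0 to encode "no constant bound". All names are hypothetical and no LLVM API is involved; this only mirrors the decision rule, not the SCC or SCEV machinery.

#include <cstdio>
#include <vector>

// Returns true if any cycle must be treated as unbounded: either the CFG has
// irreducible control flow (non-loop cycles) or some loop has no known
// constant maximum trip count (encoded here as 0).
static bool mayContainUnboundedCycleSketch(
    bool HasIrreducibleControl, const std::vector<unsigned> &MaxTripCounts) {
  if (HasIrreducibleControl)
    return true;
  for (unsigned MaxTrip : MaxTripCounts)
    if (MaxTrip == 0)
      return true;
  return false;
}

int main() {
  // Two loops: one with max trip count 8, one with an unknown bound (0).
  std::printf("%d\n", mayContainUnboundedCycleSketch(false, {8, 0})); // 1
  std::printf("%d\n", mayContainUnboundedCycleSketch(false, {8, 4})); // 0
}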
2272
2273struct AAWillReturnImpl : public AAWillReturn {
2274 AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2275 : AAWillReturn(IRP, A) {}
2276
2277 /// See AbstractAttribute::initialize(...).
2278 void initialize(Attributor &A) override {
2279 AAWillReturn::initialize(A);
2280
2281 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2282 indicateOptimisticFixpoint();
2283 return;
2284 }
2285 }
2286
2287 /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2288 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2289 // Check for `mustprogress` in the scope and the associated function which
2290 // might be different if this is a call site.
2291 if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2292 (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2293 return false;
2294
2295 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2296 DepClassTy::NONE);
2297 if (!MemAA.isAssumedReadOnly())
2298 return false;
2299 if (KnownOnly && !MemAA.isKnownReadOnly())
2300 return false;
2301 if (!MemAA.isKnownReadOnly())
2302 A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2303
2304 return true;
2305 }
2306
2307 /// See AbstractAttribute::updateImpl(...).
2308 ChangeStatus updateImpl(Attributor &A) override {
2309 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2310 return ChangeStatus::UNCHANGED;
2311
2312 auto CheckForWillReturn = [&](Instruction &I) {
2313 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2314 const auto &WillReturnAA =
2315 A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2316 if (WillReturnAA.isKnownWillReturn())
2317 return true;
2318 if (!WillReturnAA.isAssumedWillReturn())
2319 return false;
2320 const auto &NoRecurseAA =
2321 A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2322 return NoRecurseAA.isAssumedNoRecurse();
2323 };
2324
2325 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2326 return indicatePessimisticFixpoint();
2327
2328 return ChangeStatus::UNCHANGED;
2329 }
2330
2331 /// See AbstractAttribute::getAsStr()
2332 const std::string getAsStr() const override {
2333 return getAssumed() ? "willreturn" : "may-noreturn";
2334 }
2335};
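A minimal sketch of the known/assumed split in isImpliedByMustprogressAndReadonly above, with plain booleans standing in for the AAMemoryBehavior queries. The names (MemInfoSketch, impliedWillReturn) are hypothetical; the sketch only shows that `willreturn` follows from `mustprogress` plus (at least assumed) `readonly`, and that a "known" result must not rest on assumed-only information.

#include <cstdio>

struct MemInfoSketch {
  bool KnownReadOnly;
  bool AssumedReadOnly;
};

static bool impliedWillReturn(bool MustProgress, const MemInfoSketch &Mem,
                              bool KnownOnly) {
  if (!MustProgress)
    return false;
  if (!Mem.AssumedReadOnly)
    return false;
  // For a "known" result we must not rely on assumed-only information.
  if (KnownOnly && !Mem.KnownReadOnly)
    return false;
  return true;
}

int main() {
  MemInfoSketch Mem{/*KnownReadOnly=*/false, /*AssumedReadOnly=*/true};
  std::printf("%d\n", impliedWillReturn(true, Mem, /*KnownOnly=*/true));  // 0
  std::printf("%d\n", impliedWillReturn(true, Mem, /*KnownOnly=*/false)); // 1
}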
2336
2337struct AAWillReturnFunction final : AAWillReturnImpl {
2338 AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2339 : AAWillReturnImpl(IRP, A) {}
2340
2341 /// See AbstractAttribute::initialize(...).
2342 void initialize(Attributor &A) override {
2343 AAWillReturnImpl::initialize(A);
2344
2345 Function *F = getAnchorScope();
2346 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2347 indicatePessimisticFixpoint();
2348 }
2349
2350 /// See AbstractAttribute::trackStatistics()
2351 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2352};
2353
2354/// WillReturn attribute deduction for a call site.
2355struct AAWillReturnCallSite final : AAWillReturnImpl {
2356 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2357 : AAWillReturnImpl(IRP, A) {}
2358
2359 /// See AbstractAttribute::initialize(...).
2360 void initialize(Attributor &A) override {
2361 AAWillReturnImpl::initialize(A);
2362 Function *F = getAssociatedFunction();
2363 if (!F || !A.isFunctionIPOAmendable(*F))
2364 indicatePessimisticFixpoint();
2365 }
2366
2367 /// See AbstractAttribute::updateImpl(...).
2368 ChangeStatus updateImpl(Attributor &A) override {
2369 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2370 return ChangeStatus::UNCHANGED;
2371
2372 // TODO: Once we have call site specific value information we can provide
2373 // call site specific liveness information and then it makes
2374 // sense to specialize attributes for call site arguments instead of
2375 // redirecting requests to the callee argument.
2376 Function *F = getAssociatedFunction();
2377 const IRPosition &FnPos = IRPosition::function(*F);
2378 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2379 return clampStateAndIndicateChange(getState(), FnAA.getState());
2380 }
2381
2382 /// See AbstractAttribute::trackStatistics()
2383 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2384};
2385
2386/// -------------------AAReachability Attribute--------------------------
2387
2388struct AAReachabilityImpl : AAReachability {
2389 AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2390 : AAReachability(IRP, A) {}
2391
2392 const std::string getAsStr() const override {
2393 // TODO: Return the number of reachable queries.
2394 return "reachable";
2395 }
2396
2397 /// See AbstractAttribute::initialize(...).
2398 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2399
2400 /// See AbstractAttribute::updateImpl(...).
2401 ChangeStatus updateImpl(Attributor &A) override {
2402 return indicatePessimisticFixpoint();
2403 }
2404};
2405
2406struct AAReachabilityFunction final : public AAReachabilityImpl {
2407 AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2408 : AAReachabilityImpl(IRP, A) {}
2409
2410 /// See AbstractAttribute::trackStatistics()
2411 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2412};
2413
2414/// ------------------------ NoAlias Argument Attribute ------------------------
2415
2416struct AANoAliasImpl : AANoAlias {
2417 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2418 assert(getAssociatedType()->isPointerTy() &&
2419        "Noalias is a pointer attribute");
2420 }
2421
2422 const std::string getAsStr() const override {
2423 return getAssumed() ? "noalias" : "may-alias";
2424 }
2425};
2426
2427/// NoAlias attribute for a floating value.
2428struct AANoAliasFloating final : AANoAliasImpl {
2429 AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2430 : AANoAliasImpl(IRP, A) {}
2431
2432 /// See AbstractAttribute::initialize(...).
2433 void initialize(Attributor &A) override {
2434 AANoAliasImpl::initialize(A);
2435 Value *Val = &getAssociatedValue();
2436 do {
2437 CastInst *CI = dyn_cast<CastInst>(Val);
2438 if (!CI)
2439 break;
2440 Value *Base = CI->getOperand(0);
2441 if (!Base->hasOneUse())
2442 break;
2443 Val = Base;
2444 } while (true);
2445
2446 if (!Val->getType()->isPointerTy()) {
2447 indicatePessimisticFixpoint();
2448 return;
2449 }
2450
2451 if (isa<AllocaInst>(Val))
2452 indicateOptimisticFixpoint();
2453 else if (isa<ConstantPointerNull>(Val) &&
2454 !NullPointerIsDefined(getAnchorScope(),
2455 Val->getType()->getPointerAddressSpace()))
2456 indicateOptimisticFixpoint();
2457 else if (Val != &getAssociatedValue()) {
2458 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2459 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2460 if (ValNoAliasAA.isKnownNoAlias())
2461 indicateOptimisticFixpoint();
2462 }
2463 }
2464
2465 /// See AbstractAttribute::updateImpl(...).
2466 ChangeStatus updateImpl(Attributor &A) override {
2467 // TODO: Implement this.
2468 return indicatePessimisticFixpoint();
2469 }
2470
2471 /// See AbstractAttribute::trackStatistics()
2472 void trackStatistics() const override {
2473 STATS_DECLTRACK_FLOATING_ATTR(noalias)
2474 }
2475};
2476
2477/// NoAlias attribute for an argument.
2478struct AANoAliasArgument final
2479 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2480 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2481 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2482
2483 /// See AbstractAttribute::initialize(...).
2484 void initialize(Attributor &A) override {
2485 Base::initialize(A);
2486 // See callsite argument attribute and callee argument attribute.
2487 if (hasAttr({Attribute::ByVal}))
2488 indicateOptimisticFixpoint();
2489 }
2490
2491 /// See AbstractAttribute::update(...).
2492 ChangeStatus updateImpl(Attributor &A) override {
2493 // We have to make sure no-alias on the argument does not break
2494 // synchronization when this is a callback argument, see also [1] below.
2495 // If synchronization cannot be affected, we delegate to the base updateImpl
2496 // function, otherwise we give up for now.
2497
2498 // If the function is no-sync, no-alias cannot break synchronization.
2499 const auto &NoSyncAA =
2500 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2501 DepClassTy::OPTIONAL);
2502 if (NoSyncAA.isAssumedNoSync())
2503 return Base::updateImpl(A);
2504
2505 // If the argument is read-only, no-alias cannot break synchronization.
2506 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2507 *this, getIRPosition(), DepClassTy::OPTIONAL);
2508 if (MemBehaviorAA.isAssumedReadOnly())
2509 return Base::updateImpl(A);
2510
2511 // If the argument is never passed through callbacks, no-alias cannot break
2512 // synchronization.
2513 bool AllCallSitesKnown;
2514 if (A.checkForAllCallSites(
2515 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2516 true, AllCallSitesKnown))
2517 return Base::updateImpl(A);
2518
2519 // TODO: add no-alias but make sure it doesn't break synchronization by
2520 // introducing fake uses. See:
2521 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2522 // International Workshop on OpenMP 2018,
2523 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2524
2525 return indicatePessimisticFixpoint();
2526 }
2527
2528 /// See AbstractAttribute::trackStatistics()
2529 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2530};
2531
2532struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2533 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2534 : AANoAliasImpl(IRP, A) {}
2535
2536 /// See AbstractAttribute::initialize(...).
2537 void initialize(Attributor &A) override {
2538 // See callsite argument attribute and callee argument attribute.
2539 const auto &CB = cast<CallBase>(getAnchorValue());
2540 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2541 indicateOptimisticFixpoint();
2542 Value &Val = getAssociatedValue();
2543 if (isa<ConstantPointerNull>(Val) &&
2544 !NullPointerIsDefined(getAnchorScope(),
2545 Val.getType()->getPointerAddressSpace()))
2546 indicateOptimisticFixpoint();
2547 }
2548
2549 /// Determine if the underlying value may alias with the call site argument
2550 /// \p OtherArgNo of \p CB (= the underlying call site).
2551 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2552 const AAMemoryBehavior &MemBehaviorAA,
2553 const CallBase &CB, unsigned OtherArgNo) {
2554 // We do not need to worry about aliasing with the underlying IRP.
2555 if (this->getCalleeArgNo() == (int)OtherArgNo)
2556 return false;
2557
2558 // If it is not a pointer or pointer vector we do not alias.
2559 const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2560 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2561 return false;
2562
2563 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2564 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2565
2566 // If the argument is readnone, there is no read-write aliasing.
2567 if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2568 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2569 return false;
2570 }
2571
2572 // If the argument is readonly and the underlying value is readonly, there
2573 // is no read-write aliasing.
2574 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2575 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2576 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2577 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2578 return false;
2579 }
2580
2581 // We have to utilize actual alias analysis queries so we need the object.
2582 if (!AAR)
2583 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2584
2585 // Try to rule it out at the call site.
2586 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2587 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2588                      "callsite arguments: "
2589                   << getAssociatedValue() << " " << *ArgOp << " => "
2590                   << (IsAliasing ? "" : "no-") << "alias \n");
2591
2592 return IsAliasing;
2593 }
2594
2595 bool
2596 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2597 const AAMemoryBehavior &MemBehaviorAA,
2598 const AANoAlias &NoAliasAA) {
2599 // We can deduce "noalias" if the following conditions hold.
2600 // (i) Associated value is assumed to be noalias in the definition.
2601 // (ii) Associated value is assumed to be no-capture in all the uses
2602 // possibly executed before this callsite.
2603 // (iii) There is no other pointer argument which could alias with the
2604 // value.
2605
2606 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2607 if (!AssociatedValueIsNoAliasAtDef) {
2608 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2609                   << " is not no-alias at the definition\n");
2610 return false;
2611 }
2612
2613 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2614
2615 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2616 const Function *ScopeFn = VIRP.getAnchorScope();
2617 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2618 // Check whether the value is captured in the scope using AANoCapture.
2619 // Look at CFG and check only uses possibly executed before this
2620 // callsite.
2621 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2622 Instruction *UserI = cast<Instruction>(U.getUser());
2623
2624 // If UserI is the current instruction and there is a single potential use of
2625 // the value in UserI, we allow the use.
2626 // TODO: We should inspect the operands and allow those that cannot alias
2627 // with the value.
2628 if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2629 return true;
2630
2631 if (ScopeFn) {
2632 const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2633 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2634
2635 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2636 return true;
2637
2638 if (auto *CB = dyn_cast<CallBase>(UserI)) {
2639 if (CB->isArgOperand(&U)) {
2640
2641 unsigned ArgNo = CB->getArgOperandNo(&U);
2642
2643 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2644 *this, IRPosition::callsite_argument(*CB, ArgNo),
2645 DepClassTy::OPTIONAL);
2646
2647 if (NoCaptureAA.isAssumedNoCapture())
2648 return true;
2649 }
2650 }
2651 }
2652
2653 // For cases which can potentially have more users
2654 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2655 isa<SelectInst>(U)) {
2656 Follow = true;
2657 return true;
2658 }
2659
2660 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2661 return false;
2662 };
2663
2664 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2665 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2666 LLVM_DEBUG(
2667     dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2668            << " cannot be noalias as it is potentially captured\n");
2669 return false;
2670 }
2671 }
2672 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2673
2674 // Check there is no other pointer argument which could alias with the
2675 // value passed at this call site.
2676 // TODO: AbstractCallSite
2677 const auto &CB = cast<CallBase>(getAnchorValue());
2678 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2679 OtherArgNo++)
2680 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2681 return false;
2682
2683 return true;
2684 }
2685
2686 /// See AbstractAttribute::updateImpl(...).
2687 ChangeStatus updateImpl(Attributor &A) override {
2688 // If the argument is readnone we are done as there are no accesses via the
2689 // argument.
2690 auto &MemBehaviorAA =
2691 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2692 if (MemBehaviorAA.isAssumedReadNone()) {
2693 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2694 return ChangeStatus::UNCHANGED;
2695 }
2696
2697 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2698 const auto &NoAliasAA =
2699 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2700
2701 AAResults *AAR = nullptr;
2702 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2703 NoAliasAA)) {
2704 LLVM_DEBUG(
2705     dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2706 return ChangeStatus::UNCHANGED;
2707 }
2708
2709 return indicatePessimisticFixpoint();
2710 }
2711
2712 /// See AbstractAttribute::trackStatistics()
2713 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2714};
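The conditions (i)-(iii) listed in isKnownNoAliasDueToNoAliasPreservation above reduce to a conjunction. The following is a standalone sketch with hypothetical stand-in types (ArgInfoSketch, knownNoAliasAtCallSite); it models only the decision shape, not the use-walking or alias-analysis queries.

#include <cstdio>
#include <vector>

struct ArgInfoSketch {
  bool NoAliasAtDef;           // (i)  value is noalias where it is defined
  bool CapturedBeforeCallSite; // (ii) a use before the call site may capture it
};

static bool knownNoAliasAtCallSite(const ArgInfoSketch &Arg,
                                   const std::vector<bool> &OtherArgMayAlias) {
  if (!Arg.NoAliasAtDef || Arg.CapturedBeforeCallSite)
    return false;
  // (iii) every other pointer argument must be provably non-aliasing.
  for (bool MayAlias : OtherArgMayAlias)
    if (MayAlias)
      return false;
  return true;
}

int main() {
  ArgInfoSketch Arg{/*NoAliasAtDef=*/true, /*CapturedBeforeCallSite=*/false};
  std::printf("%d\n", knownNoAliasAtCallSite(Arg, {false, false})); // 1
  std::printf("%d\n", knownNoAliasAtCallSite(Arg, {false, true}));  // 0
}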
2715
2716/// NoAlias attribute for function return value.
2717struct AANoAliasReturned final : AANoAliasImpl {
2718 AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2719 : AANoAliasImpl(IRP, A) {}
2720
2721 /// See AbstractAttribute::initialize(...).
2722 void initialize(Attributor &A) override {
2723 AANoAliasImpl::initialize(A);
2724 Function *F = getAssociatedFunction();
2725 if (!F || F->isDeclaration())
2726 indicatePessimisticFixpoint();
2727 }
2728
2729 /// See AbstractAttribute::updateImpl(...).
2730 virtual ChangeStatus updateImpl(Attributor &A) override {
2731
2732 auto CheckReturnValue = [&](Value &RV) -> bool {
2733 if (Constant *C = dyn_cast<Constant>(&RV))
2734 if (C->isNullValue() || isa<UndefValue>(C))
2735 return true;
2736
2737 /// For now, we can only deduce noalias if we have call sites.
2738 /// FIXME: add more support.
2739 if (!isa<CallBase>(&RV))
2740 return false;
2741
2742 const IRPosition &RVPos = IRPosition::value(RV);
2743 const auto &NoAliasAA =
2744 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2745 if (!NoAliasAA.isAssumedNoAlias())
2746 return false;
2747
2748 const auto &NoCaptureAA =
2749 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2750 return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2751 };
2752
2753 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2754 return indicatePessimisticFixpoint();
2755
2756 return ChangeStatus::UNCHANGED;
2757 }
2758
2759 /// See AbstractAttribute::trackStatistics()
2760 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2761};
2762
2763/// NoAlias attribute deduction for a call site return value.
2764struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2765 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2766 : AANoAliasImpl(IRP, A) {}
2767
2768 /// See AbstractAttribute::initialize(...).
2769 void initialize(Attributor &A) override {
2770 AANoAliasImpl::initialize(A);
2771 Function *F = getAssociatedFunction();
2772 if (!F || F->isDeclaration())
2773 indicatePessimisticFixpoint();
2774 }
2775
2776 /// See AbstractAttribute::updateImpl(...).
2777 ChangeStatus updateImpl(Attributor &A) override {
2778 // TODO: Once we have call site specific value information we can provide
2779 // call site specific liveness information and then it makes
2780 // sense to specialize attributes for call site arguments instead of
2781 // redirecting requests to the callee argument.
2782 Function *F = getAssociatedFunction();
2783 const IRPosition &FnPos = IRPosition::returned(*F);
2784 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2785 return clampStateAndIndicateChange(getState(), FnAA.getState());
2786 }
2787
2788 /// See AbstractAttribute::trackStatistics()
2789 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2790};
2791
2792/// -------------------AAIsDead Function Attribute-----------------------
2793
2794struct AAIsDeadValueImpl : public AAIsDead {
2795 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2796
2797 /// See AAIsDead::isAssumedDead().
2798 bool isAssumedDead() const override { return getAssumed(); }
2799
2800 /// See AAIsDead::isKnownDead().
2801 bool isKnownDead() const override { return getKnown(); }
2802
2803 /// See AAIsDead::isAssumedDead(BasicBlock *).
2804 bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2805
2806 /// See AAIsDead::isKnownDead(BasicBlock *).
2807 bool isKnownDead(const BasicBlock *BB) const override { return false; }
2808
2809 /// See AAIsDead::isAssumedDead(Instruction *I).
2810 bool isAssumedDead(const Instruction *I) const override {
2811 return I == getCtxI() && isAssumedDead();
2812 }
2813
2814 /// See AAIsDead::isKnownDead(Instruction *I).
2815 bool isKnownDead(const Instruction *I) const override {
2816 return isAssumedDead(I) && getKnown();
2817 }
2818
2819 /// See AbstractAttribute::getAsStr().
2820 const std::string getAsStr() const override {
2821 return isAssumedDead() ? "assumed-dead" : "assumed-live";
2822 }
2823
2824 /// Check if all uses are assumed dead.
2825 bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2826 auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2827 // Explicitly set the dependence class to required because we want a long
2828 // chain of N dependent instructions to be considered live as soon as one is
2829 // without going through N update cycles. This is not required for
2830 // correctness.
2831 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2832 }
2833
2834 /// Determine if \p I is assumed to be side-effect free.
2835 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2836 if (!I || wouldInstructionBeTriviallyDead(I))
2837 return true;
2838
2839 auto *CB = dyn_cast<CallBase>(I);
2840 if (!CB || isa<IntrinsicInst>(CB))
2841 return false;
2842
2843 const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2844 const auto &NoUnwindAA =
2845 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2846 if (!NoUnwindAA.isAssumedNoUnwind())
2847 return false;
2848 if (!NoUnwindAA.isKnownNoUnwind())
2849 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2850
2851 const auto &MemBehaviorAA =
2852 A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2853 if (MemBehaviorAA.isAssumedReadOnly()) {
2854 if (!MemBehaviorAA.isKnownReadOnly())
2855 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2856 return true;
2857 }
2858 return false;
2859 }
2860};
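A standalone sketch of the shape of isAssumedSideEffectFree above, with plain booleans standing in for wouldInstructionBeTriviallyDead and the nounwind/readonly abstract-attribute queries. All names here are hypothetical: an instruction is treated as side-effect free if it is trivially dead, or if it is a non-intrinsic call that is (at least assumed) nounwind and readonly.

#include <cstdio>

struct InstSummarySketch {
  bool TriviallyDead;
  bool IsCall;
  bool IsIntrinsic;
  bool AssumedNoUnwind;
  bool AssumedReadOnly;
};

static bool assumedSideEffectFree(const InstSummarySketch &I) {
  if (I.TriviallyDead)
    return true;
  if (!I.IsCall || I.IsIntrinsic)
    return false;
  return I.AssumedNoUnwind && I.AssumedReadOnly;
}

int main() {
  InstSummarySketch ReadOnlyCall{false, true, false, true, true};
  InstSummarySketch ThrowingCall{false, true, false, false, true};
  std::printf("%d %d\n", assumedSideEffectFree(ReadOnlyCall),
              assumedSideEffectFree(ThrowingCall)); // 1 0
}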
2861
2862struct AAIsDeadFloating : public AAIsDeadValueImpl {
2863 AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2864 : AAIsDeadValueImpl(IRP, A) {}
2865
2866 /// See AbstractAttribute::initialize(...).
2867 void initialize(Attributor &A) override {
2868 if (isa<UndefValue>(getAssociatedValue())) {
2869 indicatePessimisticFixpoint();
2870 return;
2871 }
2872
2873 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2874 if (!isAssumedSideEffectFree(A, I))
2875 indicatePessimisticFixpoint();
2876 }
2877
2878 /// See AbstractAttribute::updateImpl(...).
2879 ChangeStatus updateImpl(Attributor &A) override {
2880 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2881 if (!isAssumedSideEffectFree(A, I))
2882 return indicatePessimisticFixpoint();
2883
2884 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2885 return indicatePessimisticFixpoint();
2886 return ChangeStatus::UNCHANGED;
2887 }
2888
2889 /// See AbstractAttribute::manifest(...).
2890 ChangeStatus manifest(Attributor &A) override {
2891 Value &V = getAssociatedValue();
2892 if (auto *I = dyn_cast<Instruction>(&V)) {
2893 // If we get here we basically know the users are all dead. We check if
2894 // isAssumedSideEffectFree returns true here again because it might not be
2895 // the case: possibly only the users are dead but the instruction (= call)
2896 // is still needed.
2897 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2898 A.deleteAfterManifest(*I);
2899 return ChangeStatus::CHANGED;
2900 }
2901 }
2902 if (V.use_empty())
2903 return ChangeStatus::UNCHANGED;
2904
2905 bool UsedAssumedInformation = false;
2906 Optional<Constant *> C =
2907 A.getAssumedConstant(V, *this, UsedAssumedInformation);
2908 if (C.hasValue() && C.getValue())
2909 return ChangeStatus::UNCHANGED;
2910
2911 // Replace the value with undef as it is dead but keep droppable uses around
2912 // as they provide information we don't want to give up on just yet.
2913 UndefValue &UV = *UndefValue::get(V.getType());
2914 bool AnyChange =
2915 A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2916 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2917 }
2918
2919 /// See AbstractAttribute::trackStatistics()
2920 void trackStatistics() const override {
2921 STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2922 }
2923};
2924
2925struct AAIsDeadArgument : public AAIsDeadFloating {
2926 AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2927 : AAIsDeadFloating(IRP, A) {}
2928
2929 /// See AbstractAttribute::initialize(...).
2930 void initialize(Attributor &A) override {
2931 if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2932 indicatePessimisticFixpoint();
2933 }
2934
2935 /// See AbstractAttribute::manifest(...).
2936 ChangeStatus manifest(Attributor &A) override {
2937 ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2938 Argument &Arg = *getAssociatedArgument();
2939 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2940 if (A.registerFunctionSignatureRewrite(
2941 Arg, /* ReplacementTypes */ {},
2942 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2943 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2944 Arg.dropDroppableUses();
2945 return ChangeStatus::CHANGED;
2946 }
2947 return Changed;
2948 }
2949
2950 /// See AbstractAttribute::trackStatistics()
2951 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2952};
2953
2954struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2955 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2956 : AAIsDeadValueImpl(IRP, A) {}
2957
2958 /// See AbstractAttribute::initialize(...).
2959 void initialize(Attributor &A) override {
2960 if (isa<UndefValue>(getAssociatedValue()))
2961 indicatePessimisticFixpoint();
2962 }
2963
2964 /// See AbstractAttribute::updateImpl(...).
2965 ChangeStatus updateImpl(Attributor &A) override {
2966 // TODO: Once we have call site specific value information we can provide
2967 // call site specific liveness information and then it makes
2968 // sense to specialize attributes for call site arguments instead of
2969 // redirecting requests to the callee argument.
2970 Argument *Arg = getAssociatedArgument();
2971 if (!Arg)
2972 return indicatePessimisticFixpoint();
2973 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2974 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2975 return clampStateAndIndicateChange(getState(), ArgAA.getState());
2976 }
2977
2978 /// See AbstractAttribute::manifest(...).
2979 ChangeStatus manifest(Attributor &A) override {
2980 CallBase &CB = cast<CallBase>(getAnchorValue());
2981 Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2982 assert(!isa<UndefValue>(U.get()) &&
2983        "Expected undef values to be filtered out!");
2984 UndefValue &UV = *UndefValue::get(U->getType());
2985 if (A.changeUseAfterManifest(U, UV))
2986 return ChangeStatus::CHANGED;
2987 return ChangeStatus::UNCHANGED;
2988 }
2989
2990 /// See AbstractAttribute::trackStatistics()
2991 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2992};
2993
2994struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2995 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2996 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2997
2998 /// See AAIsDead::isAssumedDead().
2999 bool isAssumedDead() const override {
3000 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3001 }
3002
3003 /// See AbstractAttribute::initialize(...).
3004 void initialize(Attributor &A) override {
3005 if (isa<UndefValue>(getAssociatedValue())) {
3006 indicatePessimisticFixpoint();
3007 return;
3008 }
3009
3010 // We track this separately as a secondary state.
3011 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3012 }
3013
3014 /// See AbstractAttribute::updateImpl(...).
3015 ChangeStatus updateImpl(Attributor &A) override {
3016 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3017 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3018 IsAssumedSideEffectFree = false;
3019 Changed = ChangeStatus::CHANGED;
3020 }
3021
3022 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3023 return indicatePessimisticFixpoint();
3024 return Changed;
3025 }
3026
3027 /// See AbstractAttribute::trackStatistics()
3028 void trackStatistics() const override {
3029 if (IsAssumedSideEffectFree)
3030 STATS_DECLTRACK_CSRET_ATTR(IsDead)
3031 else
3032 STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3033 }
3034
3035 /// See AbstractAttribute::getAsStr().
3036 const std::string getAsStr() const override {
3037 return isAssumedDead()
3038 ? "assumed-dead"
3039 : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3040 }
3041
3042private:
3043 bool IsAssumedSideEffectFree;
3044};
3045
3046struct AAIsDeadReturned : public AAIsDeadValueImpl {
3047 AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3048 : AAIsDeadValueImpl(IRP, A) {}
3049
3050 /// See AbstractAttribute::updateImpl(...).
3051 ChangeStatus updateImpl(Attributor &A) override {
3052
3053 A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3054 {Instruction::Ret});
3055
3056 auto PredForCallSite = [&](AbstractCallSite ACS) {
3057 if (ACS.isCallbackCall() || !ACS.getInstruction())
3058 return false;
3059 return areAllUsesAssumedDead(A, *ACS.getInstruction());
3060 };
3061
3062 bool AllCallSitesKnown;
3063 if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3064 AllCallSitesKnown))
3065 return indicatePessimisticFixpoint();
3066
3067 return ChangeStatus::UNCHANGED;
3068 }
3069
3070 /// See AbstractAttribute::manifest(...).
3071 ChangeStatus manifest(Attributor &A) override {
3072 // TODO: Rewrite the signature to return void?
3073 bool AnyChange = false;
3074 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3075 auto RetInstPred = [&](Instruction &I) {
3076 ReturnInst &RI = cast<ReturnInst>(I);
3077 if (!isa<UndefValue>(RI.getReturnValue()))
3078 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3079 return true;
3080 };
3081 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3082 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3083 }
3084
3085 /// See AbstractAttribute::trackStatistics()
3086 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3087};
3088
3089struct AAIsDeadFunction : public AAIsDead {
3090 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3091
3092 /// See AbstractAttribute::initialize(...).
3093 void initialize(Attributor &A) override {
3094 const Function *F = getAnchorScope();
3095 if (F && !F->isDeclaration()) {
3096 // We only want to compute liveness once. If the function is not part of
3097 // the SCC, skip it.
3098 if (A.isRunOn(*const_cast<Function *>(F))) {
3099 ToBeExploredFrom.insert(&F->getEntryBlock().front());
3100 assumeLive(A, F->getEntryBlock());
3101 } else {
3102 indicatePessimisticFixpoint();
3103 }
3104 }
3105 }
3106
3107 /// See AbstractAttribute::getAsStr().
3108 const std::string getAsStr() const override {
3109 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3110 std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3111 std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3112 std::to_string(KnownDeadEnds.size()) + "]";
3113 }
3114
3115 /// See AbstractAttribute::manifest(...).
3116 ChangeStatus manifest(Attributor &A) override {
3117 assert(getState().isValidState() &&
3118        "Attempted to manifest an invalid state!");
3119
3120 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3121 Function &F = *getAnchorScope();
3122
3123 if (AssumedLiveBlocks.empty()) {
3124 A.deleteAfterManifest(F);
3125 return ChangeStatus::CHANGED;
3126 }
3127
3128 // Flag to determine if we can change an invoke to a call assuming the
3129 // callee is nounwind. This is not possible if the personality of the
3130 // function allows catching asynchronous exceptions.
3131 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3132
3133 KnownDeadEnds.set_union(ToBeExploredFrom);
3134 for (const Instruction *DeadEndI : KnownDeadEnds) {
3135 auto *CB = dyn_cast<CallBase>(DeadEndI);
3136 if (!CB)
3137 continue;
3138 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3139 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3140 bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3141 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3142 continue;
3143
3144 if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3145 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3146 else
3147 A.changeToUnreachableAfterManifest(
3148 const_cast<Instruction *>(DeadEndI->getNextNode()));
3149 HasChanged = ChangeStatus::CHANGED;
3150 }
3151
3152 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3153 for (BasicBlock &BB : F)
3154 if (!AssumedLiveBlocks.count(&BB)) {
3155 A.deleteAfterManifest(BB);
3156 ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3157 }
3158
3159 return HasChanged;
3160 }
3161
3162 /// See AbstractAttribute::updateImpl(...).
3163 ChangeStatus updateImpl(Attributor &A) override;
3164
3165 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3166 return !AssumedLiveEdges.count(std::make_pair(From, To));
3167 }
3168
3169 /// See AbstractAttribute::trackStatistics()
3170 void trackStatistics() const override {}
3171
3172 /// Returns true if the function is assumed dead.
3173 bool isAssumedDead() const override { return false; }
3174
3175 /// See AAIsDead::isKnownDead().
3176 bool isKnownDead() const override { return false; }
3177
3178 /// See AAIsDead::isAssumedDead(BasicBlock *).
3179 bool isAssumedDead(const BasicBlock *BB) const override {
3180 assert(BB->getParent() == getAnchorScope() &&
3181        "BB must be in the same anchor scope function.");
3182
3183 if (!getAssumed())
3184 return false;
3185 return !AssumedLiveBlocks.count(BB);
3186 }
3187
3188 /// See AAIsDead::isKnownDead(BasicBlock *).
3189 bool isKnownDead(const BasicBlock *BB) const override {
3190 return getKnown() && isAssumedDead(BB);
3191 }
3192
3193 /// See AAIsDead::isAssumedDead(Instruction *I).
3194 bool isAssumedDead(const Instruction *I) const override {
3195 assert(I->getParent()->getParent() == getAnchorScope() &&
3196        "Instruction must be in the same anchor scope function.");
3197
3198 if (!getAssumed())
3199 return false;
3200
3201 // If it is not in AssumedLiveBlocks then it is for sure dead.
3202 // Otherwise, it can still be after a noreturn call in a live block.
3203 if (!AssumedLiveBlocks.count(I->getParent()))
3204 return true;
3205
3206 // If it is not after a liveness barrier it is live.
3207 const Instruction *PrevI = I->getPrevNode();
3208 while (PrevI) {
3209 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3210 return true;
3211 PrevI = PrevI->getPrevNode();
3212 }
3213 return false;
3214 }
3215
3216 /// See AAIsDead::isKnownDead(Instruction *I).
3217 bool isKnownDead(const Instruction *I) const override {
3218 return getKnown() && isAssumedDead(I);
3219 }
3220
3221 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3222 /// that internal functions called from \p BB should now be looked at.
3223 bool assumeLive(Attributor &A, const BasicBlock &BB) {
3224 if (!AssumedLiveBlocks.insert(&BB).second)
3225 return false;
3226
3227 // We assume that all of BB is (probably) live now and if there are calls to
3228 // internal functions we will assume that those are now live as well. This
3229 // is a performance optimization for blocks with calls to a lot of internal
3230 // functions. It can however cause dead functions to be treated as live.
3231 for (const Instruction &I : BB)
3232 if (const auto *CB = dyn_cast<CallBase>(&I))
3233 if (const Function *F = CB->getCalledFunction())
3234 if (F->hasLocalLinkage())
3235 A.markLiveInternalFunction(*F);
3236 return true;
3237 }
3238
3239 /// Collection of instructions that need to be explored again, e.g., we
3240 /// did assume they do not transfer control to (one of their) successors.
3241 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3242
3243 /// Collection of instructions that are known to not transfer control.
3244 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3245
3246 /// Collection of all assumed live edges
3247 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3248
3249 /// Collection of all assumed live BasicBlocks.
3250 DenseSet<const BasicBlock *> AssumedLiveBlocks;
3251};
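A standalone sketch of the liveness exploration AAIsDeadFunction performs, reduced to a worklist over a toy CFG in which blocks ending at a no-return position stop the walk. Block, CFGSketch and computeLiveBlocks are hypothetical names and no LLVM API is used; the real pass additionally tracks live edges, exploration points, and assumed information.

#include <cstdio>
#include <map>
#include <set>
#include <vector>

using Block = int;

struct CFGSketch {
  std::map<Block, std::vector<Block>> Successors;
  std::set<Block> NoReturnEnds; // blocks whose control flow never continues
};

static std::set<Block> computeLiveBlocks(const CFGSketch &CFG, Block Entry) {
  std::set<Block> Live{Entry};
  std::vector<Block> Worklist{Entry};
  while (!Worklist.empty()) {
    Block B = Worklist.back();
    Worklist.pop_back();
    if (CFG.NoReturnEnds.count(B))
      continue; // acts like a known dead end: do not explore successors
    auto It = CFG.Successors.find(B);
    if (It == CFG.Successors.end())
      continue;
    for (Block S : It->second)
      if (Live.insert(S).second)
        Worklist.push_back(S);
  }
  return Live;
}

int main() {
  // 0 -> 1 -> {2, 3}, but block 1 ends at a no-return call site.
  CFGSketch CFG;
  CFG.Successors = {{0, {1}}, {1, {2, 3}}, {2, {}}, {3, {}}};
  CFG.NoReturnEnds = {1};
  std::set<Block> Live = computeLiveBlocks(CFG, 0);
  for (Block B : Live)
    std::printf("live block %d\n", B); // only blocks 0 and 1
}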
3252
3253static bool
3254identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3255 AbstractAttribute &AA,
3256 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3257 const IRPosition &IPos = IRPosition::callsite_function(CB);
3258
3259 const auto &NoReturnAA =
3260 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3261 if (NoReturnAA.isAssumedNoReturn())
3262 return !NoReturnAA.isKnownNoReturn();
3263 if (CB.isTerminator())
3264 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3265 else
3266 AliveSuccessors.push_back(CB.getNextNode());
3267 return false;
3268}
3269
3270static bool
3271identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3272 AbstractAttribute &AA,
3273 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3274 bool UsedAssumedInformation =
3275 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3276
3277 // First, determine if we can change an invoke to a call assuming the
3278 // callee is nounwind. This is not possible if the personality of the
3279 // function allows catching asynchronous exceptions.
3280 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3281 AliveSuccessors.push_back(&II.getUnwindDest()->front());
3282 } else {
3283 const IRPosition &IPos = IRPosition::callsite_function(II);
3284 const auto &AANoUnw =
3285 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3286 if (AANoUnw.isAssumedNoUnwind()) {
3287 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3288 } else {
3289 AliveSuccessors.push_back(&II.getUnwindDest()->front());
3290 }
3291 }
3292 return UsedAssumedInformation;
3293}
3294
3295static bool
3296identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3297 AbstractAttribute &AA,
3298 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3299 bool UsedAssumedInformation = false;
3300 if (BI.getNumSuccessors() == 1) {
3301 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3302 } else {
3303 Optional<ConstantInt *> CI = getAssumedConstantInt(
3304 A, *BI.getCondition(), AA, UsedAssumedInformation);
3305 if (!CI.hasValue()) {
3306 // No value yet, assume both edges are dead.
3307 } else if (CI.getValue()) {
3308 const BasicBlock *SuccBB =
3309 BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3310 AliveSuccessors.push_back(&SuccBB->front());
3311 } else {
3312 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3313 AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3314 UsedAssumedInformation = false;
3315 }
3316 }
3317 return UsedAssumedInformation;
3318}
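A standalone sketch of the conditional-branch case above, with an enum standing in for the Optional<ConstantInt *> returned by getAssumedConstantInt. The names (CondSketch, aliveBranchSuccessors) are hypothetical; the mapping mirrors BI.getSuccessor(1 - CI->getZExtValue()): a condition with no assumed value yet keeps both edges dead for now, a known constant keeps exactly one edge, and a condition known to not fold keeps both.

#include <cstdio>
#include <vector>

enum class CondSketch { Unknown, KnownTrue, KnownFalse, NotConstant };

// Successor 0 is the "true" edge, successor 1 the "false" edge.
static std::vector<int> aliveBranchSuccessors(CondSketch Cond) {
  switch (Cond) {
  case CondSketch::Unknown:
    return {};      // no value yet: optimistically assume both edges dead
  case CondSketch::KnownTrue:
    return {0};     // only the taken edge stays alive
  case CondSketch::KnownFalse:
    return {1};
  case CondSketch::NotConstant:
    return {0, 1};  // condition is known to not fold: both edges alive
  }
  return {};
}

int main() {
  for (CondSketch C : {CondSketch::Unknown, CondSketch::KnownTrue,
                       CondSketch::NotConstant})
    std::printf("alive successors: %zu\n", aliveBranchSuccessors(C).size());
}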
3319
3320static bool
3321identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3322 AbstractAttribute &AA,
3323 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3324 bool UsedAssumedInformation = false;
3325 Optional<ConstantInt *> CI =
3326 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3327 if (!CI.hasValue()) {
3328 // No value yet, assume all edges are dead.
3329 } else if (CI.getValue()) {
3330 for (auto &CaseIt : SI.cases()) {
3331 if (CaseIt.getCaseValue() == CI.getValue()) {
3332 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3333 return UsedAssumedInformation;
3334 }
3335 }
3336 AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3337 return UsedAssumedInformation;
3338 } else {
3339 for (const BasicBlock *SuccBB : successors(SI.getParent()))
3340 AliveSuccessors.push_back(&SuccBB->front());
3341 }
3342 return UsedAssumedInformation;
3343}
3344
3345ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3346 ChangeStatus Change = ChangeStatus::UNCHANGED;
3347
3348 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3349                   << getAnchorScope()->size() << "] BBs and "
3350                   << ToBeExploredFrom.size() << " exploration points and "
3351                   << KnownDeadEnds.size() << " known dead ends\n");
3352
3353 // Copy and clear the list of instructions we need to explore from. It is
3354 // refilled with instructions the next update has to look at.
3355 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3356 ToBeExploredFrom.end());
3357 decltype(ToBeExploredFrom) NewToBeExploredFrom;
3358
3359 SmallVector<const Instruction *, 8> AliveSuccessors;
3360 while (!Worklist.empty()) {
3361 const Instruction *I = Worklist.pop_back_val();
3362    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3363
3364 // Fast forward for uninteresting instructions. We could look for UB here
3365 // though.
3366 while (!I->isTerminator() && !isa<CallBase>(I)) {
3367 Change = ChangeStatus::CHANGED;
3368 I = I->getNextNode();
3369 }
3370
3371 AliveSuccessors.clear();
3372
3373 bool UsedAssumedInformation = false;
3374 switch (I->getOpcode()) {
3375 // TODO: look for (assumed) UB to backwards propagate "deadness".
3376 default:
3377      assert(I->isTerminator() &&
3378             "Expected non-terminators to be handled already!");
3379 for (const BasicBlock *SuccBB : successors(I->getParent()))
3380 AliveSuccessors.push_back(&SuccBB->front());
3381 break;
3382 case Instruction::Call:
3383 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3384 *this, AliveSuccessors);
3385 break;
3386 case Instruction::Invoke:
3387 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3388 *this, AliveSuccessors);
3389 break;
3390 case Instruction::Br:
3391 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3392 *this, AliveSuccessors);
3393 break;
3394 case Instruction::Switch:
3395 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3396 *this, AliveSuccessors);
3397 break;
3398 }
3399
3400 if (UsedAssumedInformation) {
3401 NewToBeExploredFrom.insert(I);
3402 } else {
3403 Change = ChangeStatus::CHANGED;
3404 if (AliveSuccessors.empty() ||
3405 (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3406 KnownDeadEnds.insert(I);
3407 }
3408
3409    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3410                      << AliveSuccessors.size() << " UsedAssumedInformation: "
3411                      << UsedAssumedInformation << "\n");
3412
3413 for (const Instruction *AliveSuccessor : AliveSuccessors) {
3414 if (!I->isTerminator()) {
3415        assert(AliveSuccessors.size() == 1 &&
3416               "Non-terminator expected to have a single successor!");
3417 Worklist.push_back(AliveSuccessor);
3418 } else {
3419 // record the assumed live edge
3420 AssumedLiveEdges.insert(
3421 std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3422 if (assumeLive(A, *AliveSuccessor->getParent()))
3423 Worklist.push_back(AliveSuccessor);
3424 }
3425 }
3426 }
3427
3428 ToBeExploredFrom = std::move(NewToBeExploredFrom);
3429
3430 // If we know everything is live there is no need to query for liveness.
3431 // Instead, indicating a pessimistic fixpoint will cause the state to be
3432 // "invalid" and all queries to be answered conservatively without lookups.
3433  // To be in this state we have to (1) have finished the exploration, (2) not
3434  // have ruled unreachable code dead, and (3) not have discovered any
3435  // non-trivial dead end.
3436 if (ToBeExploredFrom.empty() &&
3437 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3438 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3439 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3440 }))
3441 return indicatePessimisticFixpoint();
3442 return Change;
3443}
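
As a rough illustration of the exploration loop above, here is a self-contained sketch of the same worklist pattern with hypothetical stand-in types (Node instead of Instruction/BasicBlock): positions whose classification still rests on assumed information are re-queued for the next update, while everything else feeds its newly alive successors back into the worklist. This is a simplification, not the Attributor's actual data structures:

#include <set>
#include <vector>

// Hypothetical stand-in for an exploration point (instruction/block).
struct Node {
  std::vector<Node *> Successors;
  bool UsesAssumedInfo = false; // still based on assumptions of other AAs
};

static void exploreLiveness(const std::vector<Node *> &Seeds,
                            std::set<Node *> &AssumedLive,
                            std::set<Node *> &ToBeExploredFrom) {
  std::vector<Node *> Worklist(Seeds.begin(), Seeds.end());
  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    if (N->UsesAssumedInfo) {
      ToBeExploredFrom.insert(N); // revisit once the assumption is resolved
      continue;
    }
    for (Node *Succ : N->Successors)
      if (AssumedLive.insert(Succ).second) // only queue newly live nodes
        Worklist.push_back(Succ);
  }
}
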
3444
3445/// Liveness information for call sites.
3446struct AAIsDeadCallSite final : AAIsDeadFunction {
3447 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3448 : AAIsDeadFunction(IRP, A) {}
3449
3450 /// See AbstractAttribute::initialize(...).
3451 void initialize(Attributor &A) override {
3452 // TODO: Once we have call site specific value information we can provide
3453 // call site specific liveness information and then it makes
3454 // sense to specialize attributes for call sites instead of
3455 // redirecting requests to the callee.
3456    llvm_unreachable("Abstract attributes for liveness are not "
3457                     "supported for call sites yet!");
3458 }
3459
3460 /// See AbstractAttribute::updateImpl(...).
3461 ChangeStatus updateImpl(Attributor &A) override {
3462 return indicatePessimisticFixpoint();
3463 }
3464
3465 /// See AbstractAttribute::trackStatistics()
3466 void trackStatistics() const override {}
3467};
3468
3469/// -------------------- Dereferenceable Argument Attribute --------------------
3470
3471template <>
3472ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3473 const DerefState &R) {
3474 ChangeStatus CS0 =
3475 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3476 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3477 return CS0 | CS1;
3478}
3479
3480struct AADereferenceableImpl : AADereferenceable {
3481 AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3482 : AADereferenceable(IRP, A) {}
3483 using StateType = DerefState;
3484
3485 /// See AbstractAttribute::initialize(...).
3486 void initialize(Attributor &A) override {
3487 SmallVector<Attribute, 4> Attrs;
3488 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3489 Attrs, /* IgnoreSubsumingPositions */ false, &A);
3490 for (const Attribute &Attr : Attrs)
3491 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3492
3493 const IRPosition &IRP = this->getIRPosition();
3494 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3495
3496 bool CanBeNull, CanBeFreed;
3497 takeKnownDerefBytesMaximum(
3498 IRP.getAssociatedValue().getPointerDereferenceableBytes(
3499 A.getDataLayout(), CanBeNull, CanBeFreed));
3500
3501 bool IsFnInterface = IRP.isFnInterfaceKind();
3502 Function *FnScope = IRP.getAnchorScope();
3503 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3504 indicatePessimisticFixpoint();
3505 return;
3506 }
3507
3508 if (Instruction *CtxI = getCtxI())
3509 followUsesInMBEC(*this, A, getState(), *CtxI);
3510 }
3511
3512 /// See AbstractAttribute::getState()
3513 /// {
3514 StateType &getState() override { return *this; }
3515 const StateType &getState() const override { return *this; }
3516 /// }
3517
3518 /// Helper function for collecting accessed bytes in must-be-executed-context
3519 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3520 DerefState &State) {
3521 const Value *UseV = U->get();
3522 if (!UseV->getType()->isPointerTy())
3523 return;
3524
3525 Type *PtrTy = UseV->getType();
3526 const DataLayout &DL = A.getDataLayout();
3527 int64_t Offset;
3528 if (const Value *Base = getBasePointerOfAccessPointerOperand(
3529 I, Offset, DL, /*AllowNonInbounds*/ true)) {
3530 if (Base == &getAssociatedValue() &&
3531 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3532 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3533 State.addAccessedBytes(Offset, Size);
3534 }
3535 }
3536 }
3537
3538 /// See followUsesInMBEC
3539 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3540 AADereferenceable::StateType &State) {
3541 bool IsNonNull = false;
3542 bool TrackUse = false;
3543 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3544 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3545    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3546                      << " for instruction " << *I << "\n");
3547
3548 addAccessedBytesForUse(A, U, I, State);
3549 State.takeKnownDerefBytesMaximum(DerefBytes);
3550 return TrackUse;
3551 }
3552
3553 /// See AbstractAttribute::manifest(...).
3554 ChangeStatus manifest(Attributor &A) override {
3555 ChangeStatus Change = AADereferenceable::manifest(A);
3556 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3557 removeAttrs({Attribute::DereferenceableOrNull});
3558 return ChangeStatus::CHANGED;
3559 }
3560 return Change;
3561 }
3562
3563 void getDeducedAttributes(LLVMContext &Ctx,
3564 SmallVectorImpl<Attribute> &Attrs) const override {
3565 // TODO: Add *_globally support
3566 if (isAssumedNonNull())
3567 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3568 Ctx, getAssumedDereferenceableBytes()));
3569 else
3570 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3571 Ctx, getAssumedDereferenceableBytes()));
3572 }
3573
3574 /// See AbstractAttribute::getAsStr().
3575 const std::string getAsStr() const override {
3576 if (!getAssumedDereferenceableBytes())
3577 return "unknown-dereferenceable";
3578 return std::string("dereferenceable") +
3579 (isAssumedNonNull() ? "" : "_or_null") +
3580 (isAssumedGlobal() ? "_globally" : "") + "<" +
3581 std::to_string(getKnownDereferenceableBytes()) + "-" +
3582 std::to_string(getAssumedDereferenceableBytes()) + ">";
3583 }
3584};
3585
3586/// Dereferenceable attribute for a floating value.
3587struct AADereferenceableFloating : AADereferenceableImpl {
3588 AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3589 : AADereferenceableImpl(IRP, A) {}
3590
3591 /// See AbstractAttribute::updateImpl(...).
3592 ChangeStatus updateImpl(Attributor &A) override {
3593 const DataLayout &DL = A.getDataLayout();
3594
3595 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3596 bool Stripped) -> bool {
3597 unsigned IdxWidth =
3598 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3599 APInt Offset(IdxWidth, 0);
3600 const Value *Base =
3601 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3602
3603 const auto &AA = A.getAAFor<AADereferenceable>(
3604 *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3605 int64_t DerefBytes = 0;
3606 if (!Stripped && this == &AA) {
3607 // Use IR information if we did not strip anything.
3608 // TODO: track globally.
3609 bool CanBeNull, CanBeFreed;
3610 DerefBytes =
3611 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3612 T.GlobalState.indicatePessimisticFixpoint();
3613 } else {
3614 const DerefState &DS = AA.getState();
3615 DerefBytes = DS.DerefBytesState.getAssumed();
3616 T.GlobalState &= DS.GlobalState;
3617 }
3618
3619 // For now we do not try to "increase" dereferenceability due to negative
3620 // indices as we first have to come up with code to deal with loops and
3621 // for overflows of the dereferenceable bytes.
3622 int64_t OffsetSExt = Offset.getSExtValue();
3623 if (OffsetSExt < 0)
3624 OffsetSExt = 0;
3625
3626 T.takeAssumedDerefBytesMinimum(
3627 std::max(int64_t(0), DerefBytes - OffsetSExt));
3628
3629 if (this == &AA) {
3630 if (!Stripped) {
3631 // If nothing was stripped IR information is all we got.
3632 T.takeKnownDerefBytesMaximum(
3633 std::max(int64_t(0), DerefBytes - OffsetSExt));
3634 T.indicatePessimisticFixpoint();
3635 } else if (OffsetSExt > 0) {
3636 // If something was stripped but there is circular reasoning we look
3637 // for the offset. If it is positive we basically decrease the
3638        // dereferenceable bytes in a circular loop now, which will simply
3639 // drive them down to the known value in a very slow way which we
3640 // can accelerate.
3641 T.indicatePessimisticFixpoint();
3642 }
3643 }
3644
3645 return T.isValidState();
3646 };
3647
3648 DerefState T;
3649 if (!genericValueTraversal<AADereferenceable, DerefState>(
3650 A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3651 return indicatePessimisticFixpoint();
3652
3653 return clampStateAndIndicateChange(getState(), T);
3654 }
3655
3656 /// See AbstractAttribute::trackStatistics()
3657 void trackStatistics() const override {
3658    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3659 }
3660};
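
The VisitValueCB above shrinks the dereferenceable byte count by the accumulated (non-negative) offset and clamps at zero. A minimal arithmetic sketch of that clamping, assuming the base is dereferenceable for DerefBytes and the visited value is base plus Offset; the helper name is hypothetical:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Negative offsets are not used to "increase" dereferenceability (treated as
// 0), and the remaining byte count never drops below zero.
static int64_t remainingDerefBytes(int64_t DerefBytes, int64_t Offset) {
  int64_t OffsetSExt = std::max<int64_t>(Offset, 0);
  return std::max<int64_t>(0, DerefBytes - OffsetSExt);
}

int main() {
  std::printf("%lld\n", (long long)remainingDerefBytes(16, 4));  // 12
  std::printf("%lld\n", (long long)remainingDerefBytes(16, 32)); // 0
  std::printf("%lld\n", (long long)remainingDerefBytes(16, -8)); // 16
  return 0;
}
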
3661
3662/// Dereferenceable attribute for a return value.
3663struct AADereferenceableReturned final
3664 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3665 AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3666 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3667 IRP, A) {}
3668
3669 /// See AbstractAttribute::trackStatistics()
3670 void trackStatistics() const override {
3671    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3672 }
3673};
3674
3675/// Dereferenceable attribute for an argument
3676struct AADereferenceableArgument final
3677 : AAArgumentFromCallSiteArguments<AADereferenceable,
3678 AADereferenceableImpl> {
3679 using Base =
3680 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3681 AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3682 : Base(IRP, A) {}
3683
3684 /// See AbstractAttribute::trackStatistics()
3685 void trackStatistics() const override {
3686    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3687 }
3688};
3689
3690/// Dereferenceable attribute for a call site argument.
3691struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3692 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3693 : AADereferenceableFloating(IRP, A) {}
3694
3695 /// See AbstractAttribute::trackStatistics()
3696 void trackStatistics() const override {
3697    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3698 }
3699};
3700
3701/// Dereferenceable attribute deduction for a call site return value.
3702struct AADereferenceableCallSiteReturned final
3703 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3704 using Base =
3705 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3706 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3707 : Base(IRP, A) {}
3708
3709 /// See AbstractAttribute::trackStatistics()
3710 void trackStatistics() const override {
3711    STATS_DECLTRACK_CS_ATTR(dereferenceable);
3712 }
3713};
3714
3715// ------------------------ Align Argument Attribute ------------------------
3716
3717static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3718 Value &AssociatedValue, const Use *U,
3719 const Instruction *I, bool &TrackUse) {
3720 // We need to follow common pointer manipulation uses to the accesses they
3721 // feed into.
3722 if (isa<CastInst>(I)) {
3723 // Follow all but ptr2int casts.
3724 TrackUse = !isa<PtrToIntInst>(I);
3725 return 0;
3726 }
3727 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3728 if (GEP->hasAllConstantIndices())
3729 TrackUse = true;
3730 return 0;
3731 }
3732
3733 MaybeAlign MA;
3734 if (const auto *CB = dyn_cast<CallBase>(I)) {
3735 if (CB->isBundleOperand(U) || CB->isCallee(U))
3736 return 0;
3737
3738 unsigned ArgNo = CB->getArgOperandNo(U);
3739 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3740 // As long as we only use known information there is no need to track
3741 // dependences here.
3742 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3743 MA = MaybeAlign(AlignAA.getKnownAlign());
3744 }
3745
3746 const DataLayout &DL = A.getDataLayout();
3747 const Value *UseV = U->get();
3748 if (auto *SI = dyn_cast<StoreInst>(I)) {
3749 if (SI->getPointerOperand() == UseV)
3750 MA = SI->getAlign();
3751 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3752 if (LI->getPointerOperand() == UseV)
3753 MA = LI->getAlign();
3754 }
3755
3756 if (!MA || *MA <= QueryingAA.getKnownAlign())
3757 return 0;
3758
3759 unsigned Alignment = MA->value();
3760 int64_t Offset;
3761
3762 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3763 if (Base == &AssociatedValue) {
3764 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3765 // So we can say that the maximum power of two which is a divisor of
3766 // gcd(Offset, Alignment) is an alignment.
3767
3768 uint32_t gcd =
3769 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3770 Alignment = llvm::PowerOf2Floor(gcd);
3771 }
3772 }
3773
3774 return Alignment;
3775}
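
The gcd trick in getKnownAlignForUse above can be checked in isolation: if the base pointer is aligned to a power of two BaseAlignment, then base + Offset is aligned to the largest power of two dividing gcd(|Offset|, BaseAlignment). A standalone sketch under that assumption, with hypothetical helpers in place of LLVM's greatestCommonDivisor and PowerOf2Floor:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static uint32_t gcdU32(uint32_t A, uint32_t B) {
  while (B) {
    uint32_t T = A % B;
    A = B;
    B = T;
  }
  return A;
}

// Largest power of two less than or equal to X (X > 0), i.e. a PowerOf2Floor.
static uint32_t powerOf2Floor(uint32_t X) {
  uint32_t P = 1;
  while ((P << 1) != 0 && (P << 1) <= X)
    P <<= 1;
  return P;
}

static uint32_t alignmentFromBaseAndOffset(uint32_t BaseAlignment,
                                           int64_t Offset) {
  uint32_t G = gcdU32(uint32_t(std::llabs(Offset)), BaseAlignment);
  return powerOf2Floor(G);
}

int main() {
  std::printf("%u\n", alignmentFromBaseAndOffset(16, 12)); // gcd(12,16)=4 -> 4
  std::printf("%u\n", alignmentFromBaseAndOffset(16, 32)); // gcd(32,16)=16 -> 16
  return 0;
}
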
3776
3777struct AAAlignImpl : AAAlign {
3778 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3779
3780 /// See AbstractAttribute::initialize(...).
3781 void initialize(Attributor &A) override {
3782 SmallVector<Attribute, 4> Attrs;
3783 getAttrs({Attribute::Alignment}, Attrs);
3784 for (const Attribute &Attr : Attrs)
3785 takeKnownMaximum(Attr.getValueAsInt());
3786
3787 Value &V = getAssociatedValue();
3788 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3789 // use of the function pointer. This was caused by D73131. We want to
3790 // avoid this for function pointers especially because we iterate
3791 // their uses and int2ptr is not handled. It is not a correctness
3792 // problem though!
3793 if (!V.getType()->getPointerElementType()->isFunctionTy())
3794 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3795
3796 if (getIRPosition().isFnInterfaceKind() &&
3797 (!getAnchorScope() ||
3798 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3799 indicatePessimisticFixpoint();
3800 return;
3801 }
3802
3803 if (Instruction *CtxI = getCtxI())
3804 followUsesInMBEC(*this, A, getState(), *CtxI);
3805 }
3806
3807 /// See AbstractAttribute::manifest(...).
3808 ChangeStatus manifest(Attributor &A) override {
3809 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3810
3811 // Check for users that allow alignment annotations.
3812 Value &AssociatedValue = getAssociatedValue();
3813 for (const Use &U : AssociatedValue.uses()) {
3814 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3815 if (SI->getPointerOperand() == &AssociatedValue)
3816 if (SI->getAlignment() < getAssumedAlign()) {
3817            STATS_DECLTRACK(AAAlign, Store,
3818                            "Number of times alignment added to a store");
3819 SI->setAlignment(Align(getAssumedAlign()));
3820 LoadStoreChanged = ChangeStatus::CHANGED;
3821 }
3822 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3823 if (LI->getPointerOperand() == &AssociatedValue)
3824 if (LI->getAlignment() < getAssumedAlign()) {
3825 LI->setAlignment(Align(getAssumedAlign()));
3826            STATS_DECLTRACK(AAAlign, Load,
3827                            "Number of times alignment added to a load");
3828 LoadStoreChanged = ChangeStatus::CHANGED;
3829 }
3830 }
3831 }
3832
3833 ChangeStatus Changed = AAAlign::manifest(A);
3834
3835 Align InheritAlign =
3836 getAssociatedValue().getPointerAlignment(A.getDataLayout());
3837 if (InheritAlign >= getAssumedAlign())
3838 return LoadStoreChanged;
3839 return Changed | LoadStoreChanged;
3840 }
3841
3842  // TODO: Provide a helper to determine the implied ABI alignment, and check
3843  //       that value in the existing manifest method and a new one for AAAlignImpl
3844  //       to avoid making the alignment explicit if it did not improve.
3845
3846 /// See AbstractAttribute::getDeducedAttributes
3847 virtual void
3848 getDeducedAttributes(LLVMContext &Ctx,
3849 SmallVectorImpl<Attribute> &Attrs) const override {
3850 if (getAssumedAlign() > 1)
3851 Attrs.emplace_back(
3852 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3853 }
3854
3855 /// See followUsesInMBEC
3856 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3857 AAAlign::StateType &State) {
3858 bool TrackUse = false;
3859
3860 unsigned int KnownAlign =
3861 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3862 State.takeKnownMaximum(KnownAlign);
3863
3864 return TrackUse;
3865 }
3866
3867 /// See AbstractAttribute::getAsStr().
3868 const std::string getAsStr() const override {
3869 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3870 "-" + std::to_string(getAssumedAlign()) + ">")
3871 : "unknown-align";
3872 }
3873};
3874
3875/// Align attribute for a floating value.
3876struct AAAlignFloating : AAAlignImpl {
3877 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3878
3879 /// See AbstractAttribute::updateImpl(...).
3880 ChangeStatus updateImpl(Attributor &A) override {
3881 const DataLayout &DL = A.getDataLayout();
3882
3883 auto VisitValueCB = [&](Value &V, const Instruction *,
3884 AAAlign::StateType &T, bool Stripped) -> bool {
3885 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3886 DepClassTy::REQUIRED);
3887 if (!Stripped && this == &AA) {
3888 int64_t Offset;
3889 unsigned Alignment = 1;
3890 if (const Value *Base =
3891 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3892 Align PA = Base->getPointerAlignment(DL);
3893 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3894 // So we can say that the maximum power of two which is a divisor of
3895 // gcd(Offset, Alignment) is an alignment.
3896
3897 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3898 uint32_t(PA.value()));
3899 Alignment = llvm::PowerOf2Floor(gcd);
3900 } else {
3901 Alignment = V.getPointerAlignment(DL).value();
3902 }
3903 // Use only IR information if we did not strip anything.
3904 T.takeKnownMaximum(Alignment);
3905 T.indicatePessimisticFixpoint();
3906 } else {
3907 // Use abstract attribute information.
3908 const AAAlign::StateType &DS = AA.getState();
3909 T ^= DS;
3910 }
3911 return T.isValidState();
3912 };
3913
3914 StateType T;
3915 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3916 VisitValueCB, getCtxI()))
3917 return indicatePessimisticFixpoint();
3918
3919    // TODO: If we know we visited all incoming values, and thus none are assumed
3920    // dead, we can take the known information from the state T.
3921 return clampStateAndIndicateChange(getState(), T);
3922 }
3923
3924 /// See AbstractAttribute::trackStatistics()
3925  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3926};
3927
3928/// Align attribute for function return value.
3929struct AAAlignReturned final
3930 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3931 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3932 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3933
3934 /// See AbstractAttribute::initialize(...).
3935 void initialize(Attributor &A) override {
3936 Base::initialize(A);
3937 Function *F = getAssociatedFunction();
3938 if (!F || F->isDeclaration())
3939 indicatePessimisticFixpoint();
3940 }
3941
3942 /// See AbstractAttribute::trackStatistics()
3943  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3944};
3945
3946/// Align attribute for function argument.
3947struct AAAlignArgument final
3948 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3949 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3950 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3951
3952 /// See AbstractAttribute::manifest(...).
3953 ChangeStatus manifest(Attributor &A) override {
3954 // If the associated argument is involved in a must-tail call we give up
3955 // because we would need to keep the argument alignments of caller and
3956 // callee in-sync. Just does not seem worth the trouble right now.
3957 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3958 return ChangeStatus::UNCHANGED;
3959 return Base::manifest(A);
3960 }
3961
3962 /// See AbstractAttribute::trackStatistics()
3963  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3964};
3965
3966struct AAAlignCallSiteArgument final : AAAlignFloating {
3967 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3968 : AAAlignFloating(IRP, A) {}
3969
3970 /// See AbstractAttribute::manifest(...).
3971 ChangeStatus manifest(Attributor &A) override {
3972 // If the associated argument is involved in a must-tail call we give up
3973 // because we would need to keep the argument alignments of caller and
3974 // callee in-sync. Just does not seem worth the trouble right now.
3975 if (Argument *Arg = getAssociatedArgument())
3976 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3977 return ChangeStatus::UNCHANGED;
3978 ChangeStatus Changed = AAAlignImpl::manifest(A);
3979 Align InheritAlign =
3980 getAssociatedValue().getPointerAlignment(A.getDataLayout());
3981 if (InheritAlign >= getAssumedAlign())
3982 Changed = ChangeStatus::UNCHANGED;
3983 return Changed;
3984 }
3985
3986 /// See AbstractAttribute::updateImpl(Attributor &A).
3987 ChangeStatus updateImpl(Attributor &A) override {
3988 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3989 if (Argument *Arg = getAssociatedArgument()) {
3990 // We only take known information from the argument
3991 // so we do not need to track a dependence.
3992 const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3993 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3994 takeKnownMaximum(ArgAlignAA.getKnownAlign());
3995 }
3996 return Changed;
3997 }
3998
3999 /// See AbstractAttribute::trackStatistics()
4000  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4001};
4002
4003/// Align attribute deduction for a call site return value.
4004struct AAAlignCallSiteReturned final
4005 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4006 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4007 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4008 : Base(IRP, A) {}
4009
4010 /// See AbstractAttribute::initialize(...).
4011 void initialize(Attributor &A) override {
4012 Base::initialize(A);
4013 Function *F = getAssociatedFunction();
4014 if (!F || F->isDeclaration())
4015 indicatePessimisticFixpoint();
4016 }
4017
4018 /// See AbstractAttribute::trackStatistics()
4019  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4020};
4021
4022/// ------------------ Function No-Return Attribute ----------------------------
4023struct AANoReturnImpl : public AANoReturn {
4024 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4025
4026 /// See AbstractAttribute::initialize(...).
4027 void initialize(Attributor &A) override {
4028 AANoReturn::initialize(A);
4029 Function *F = getAssociatedFunction();
4030 if (!F || F->isDeclaration())
4031 indicatePessimisticFixpoint();
4032 }
4033
4034 /// See AbstractAttribute::getAsStr().
4035 const std::string getAsStr() const override {
4036 return getAssumed() ? "noreturn" : "may-return";
4037 }
4038
4039 /// See AbstractAttribute::updateImpl(Attributor &A).
4040 virtual ChangeStatus updateImpl(Attributor &A) override {
4041 auto CheckForNoReturn = [](Instruction &) { return false; };
4042 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4043 {(unsigned)Instruction::Ret}))
4044 return indicatePessimisticFixpoint();
4045 return ChangeStatus::UNCHANGED;
4046 }
4047};
4048
4049struct AANoReturnFunction final : AANoReturnImpl {
4050 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4051 : AANoReturnImpl(IRP, A) {}
4052
4053 /// See AbstractAttribute::trackStatistics()
4054  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4055};
4056
4057/// NoReturn attribute deduction for call sites.
4058struct AANoReturnCallSite final : AANoReturnImpl {
4059 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4060 : AANoReturnImpl(IRP, A) {}
4061
4062 /// See AbstractAttribute::initialize(...).
4063 void initialize(Attributor &A) override {
4064 AANoReturnImpl::initialize(A);
4065 if (Function *F = getAssociatedFunction()) {
4066 const IRPosition &FnPos = IRPosition::function(*F);
4067 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4068 if (!FnAA.isAssumedNoReturn())
4069 indicatePessimisticFixpoint();
4070 }
4071 }
4072
4073 /// See AbstractAttribute::updateImpl(...).
4074 ChangeStatus updateImpl(Attributor &A) override {
4075 // TODO: Once we have call site specific value information we can provide
4076 // call site specific liveness information and then it makes
4077 // sense to specialize attributes for call sites arguments instead of
4078 // redirecting requests to the callee argument.
4079 Function *F = getAssociatedFunction();
4080 const IRPosition &FnPos = IRPosition::function(*F);
4081 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4082 return clampStateAndIndicateChange(getState(), FnAA.getState());
4083 }
4084
4085 /// See AbstractAttribute::trackStatistics()
4086  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4087};
4088
4089/// ----------------------- Variable Capturing ---------------------------------
4090
4091/// A class to hold the state for no-capture attributes.
4092struct AANoCaptureImpl : public AANoCapture {
4093 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4094
4095 /// See AbstractAttribute::initialize(...).
4096 void initialize(Attributor &A) override {
4097 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4098 indicateOptimisticFixpoint();
4099 return;
4100 }
4101 Function *AnchorScope = getAnchorScope();
4102 if (isFnInterfaceKind() &&
4103 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4104 indicatePessimisticFixpoint();
4105 return;
4106 }
4107
4108 // You cannot "capture" null in the default address space.
4109 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4110 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4111 indicateOptimisticFixpoint();
4112 return;
4113 }
4114
4115 const Function *F =
4116 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4117
4118 // Check what state the associated function can actually capture.
4119 if (F)
4120 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4121 else
4122 indicatePessimisticFixpoint();
4123 }
4124
4125 /// See AbstractAttribute::updateImpl(...).
4126 ChangeStatus updateImpl(Attributor &A) override;
4127
4128 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4129 virtual void
4130 getDeducedAttributes(LLVMContext &Ctx,
4131 SmallVectorImpl<Attribute> &Attrs) const override {
4132 if (!isAssumedNoCaptureMaybeReturned())
4133 return;
4134
4135 if (isArgumentPosition()) {
4136 if (isAssumedNoCapture())
4137 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4138 else if (ManifestInternal)
4139 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4140 }
4141 }
4142
4143 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4144 /// depending on the ability of the function associated with \p IRP to capture
4145 /// state in memory and through "returning/throwing", respectively.
4146 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4147 const Function &F,
4148 BitIntegerState &State) {
4149 // TODO: Once we have memory behavior attributes we should use them here.
4150
4151 // If we know we cannot communicate or write to memory, we do not care about
4152 // ptr2int anymore.
4153 if (F.onlyReadsMemory() && F.doesNotThrow() &&
4154 F.getReturnType()->isVoidTy()) {
4155 State.addKnownBits(NO_CAPTURE);
4156 return;
4157 }
4158
4159    // A function cannot capture state in memory if it only reads memory; it can,
4160    // however, return/throw state and the state might be influenced by the
4161    // pointer value, e.g., loading from a returned pointer might reveal a bit.
4162 if (F.onlyReadsMemory())
4163 State.addKnownBits(NOT_CAPTURED_IN_MEM);
4164
4165    // A function cannot communicate state back if it does not throw
4166    // exceptions and does not return values.
4167 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4168 State.addKnownBits(NOT_CAPTURED_IN_RET);
4169
4170 // Check existing "returned" attributes.
4171 int ArgNo = IRP.getCalleeArgNo();
4172 if (F.doesNotThrow() && ArgNo >= 0) {
4173 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4174 if (F.hasParamAttribute(u, Attribute::Returned)) {
4175 if (u == unsigned(ArgNo))
4176 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4177 else if (F.onlyReadsMemory())
4178 State.addKnownBits(NO_CAPTURE);
4179 else
4180 State.addKnownBits(NOT_CAPTURED_IN_RET);
4181 break;
4182 }
4183 }
4184 }
4185
4186 /// See AbstractState::getAsStr().
4187 const std::string getAsStr() const override {
4188 if (isKnownNoCapture())
4189 return "known not-captured";
4190 if (isAssumedNoCapture())
4191 return "assumed not-captured";
4192 if (isKnownNoCaptureMaybeReturned())
4193 return "known not-captured-maybe-returned";
4194 if (isAssumedNoCaptureMaybeReturned())
4195 return "assumed not-captured-maybe-returned";
4196 return "assumed-captured";
4197 }
4198};
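
To make the bit logic of determineFunctionCaptureCapabilities above concrete, here is a plain-flags sketch using a hypothetical enum in place of LLVM's BitIntegerState; it mirrors only the readonly/nothrow/void-return checks, not the handling of existing "returned" attributes:

#include <cstdio>

enum CaptureBits : unsigned {
  NOT_CAPTURED_IN_MEM = 1u << 0,
  NOT_CAPTURED_IN_INT = 1u << 1,
  NOT_CAPTURED_IN_RET = 1u << 2,
  NO_CAPTURE = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
};

// A readonly, nothrow function returning void can neither stash the pointer in
// memory nor hand it back via return/unwind, so nothing can be captured at all.
static unsigned knownCaptureCapabilities(bool OnlyReadsMemory,
                                         bool DoesNotThrow, bool ReturnsVoid) {
  if (OnlyReadsMemory && DoesNotThrow && ReturnsVoid)
    return NO_CAPTURE;
  unsigned Known = 0;
  if (OnlyReadsMemory)
    Known |= NOT_CAPTURED_IN_MEM;
  if (DoesNotThrow && ReturnsVoid)
    Known |= NOT_CAPTURED_IN_RET;
  return Known;
}

int main() {
  // Readonly but returns a value: memory capture is ruled out, return capture
  // is not.
  std::printf("%u\n", knownCaptureCapabilities(true, true, false)); // prints 1
  return 0;
}
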
4199
4200/// Attributor-aware capture tracker.
4201struct AACaptureUseTracker final : public CaptureTracker {
4202
4203 /// Create a capture tracker that can lookup in-flight abstract attributes
4204 /// through the Attributor \p A.
4205 ///
4206 /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4207 /// search is stopped. If a use leads to a return instruction,
4208 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4209 /// If a use leads to a ptr2int which may capture the value,
4210 /// \p CapturedInInteger is set. If a use is found that is currently assumed
4211 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4212 /// set. All values in \p PotentialCopies are later tracked as well. For every
4213 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4214 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4215 /// conservatively set to true.
4216 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4217 const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4218 SmallVectorImpl<const Value *> &PotentialCopies,
4219 unsigned &RemainingUsesToExplore)
4220 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4221 PotentialCopies(PotentialCopies),
4222 RemainingUsesToExplore(RemainingUsesToExplore) {}
4223
4224  /// Determine if \p V may be captured. *Also updates the state!*
4225 bool valueMayBeCaptured(const Value *V) {
4226 if (V->getType()->isPointerTy()) {
4227 PointerMayBeCaptured(V, this);
4228 } else {
4229 State.indicatePessimisticFixpoint();
4230 }
4231 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4232 }
4233
4234 /// See CaptureTracker::tooManyUses().
4235 void tooManyUses() override {
4236 State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4237 }
4238
4239 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4240 if (CaptureTracker::isDereferenceableOrNull(O, DL))
4241 return true;
4242 const auto &DerefAA = A.getAAFor<AADereferenceable>(
4243 NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4244 return DerefAA.getAssumedDereferenceableBytes();
4245 }
4246
4247 /// See CaptureTracker::captured(...).
4248 bool captured(const Use *U) override {
4249 Instruction *UInst = cast<Instruction>(U->getUser());
4250    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4251                      << "\n");
4252
4253 // Because we may reuse the tracker multiple times we keep track of the
4254 // number of explored uses ourselves as well.
4255 if (RemainingUsesToExplore-- == 0) {
4256      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4257 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4258 /* Return */ true);
4259 }
4260
4261 // Deal with ptr2int by following uses.
4262 if (isa<PtrToIntInst>(UInst)) {
4263      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4264 return valueMayBeCaptured(UInst);
4265 }
4266
4267 // Explicitly catch return instructions.
4268 if (isa<ReturnInst>(UInst))
4269 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4270 /* Return */ true);
4271
4272 // For now we only use special logic for call sites. However, the tracker
4273 // itself knows about a lot of other non-capturing cases already.
4274 auto *CB = dyn_cast<CallBase>(UInst);
4275 if (!CB || !CB->isArgOperand(U))
4276 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4277 /* Return */ true);
4278
4279 unsigned ArgNo = CB->getArgOperandNo(U);
4280 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4281    // If we have an abstract no-capture attribute for the argument we can use
4282 // it to justify a non-capture attribute here. This allows recursion!
4283 auto &ArgNoCaptureAA =
4284 A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4285 if (ArgNoCaptureAA.isAssumedNoCapture())
4286 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4287 /* Return */ false);
4288 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4289 addPotentialCopy(*CB);
4290 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4291 /* Return */ false);
4292 }
4293
4294 // Lastly, we could not find a reason no-capture can be assumed so we don't.
4295 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4296 /* Return */ true);
4297 }
4298
4299  /// Register \p CB as a potential copy of the value we are checking.
4300 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4301
4302 /// See CaptureTracker::shouldExplore(...).
4303 bool shouldExplore(const Use *U) override {
4304 // Check liveness and ignore droppable users.
4305 return !U->getUser()->isDroppable() &&
4306 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4307 }
4308
4309 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4310 /// \p CapturedInRet, then return the appropriate value for use in the
4311 /// CaptureTracker::captured() interface.
4312 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4313 bool CapturedInRet) {
4314    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4315                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4316 if (CapturedInMem)
4317 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4318 if (CapturedInInt)
4319 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4320 if (CapturedInRet)
4321 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4322 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4323 }
4324
4325private:
4326 /// The attributor providing in-flight abstract attributes.
4327 Attributor &A;
4328
4329 /// The abstract attribute currently updated.
4330 AANoCapture &NoCaptureAA;
4331
4332 /// The abstract liveness state.
4333 const AAIsDead &IsDeadAA;
4334
4335 /// The state currently updated.
4336 AANoCapture::StateType &State;
4337
4338 /// Set of potential copies of the tracked value.
4339 SmallVectorImpl<const Value *> &PotentialCopies;
4340
4341 /// Global counter to limit the number of explored uses.
4342 unsigned &RemainingUsesToExplore;
4343};
4344
4345ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4346 const IRPosition &IRP = getIRPosition();
4347 const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4348 : &IRP.getAssociatedValue();
4349 if (!V)
4350 return indicatePessimisticFixpoint();
4351
4352 const Function *F =
4353 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4354  assert(F && "Expected a function!");
4355 const IRPosition &FnPos = IRPosition::function(*F);
4356 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4357
4358 AANoCapture::StateType T;
4359
4360 // Readonly means we cannot capture through memory.
4361 const auto &FnMemAA =
4362 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4363 if (FnMemAA.isAssumedReadOnly()) {
4364 T.addKnownBits(NOT_CAPTURED_IN_MEM);
4365 if (FnMemAA.isKnownReadOnly())
4366 addKnownBits(NOT_CAPTURED_IN_MEM);
4367 else
4368 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4369 }
4370
4371 // Make sure all returned values are different than the underlying value.
4372 // TODO: we could do this in a more sophisticated way inside
4373 // AAReturnedValues, e.g., track all values that escape through returns
4374 // directly somehow.
4375 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4376 bool SeenConstant = false;
4377 for (auto &It : RVAA.returned_values()) {
4378 if (isa<Constant>(It.first)) {
4379 if (SeenConstant)
4380 return false;
4381 SeenConstant = true;
4382 } else if (!isa<Argument>(It.first) ||
4383 It.first == getAssociatedArgument())
4384 return false;
4385 }
4386 return true;
4387 };
4388
4389 const auto &NoUnwindAA =
4390 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4391 if (NoUnwindAA.isAssumedNoUnwind()) {
4392 bool IsVoidTy = F->getReturnType()->isVoidTy();
4393 const AAReturnedValues *RVAA =
4394 IsVoidTy ? nullptr
4395 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4396
4397 DepClassTy::OPTIONAL);
4398 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4399 T.addKnownBits(NOT_CAPTURED_IN_RET);
4400 if (T.isKnown(NOT_CAPTURED_IN_MEM))
4401 return ChangeStatus::UNCHANGED;
4402 if (NoUnwindAA.isKnownNoUnwind() &&
4403 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4404 addKnownBits(NOT_CAPTURED_IN_RET);
4405 if (isKnown(NOT_CAPTURED_IN_MEM))
4406 return indicateOptimisticFixpoint();
4407 }
4408 }
4409 }
4410
4411 // Use the CaptureTracker interface and logic with the specialized tracker,
4412 // defined in AACaptureUseTracker, that can look at in-flight abstract
4413 // attributes and directly updates the assumed state.
4414 SmallVector<const Value *, 4> PotentialCopies;
4415 unsigned RemainingUsesToExplore =
4416 getDefaultMaxUsesToExploreForCaptureTracking();
4417 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4418 RemainingUsesToExplore);
4419
4420 // Check all potential copies of the associated value until we can assume
4421 // none will be captured or we have to assume at least one might be.
4422 unsigned Idx = 0;
4423 PotentialCopies.push_back(V);
4424 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4425 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4426
4427 AANoCapture::StateType &S = getState();
4428 auto Assumed = S.getAssumed();
4429 S.intersectAssumedBits(T.getAssumed());
4430 if (!isAssumedNoCaptureMaybeReturned())
4431 return indicatePessimisticFixpoint();
4432 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4433 : ChangeStatus::CHANGED;
4434}
4435
4436/// NoCapture attribute for function arguments.
4437struct AANoCaptureArgument final : AANoCaptureImpl {
4438 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4439 : AANoCaptureImpl(IRP, A) {}
4440
4441 /// See AbstractAttribute::trackStatistics()
4442  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4443};
4444
4445/// NoCapture attribute for call site arguments.
4446struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4447 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4448 : AANoCaptureImpl(IRP, A) {}
4449
4450 /// See AbstractAttribute::initialize(...).
4451 void initialize(Attributor &A) override {
4452 if (Argument *Arg = getAssociatedArgument())
4453 if (Arg->hasByValAttr())
4454 indicateOptimisticFixpoint();
4455 AANoCaptureImpl::initialize(A);
4456 }
4457
4458 /// See AbstractAttribute::updateImpl(...).
4459 ChangeStatus updateImpl(Attributor &A) override {
4460 // TODO: Once we have call site specific value information we can provide
4461 // call site specific liveness information and then it makes
4462 // sense to specialize attributes for call sites arguments instead of
4463 // redirecting requests to the callee argument.
4464 Argument *Arg = getAssociatedArgument();
4465 if (!Arg)
4466 return indicatePessimisticFixpoint();
4467 const IRPosition &ArgPos = IRPosition::argument(*Arg);
4468 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4469 return clampStateAndIndicateChange(getState(), ArgAA.getState());
4470 }
4471
4472 /// See AbstractAttribute::trackStatistics()
4473  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4474};
4475
4476/// NoCapture attribute for floating values.
4477struct AANoCaptureFloating final : AANoCaptureImpl {
4478 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4479 : AANoCaptureImpl(IRP, A) {}
4480
4481 /// See AbstractAttribute::trackStatistics()
4482 void trackStatistics() const override {
4483    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4484 }
4485};
4486
4487/// NoCapture attribute for function return value.
4488struct AANoCaptureReturned final : AANoCaptureImpl {
4489 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4490 : AANoCaptureImpl(IRP, A) {
4491    llvm_unreachable("NoCapture is not applicable to function returns!");
4492 }
4493
4494 /// See AbstractAttribute::initialize(...).
4495 void initialize(Attributor &A) override {
4496    llvm_unreachable("NoCapture is not applicable to function returns!");
4497 }
4498
4499 /// See AbstractAttribute::updateImpl(...).
4500 ChangeStatus updateImpl(Attributor &A) override {
4501    llvm_unreachable("NoCapture is not applicable to function returns!");
4502 }
4503
4504 /// See AbstractAttribute::trackStatistics()
4505 void trackStatistics() const override {}
4506};
4507
4508/// NoCapture attribute deduction for a call site return value.
4509struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4510 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4511 : AANoCaptureImpl(IRP, A) {}
4512
4513 /// See AbstractAttribute::initialize(...).
4514 void initialize(Attributor &A) override {
4515 const Function *F = getAnchorScope();
4516 // Check what state the associated function can actually capture.
4517 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4518 }
4519
4520 /// See AbstractAttribute::trackStatistics()
4521 void trackStatistics() const override {
4522    STATS_DECLTRACK_CSRET_ATTR(nocapture)
4523 }
4524};
4525
4526/// ------------------ Value Simplify Attribute ----------------------------
4527struct AAValueSimplifyImpl : AAValueSimplify {
4528 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4529 : AAValueSimplify(IRP, A) {}
4530
4531 /// See AbstractAttribute::initialize(...).
4532 void initialize(Attributor &A) override {
4533 if (getAssociatedValue().getType()->isVoidTy())
4534 indicatePessimisticFixpoint();
4535 }
4536
4537 /// See AbstractAttribute::getAsStr().
4538 const std::string getAsStr() const override {
4539 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4540 : "not-simple";
4541 }
4542
4543 /// See AbstractAttribute::trackStatistics()
4544 void trackStatistics() const override {}
4545
4546 /// See AAValueSimplify::getAssumedSimplifiedValue()
4547 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4548 if (!getAssumed())
4549 return const_cast<Value *>(&getAssociatedValue());
4550 return SimplifiedAssociatedValue;
4551 }
4552
4553  /// Helper function for querying AAValueSimplify and updating the candidate.
4554 /// \param QueryingValue Value trying to unify with SimplifiedValue
4555 /// \param AccumulatedSimplifiedValue Current simplification result.
4556 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4557 Value &QueryingValue,
4558 Optional<Value *> &AccumulatedSimplifiedValue) {
4559    // FIXME: Add typecast support.
4560
4561 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4562 QueryingAA, IRPosition::value(QueryingValue), DepClassTy::REQUIRED);
4563
4564 Optional<Value *> QueryingValueSimplified =
4565 ValueSimplifyAA.getAssumedSimplifiedValue(A);
4566
4567 if (!QueryingValueSimplified.hasValue())
4568 return true;
4569
4570 if (!QueryingValueSimplified.getValue())
4571 return false;
4572
4573 Value &QueryingValueSimplifiedUnwrapped =
4574 *QueryingValueSimplified.getValue();
4575
4576 if (AccumulatedSimplifiedValue.hasValue() &&
4577 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4578 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4579 return AccumulatedSimplifiedValue == QueryingValueSimplified;
4580 if (AccumulatedSimplifiedValue.hasValue() &&
4581 isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4582 return true;
4583
4584    LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4585                      << " is assumed to be "
4586                      << QueryingValueSimplifiedUnwrapped << "\n");
4587
4588 AccumulatedSimplifiedValue = QueryingValueSimplified;
4589 return true;
4590 }
4591
4592 /// Returns true if a candidate is found, false otherwise.
4593 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4594 if (!getAssociatedValue().getType()->isIntegerTy())
4595 return false;
4596
4597 const auto &AA =
4598 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4599
4600 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4601
4602 if (!COpt.hasValue()) {
4603 SimplifiedAssociatedValue = llvm::None;
4604 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4605 return true;
4606 }
4607 if (auto *C = COpt.getValue()) {
4608 SimplifiedAssociatedValue = C;
4609 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4610 return true;
4611 }
4612 return false;
4613 }
4614
4615 bool askSimplifiedValueForOtherAAs(Attributor &A) {
4616 if (askSimplifiedValueFor<AAValueConstantRange>(A))
4617 return true;
4618 if (askSimplifiedValueFor<AAPotentialValues>(A))
4619 return true;
4620 return false;
4621 }
4622
4623 /// See AbstractAttribute::manifest(...).
4624 ChangeStatus manifest(Attributor &A) override {
4625 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4626
4627 if (SimplifiedAssociatedValue.hasValue() &&
4628 !SimplifiedAssociatedValue.getValue())
4629 return Changed;
4630
4631 Value &V = getAssociatedValue();
4632 auto *C = SimplifiedAssociatedValue.hasValue()
4633 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4634 : UndefValue::get(V.getType());
4635 if (C) {
4636 // We can replace the AssociatedValue with the constant.
4637 if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4638 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4639 << " :: " << *this << "\n");
4640 if (A.changeValueAfterManifest(V, *C))
4641 Changed = ChangeStatus::CHANGED;
4642 }
4643 }
4644
4645 return Changed | AAValueSimplify::manifest(A);
4646 }
4647
4648 /// See AbstractState::indicatePessimisticFixpoint(...).
4649 ChangeStatus indicatePessimisticFixpoint() override {
4650 // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4651 // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4652 SimplifiedAssociatedValue = &getAssociatedValue();
4653 indicateOptimisticFixpoint();
4654 return ChangeStatus::CHANGED;
4655 }
4656
4657protected:
4658 // An assumed simplified value. Initially, it is set to Optional::None, which
4659 // means that the value is not clear under current assumption. If in the
4660 // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4661 // returns orignal associated value.
4662 Optional<Value *> SimplifiedAssociatedValue;
4663};
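The three-state Optional<Value *> protocol used above (None: no candidate yet; a contained null: give up; anything else: the current candidate) is what checkAndUpdate folds call-site and return values into. A minimal sketch of that unification, with the undef special cases left out (illustration only; meetSimplified is not a function in this file):

#include "llvm/ADT/Optional.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// None means "no candidate seen yet", a contained null pointer means
// "candidates disagree, give up", anything else is the current candidate.
static Optional<Value *> meetSimplified(Optional<Value *> Acc, Value *New) {
  if (!Acc.hasValue())
    return New;     // the first candidate becomes the accumulator
  if (Acc.getValue() == New)
    return Acc;     // agreement keeps the candidate
  return nullptr;   // disagreement: the value is not simplifiable
}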
4664
4665struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4666 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4667 : AAValueSimplifyImpl(IRP, A) {}
4668
4669 void initialize(Attributor &A) override {
4670 AAValueSimplifyImpl::initialize(A);
4671 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4672 indicatePessimisticFixpoint();
4673 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4674 Attribute::StructRet, Attribute::Nest},
4675 /* IgnoreSubsumingPositions */ true))
4676 indicatePessimisticFixpoint();
4677
4678 // FIXME: This is a hack to prevent us from propagating function pointers in
4679 // the new pass manager CGSCC pass as it creates call edges the
4680 // CallGraphUpdater cannot handle yet.
4681 Value &V = getAssociatedValue();
4682 if (V.getType()->isPointerTy() &&
4683 V.getType()->getPointerElementType()->isFunctionTy() &&
4684 !A.isModulePass())
4685 indicatePessimisticFixpoint();
4686 }
4687
4688 /// See AbstractAttribute::updateImpl(...).
4689 ChangeStatus updateImpl(Attributor &A) override {
4690 // Byval is only replaceable if it is readonly; otherwise we would write into
4691 // the replaced value and not the copy that byval creates implicitly.
4692 Argument *Arg = getAssociatedArgument();
4693 if (Arg->hasByValAttr()) {
4694 // TODO: We probably need to verify synchronization is not an issue, e.g.,
4695 // there is no race by not copying a constant byval.
4696 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4697 DepClassTy::REQUIRED);
4698 if (!MemAA.isAssumedReadOnly())
4699 return indicatePessimisticFixpoint();
4700 }
4701
4702 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4703
4704 auto PredForCallSite = [&](AbstractCallSite ACS) {
4705 const IRPosition &ACSArgPos =
4706 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4707 // Check if a corresponding argument was found or if it is not
4708 // associated (which can happen for callback calls).
4709 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4710 return false;
4711
4712 // We can only propagate thread independent values through callbacks.
4713 // This is different to direct/indirect call sites because for them we
4714 // know the thread executing the caller and callee is the same. For
4715 // callbacks this is not guaranteed, thus a thread dependent value could
4716 // be different for the caller and callee, making it invalid to propagate.
4717 Value &ArgOp = ACSArgPos.getAssociatedValue();
4718 if (ACS.isCallbackCall())
4719 if (auto *C = dyn_cast<Constant>(&ArgOp))
4720 if (C->isThreadDependent())
4721 return false;
4722 return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4723 };
4724
4725 bool AllCallSitesKnown;
4726 if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4727 AllCallSitesKnown))
4728 if (!askSimplifiedValueForOtherAAs(A))
4729 return indicatePessimisticFixpoint();
4730
4731 // If a candidate was found in this update, return CHANGED.
4732 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4733 ? ChangeStatus::UNCHANGED
4734 : ChangeStatus::CHANGED;
4735 }
4736
4737 /// See AbstractAttribute::trackStatistics()
4738 void trackStatistics() const override {
4739 STATS_DECLTRACK_ARG_ATTR(value_simplify)
4740 }
4741};
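The callback restriction in the updateImpl above can be restated compactly. A sketch of the guard, under the assumption stated in the comment that only callbacks may run on a different thread than the caller (mayPropagateThroughCallback is a hypothetical helper, not part of this file):

#include "llvm/IR/AbstractCallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Only thread-independent values may flow from a callback call site into the
// callee argument, because caller and callee are not guaranteed to execute on
// the same thread; direct and indirect calls are unaffected.
static bool mayPropagateThroughCallback(const AbstractCallSite &ACS,
                                        Value &ArgOp) {
  if (!ACS.isCallbackCall())
    return true;                    // same thread for direct/indirect calls
  if (auto *C = dyn_cast<Constant>(&ArgOp))
    return !C->isThreadDependent(); // e.g. not the address of a thread_local
  return true;                      // non-constants are handled by unification
}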
4742
4743struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4744 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4745 : AAValueSimplifyImpl(IRP, A) {}
4746
4747 /// See AbstractAttribute::updateImpl(...).
4748 ChangeStatus updateImpl(Attributor &A) override {
4749 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4750
4751 auto PredForReturned = [&](Value &V) {
4752 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4753 };
4754
4755 if (!A.checkForAllReturnedValues(PredForReturned, *this))
4756 if (!askSimplifiedValueForOtherAAs(A))
4757 return indicatePessimisticFixpoint();
4758
4759 // If a candidate was found in this update, return CHANGED.
4760 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4761 ? ChangeStatus::UNCHANGED
4762 : ChangeStatus::CHANGED;
4763 }
4764
4765 ChangeStatus manifest(Attributor &A) override {
4766 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4767
4768 if (SimplifiedAssociatedValue.hasValue() &&
4769 !SimplifiedAssociatedValue.getValue())
4770 return Changed;
4771
4772 Value &V = getAssociatedValue();
4773 auto *C = SimplifiedAssociatedValue.hasValue()
4774 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4775 : UndefValue::get(V.getType());
4776 if (C) {
4777 auto PredForReturned =
4778 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4779 // We can replace the AssociatedValue with the constant.
4780 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4781 return true;
4782
4783 for (ReturnInst *RI : RetInsts) {
4784 if (RI->getFunction() != getAnchorScope())
4785 continue;
4786 auto *RC = C;
4787 if (RC->getType() != RI->getReturnValue()->getType())
4788 RC = ConstantExpr::getBitCast(RC,
4789 RI->getReturnValue()->getType());
4790 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4791 << " in " << *RI << " :: " << *this << "\n");
4792 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4793 Changed = ChangeStatus::CHANGED;
4794 }
4795 return true;
4796 };
4797 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4798 }
4799
4800 return Changed | AAValueSimplify::manifest(A);
4801 }
4802
4803 /// See AbstractAttribute::trackStatistics()
4804 void trackStatistics() const override {
4805 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4806 }
4807};
4808
4809struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4810 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4811 : AAValueSimplifyImpl(IRP, A) {}
4812
4813 /// See AbstractAttribute::initialize(...).
4814 void initialize(Attributor &A) override {
4815 // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4816 // Needs investigation.
4817 // AAValueSimplifyImpl::initialize(A);
4818 Value &V = getAnchorValue();
4819
4820 // TODO: add other cases.
4821 if (isa<Constant>(V))
4822 indicatePessimisticFixpoint();
4823 }
4824
4825 /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4826 /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4827 /// Return true if successful, in that case SimplifiedAssociatedValue will be
4828 /// updated and \p Changed is set appropriately.
4829 bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4830 ChangeStatus &Changed) {
4831 if (!ICmp)
4832 return false;
4833 if (!ICmp->isEquality())
4834 return false;
4835
4836 // This is a comparison with == or !=. We check for nullptr now.
4837 bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4838 bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4839 if (!Op0IsNull && !Op1IsNull)
4840 return false;
4841
4842 LLVMContext &Ctx = ICmp->getContext();
4843 // Check for `nullptr ==/!= nullptr` first:
4844 if (Op0IsNull && Op1IsNull) {
4845 Value *NewVal = ConstantInt::get(
4846 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4847 assert(!SimplifiedAssociatedValue.hasValue() &&
4848 "Did not expect non-fixed value for constant comparison");
4849 SimplifiedAssociatedValue = NewVal;
4850 indicateOptimisticFixpoint();
4851 Changed = ChangeStatus::CHANGED;
4852 return true;
4853 }
4854
4855 // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4856 // non-nullptr operand and if we assume it's non-null we can conclude the
4857 // result of the comparison.
4858 assert((Op0IsNull || Op1IsNull) &&
4859 "Expected nullptr versus non-nullptr comparison at this point");
4860
4861 // PtrIdx is the index of the operand that we assume is not null.
4862 unsigned PtrIdx = Op0IsNull;
4863 auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4864 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4865 DepClassTy::REQUIRED);
4866 if (!PtrNonNullAA.isAssumedNonNull())
4867 return false;
4868
4869 // The new value depends on the predicate, true for != and false for ==.
4870 Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4871 ICmp->getPredicate() == CmpInst::ICMP_NE);
4872
4873 assert((!SimplifiedAssociatedValue.hasValue() ||
4874 SimplifiedAssociatedValue == NewVal) &&
4875 "Did not expect to change value for zero-comparison");
4876
4877 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4878 SimplifiedAssociatedValue = NewVal;
4879
4880 if (PtrNonNullAA.isKnownNonNull())
4881 indicateOptimisticFixpoint();
4882
4883 Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
4884 return true;
4885 }
4886
4887 /// See AbstractAttribute::updateImpl(...).
4888 ChangeStatus updateImpl(Attributor &A) override {
4889 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4890
4891 ChangeStatus Changed;
4892 if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4893 Changed))
4894 return Changed;
4895
4896 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4897 bool Stripped) -> bool {
4898 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V),
4899 DepClassTy::REQUIRED);
4900 if (!Stripped && this == &AA) {
4901 // TODO: Look at the instruction and check recursively.
4902
4903 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4904 << "\n");
4905 return false;
4906 }
4907 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4908 };
4909
4910 bool Dummy = false;
4911 if (!genericValueTraversal<AAValueSimplify, bool>(
4912 A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4913 /* UseValueSimplify */ false))
4914 if (!askSimplifiedValueForOtherAAs(A))
4915 return indicatePessimisticFixpoint();
4916
4917 // If a candidate was found in this update, return CHANGED.
4918
4919 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4920 ? ChangeStatus::UNCHANGED
4921 : ChangeStatus::CHANGED;
4922 }
4923
4924 /// See AbstractAttribute::trackStatistics()
4925 void trackStatistics() const override {
4926 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4927 }
4928};
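checkForNullPtrCompare above reduces to one folding rule: an equality compare of a known-nonnull pointer against nullptr is a constant i1 determined by the predicate alone. A minimal sketch of that rule (illustration only; foldNonNullVsNull is a hypothetical helper, not part of this file):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// An equality compare of a known-nonnull pointer against nullptr folds to a
// constant i1 whose value depends only on the predicate: true for !=, false
// for ==.
static Constant *foldNonNullVsNull(ICmpInst &ICmp, bool OtherOpKnownNonNull) {
  if (!ICmp.isEquality() || !OtherOpKnownNonNull)
    return nullptr; // not an ==/!= compare, or nonnull is not established
  return ConstantInt::get(Type::getInt1Ty(ICmp.getContext()),
                          ICmp.getPredicate() == CmpInst::ICMP_NE);
}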
4929
4930struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4931 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4932 : AAValueSimplifyImpl(IRP, A) {}
4933
4934 /// See AbstractAttribute::initialize(...).
4935 void initialize(Attributor &A) override {
4936 SimplifiedAssociatedValue = &getAnchorValue();
4937 indicateOptimisticFixpoint();
4938 }
4939 /// See AbstractAttribute::updateImpl(...).
4940 ChangeStatus updateImpl(Attributor &A) override {
4941 llvm_unreachable(
4942 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4943 }
4944 /// See AbstractAttribute::trackStatistics()
4945 void trackStatistics() const override {
4946 STATS_DECLTRACK_FN_ATTR(value_simplify)
4947 }
4948};
4949
4950struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4951 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4952 : AAValueSimplifyFunction(IRP, A) {}
4953 /// See AbstractAttribute::trackStatistics()
4954 void trackStatistics() const override {
4955 STATS_DECLTRACK_CS_ATTR(value_simplify)
4956 }
4957};
4958
4959struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4960 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4961 : AAValueSimplifyReturned(IRP, A) {}
4962
4963 /// See AbstractAttribute::manifest(...).
4964 ChangeStatus manifest(Attributor &A) override {
4965 return AAValueSimplifyImpl::manifest(A);
4966 }
4967
4968 void trackStatistics() const override {
4969 STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4970 }
4971};
4972struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4973 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4974 : AAValueSimplifyFloating(IRP, A) {}
4975
4976 /// See AbstractAttribute::manifest(...).
4977 ChangeStatus manifest(Attributor &A) override {
4978 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4979
4980 if (SimplifiedAssociatedValue.hasValue() &&
4981 !SimplifiedAssociatedValue.getValue())
4982 return Changed;
4983
4984 Value &V = getAssociatedValue();
4985 auto *C = SimplifiedAssociatedValue.hasValue()
4986 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4987 : UndefValue::get(V.getType());
4988 if (C) {
4989 Use &U = cast<CallBase>(&getAnchorValue())
4990 ->getArgOperandUse(getCallSiteArgNo());
4991 // We can replace the AssociatedValue with the constant.
4992 if (&V != C && V.getType() == C->getType()) {
4993 if (A.changeUseAfterManifest(U, *C))
4994 Changed = ChangeStatus::CHANGED;
4995 }
4996 }
4997
4998 return Changed | AAValueSimplify::manifest(A);
4999 }
5000
5001 void trackStatistics() const override {
5002 STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5003 }
5004};
5005
5006/// ----------------------- Heap-To-Stack Conversion ---------------------------
5007struct AAHeapToStackImpl : public AAHeapToStack {
5008 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5009 : AAHeapToStack(IRP, A) {}
5010
5011 const std::string getAsStr() const override {
5012 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5013 }
5014
5015 ChangeStatus manifest(Attributor &A) override {
5016 assert(getState().isValidState() &&
5017 "Attempted to manifest an invalid state!");
5018
5019 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5020 Function *F = getAnchorScope();
5021 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5022
5023 for (Instruction *MallocCall : MallocCalls) {
5024 // This malloc cannot be replaced.
5025 if (BadMallocCalls.count(MallocCall))
5026 continue;
5027
5028 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5029 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5030 A.deleteAfterManifest(*FreeCall);
5031 HasChanged = ChangeStatus::CHANGED;
Value stored to 'HasChanged' is never read
5032 }
5033
5034 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5035 << "\n");
5036
5037 Align Alignment;
5038 Value *Size;
5039 if (isCallocLikeFn(MallocCall, TLI)) {
5040 auto *Num = MallocCall->getOperand(0);
5041 auto *SizeT = MallocCall->getOperand(1);
5042 IRBuilder<> B(MallocCall);
5043 Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5044 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5045 Size = MallocCall->getOperand(1);
5046 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5047 ->getValue()
5048 .getZExtValue())
5049 .valueOrOne();
5050 } else {
5051 Size = MallocCall->getOperand(0);
5052 }
5053
5054 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5055 Instruction *AI =
5056 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5057 "", MallocCall->getNextNode());
5058
5059 if (AI->getType() != MallocCall->getType())
5060 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5061 AI->getNextNode());
5062
5063 A.changeValueAfterManifest(*MallocCall, *AI);
5064
5065 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5066 auto *NBB = II->getNormalDest();
5067 BranchInst::Create(NBB, MallocCall->getParent());
5068 A.deleteAfterManifest(*MallocCall);
5069 } else {
5070 A.deleteAfterManifest(*MallocCall);
5071 }
5072
5073 // Zero out the allocated memory if it was a calloc.
5074 if (isCallocLikeFn(MallocCall, TLI)) {
5075 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5076 AI->getNextNode());
5077 Value *Ops[] = {
5078 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5079 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5080
5081 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5082 Module *M = F->getParent();
5083 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5084 CallInst::Create(Fn, Ops, "", BI->getNextNode());
5085 }
5086 HasChanged = ChangeStatus::CHANGED;
5087 }
5088
5089 return HasChanged;
5090 }
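About the report at line 5031: HasChanged is assigned ChangeStatus::CHANGED inside the free-removal loop, but it is assigned the same value unconditionally at line 5086 for every malloc call that is not in BadMallocCalls, and its only read is the return at line 5089, so the inner store can never be observed. A minimal, behavior-preserving way to address the warning (a sketch of the inner loop above, not necessarily the upstream fix) is to drop the assignment there:

for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
  LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
  A.deleteAfterManifest(*FreeCall);
  // No HasChanged update here: the assignment after this loop already records
  // CHANGED for every malloc call that is not in BadMallocCalls.
}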
5091
5092 /// Collection of all malloc calls in a function.
5093 SmallSetVector<Instruction *, 4> MallocCalls;
5094
5095 /// Collection of malloc calls that cannot be converted.
5096 DenseSet<const Instruction *> BadMallocCalls;
5097
5098 /// A map for each malloc call to the set of associated free calls.
5099 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5100
5101 ChangeStatus updateImpl(Attributor &A) override;
5102};
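The calloc path in manifest above builds the memset intrinsic declaration by hand. For reference, a sketch of the same zero-initialization step expressed through IRBuilder, which selects the intrinsic and its type arguments itself (illustration only; zeroInitAlloca is hypothetical and using the alloca's own alignment is an assumption of this sketch):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Zero-initialize the replacement alloca for the calloc case; CreateMemSet
// picks the memset intrinsic and its type arguments.
static void zeroInitAlloca(AllocaInst *AI, Value *Size) {
  IRBuilder<> B(AI->getNextNode()); // assumes AI is not the last instruction
  B.CreateMemSet(AI, B.getInt8(0), Size, AI->getAlign());
}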
5103
5104ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5105 const Function *F = getAnchorScope();
5106 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5107
5108 MustBeExecutedContextExplorer &Explorer =
5109 A.getInfoCache().getMustBeExecutedContextExplorer();
5110
5111 auto FreeCheck = [&](Instruction &I) {
5112 const auto &Frees = FreesForMalloc.lookup(&I);
5113 if (Frees.size() != 1)
5114 return false;
5115 Instruction *UniqueFree = *Frees.begin();
5116 return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5117 };
5118
5119 auto UsesCheck = [&](Instruction &I) {
5120 bool ValidUsesOnly = true;
5121 bool MustUse = true;
5122 auto Pred = [&](const Use &U, bool &Follow) -> bool {
5123 Instruction *UserI = cast<Instruction>(U.getUser());
5124 if (isa<LoadInst>(UserI))
5125 return true;
5126 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5127 if (SI->getValueOperand() == U.get()) {
5128 LLVM_DEBUG(dbgs()
5129 << "[H2S] escaping store to memory: " << *UserI << "\n");
5130 ValidUsesOnly = false;
5131 } else {
5132 // A store into the malloc'ed memory is fine.
5133 }
5134 return true;
5135 }
5136 if (auto *CB = dyn_cast<CallBase>(UserI)) {
5137 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5138 return true;
5139 // Record the free call associated with this allocation.
5140 if (isFreeCall(UserI, TLI)) {
5141 if (MustUse) {
5142 FreesForMalloc[&I].insert(UserI);
5143 } else {
5144 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5145 << *UserI << "\n");
5146 ValidUsesOnly = false;
5147 }
5148 return true;
5149 }
5150
5151 unsigned ArgNo = CB->getArgOperandNo(&U);
5152
5153 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5154 *this, IRPosition::callsite_argument(*CB, ArgNo),
5155 DepClassTy::REQUIRED);
5156
5157 // If a callsite argument use is nofree, we are fine.
5158 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5159 *this, IRPosition::callsite_argument(*CB, ArgNo),
5160 DepClassTy::REQUIRED);
5161
5162 if (!NoCaptureAA.isAssumedNoCapture() ||
5163 !ArgNoFreeAA.isAssumedNoFree()) {
5164 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5165 ValidUsesOnly = false;
5166 }
5167 return true;
5168 }
5169
5170 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5171 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5172 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5173 Follow = true;
5174 return true;
5175 }
5176 // Unknown user for which we can not track uses further (in a way that
5177 // makes sense).
5178 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5179 ValidUsesOnly = false;
5180 return true;
5181 };
5182 A.checkForAllUses(Pred, *this, I);
5183 return ValidUsesOnly;
5184 };
5185
5186 auto MallocCallocCheck = [&](Instruction &I) {
5187 if (BadMallocCalls.count(&I))
5188 return true;
5189
5190 bool IsMalloc = isMallocLikeFn(&I, TLI);
5191 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5192 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5193 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5194 BadMallocCalls.insert(&I);
5195 return true;
5196 }
5197
5198 if (IsMalloc) {
5199 if (MaxHeapToStackSize == -1) {
5200 if (UsesCheck(I) || FreeCheck(I)) {
5201 MallocCalls.insert(&I);
5202 return true;
5203 }
5204 }
5205 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5206 if (Size->getValue().ule(MaxHeapToStackSize))
5207 if (UsesCheck(I) || FreeCheck(I)) {
5208 MallocCalls.insert(&I);
5209 return true;
5210 }
5211 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5212 if (MaxHeapToStackSize == -1) {
5213 if (UsesCheck(I) || FreeCheck(I)) {
5214 MallocCalls.insert(&I);
5215 return true;
5216 }
5217 }
5218 // Only if the alignment and sizes are constant.
5219 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5220 if (Size->getValue().ule(MaxHeapToStackSize))
5221 if (UsesCheck(I) || FreeCheck(I)) {
5222 MallocCalls.insert(&I);
5223 return true;
5224 }
5225 } else if (IsCalloc) {
5226 if (MaxHeapToStackSize == -1) {
5227 if (UsesCheck(I) || FreeCheck(I)) {
5228 MallocCalls.insert(&I);
5229 return true;
5230 }
5231 }
5232 bool Overflow = false;
5233 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5234 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5235 if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5236 .ule(MaxHeapToStackSize))
5237 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5238 MallocCalls.insert(&I);
5239 return true;
5240 }
5241 }
5242
5243 BadMallocCalls.insert(&I);
5244 return true;
5245 };
5246
5247 size_t NumBadMallocs = BadMallocCalls.size();
5248
5249 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5250
5251 if (NumBadMallocs != BadMallocCalls.size())
5252 return ChangeStatus::CHANGED;
5253
5254 return ChangeStatus::UNCHANGED;
5255}
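The calloc branch in updateImpl above multiplies nmemb * size with overflow detection before comparing against the MaxHeapToStackSize threshold. A small worked sketch of that check (illustration only; fitsOnStack and the example numbers are hypothetical):

#include "llvm/ADT/APInt.h"
#include <cstdint>
using namespace llvm;

// Accept the allocation only if the product does not overflow 64 bits and
// stays at or below the size threshold, mirroring the calloc branch above.
static bool fitsOnStack(uint64_t Nmemb, uint64_t ElemSize, uint64_t MaxSize) {
  bool Overflow = false;
  APInt Total = APInt(64, ElemSize).umul_ov(APInt(64, Nmemb), Overflow);
  return !Overflow && Total.ule(MaxSize);
}
// Example: fitsOnStack(16, 8, 128) is true; fitsOnStack(1 << 20, 8192, 128) is false.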
5256
5257struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5258 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5259 : AAHeapToStackImpl(IRP, A) {}
5260
5261 /// See AbstractAttribute::trackStatistics().
5262 void trackStatistics() const override {
5263 STATS_DECL(
5264 MallocCalls, Function,
5265 "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5266 for (auto *C : MallocCalls)
5267 if (!BadMallocCalls.count(C))
5268 ++BUILD_STAT_NAME(MallocCalls, Function);
5269 }
5270};
5271
5272/// ----------------------- Privatizable Pointers ------------------------------
5273struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5274 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5275 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5276
5277 ChangeStatus indicatePessimisticFixpoint() override {
5278 AAPrivatizablePtr::indicatePessimisticFixpoint();
5279 PrivatizableType = nullptr;
5280 return ChangeStatus::CHANGED;
5281 }
5282
5283 /// Identify the type we can choose for a private copy of the underlying
5284 /// argument. None means it is not clear yet, nullptr means there is none.
5285 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5286
5287 /// Return a privatizable type that encloses both T0 and T1.
5288 /// TODO: This is merely a stub for now as we should manage a mapping as well.
5289 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5290 if (!T0.hasValue())
5291 return T1;
5292 if (!T1.hasValue())
5293 return T0;
5294 if (T0 == T1)
5295 return T0;
5296 return nullptr;
5297 }
5298
5299 Optional<Type *> getPrivatizableType() const override {
5300 return PrivatizableType;
5301 }
5302
5303 const std::string getAsStr() const override {
5304 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5305 }
5306
5307protected:
5308 Optional<Type *> PrivatizableType;
5309};
5310
5311// TODO: Do this for call site arguments (probably also other values) as well.
5312
5313struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5314 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5315 : AAPrivatizablePtrImpl(IRP, A) {}
5316
5317 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5318 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5319 // If this is a byval argument and we know all the call sites (so we can
5320 // rewrite them), there is no need to check them explicitly.
5321 bool AllCallSitesKnown;
5322 if (getIRPosition().hasAttr(Attribute::ByVal) &&
5323 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5324 true, AllCallSitesKnown))
5325 return getAssociatedValue().getType()->getPointerElementType();
5326
5327 Optional<Type *> Ty;
5328 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5329
5330 // Make sure the associated call site argument has the same type at all call
5331 // sites and that it is an allocation we know is safe to privatize; for now that
5332 // means we only allow alloca instructions.
5333 // TODO: We can additionally analyze the accesses in the callee to create
5334 // the type from that information instead. That is a little more
5335 // involved and will be done in a follow up patch.
5336 auto CallSiteCheck = [&](AbstractCallSite ACS) {
5337 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5338 // Check if a coresponding argument was found or if it is one not
5339 // associated (which can happen for callback calls).
5340 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5341 return false;
5342
5343 // Check that all call sites agree on a type.
5344 auto &PrivCSArgAA =
5345 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5346 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5347
5348 LLVM_DEBUG({
5349 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5350 if (CSTy.hasValue() && CSTy.getValue())
5351 CSTy.getValue()->print(dbgs());
5352 else if (CSTy.hasValue())
5353 dbgs() << "<nullptr>";
5354 else
5355 dbgs() << "<none>";
5356 });
5357
5358 Ty = combineTypes(Ty, CSTy);
5359
5360 LLVM_DEBUG({
5361 dbgs() << " : New Type: ";
5362 if (Ty.hasValue() && Ty.getValue())
5363 Ty.getValue()->print(dbgs());
5364 else if (Ty.hasValue())
5365 dbgs() << "<nullptr>";
5366 else
5367 dbgs() << "<none>";
5368 dbgs() << "\n";
5369 });
5370
5371 return !Ty.hasValue() || Ty.getValue();
5372 };
5373
5374 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5375 return nullptr;
5376 return Ty;
5377 }
5378
5379 /// See AbstractAttribute::updateImpl(...).
5380 ChangeStatus updateImpl(Attributor &A) override {
5381 PrivatizableType = identifyPrivatizableType(A);
5382 if (!PrivatizableType.hasValue())
5383 return ChangeStatus::UNCHANGED;
5384 if (!PrivatizableType.getValue())
5385 return indicatePessimisticFixpoint();
5386
5387 // The dependence is optional so we don't give up once we give up on the
5388 // alignment.
5389 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5390 DepClassTy::OPTIONAL);
5391
5392 // Avoid arguments with padding for now.
5393 if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5394 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5395 A.getInfoCache().getDL())) {
5396 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5397 return indicatePessimisticFixpoint();
5398 }
5399
5400 // Verify callee and caller agree on how the promoted argument would be
5401 // passed.
5402 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5403 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5404 // which doesn't require the arguments ArgumentPromotion wanted to pass.
5405 Function &Fn = *getIRPosition().getAnchorScope();
5406 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5407 ArgsToPromote.insert(getAssociatedArgument());
5408 const auto *TTI =
5409 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5410 if (!TTI ||
5411 !ArgumentPromotionPass::areFunctionArgsABICompatible(
5412 Fn, *TTI, ArgsToPromote, Dummy) ||
5413 ArgsToPromote.empty()) {
5414 LLVM_DEBUG(
5415 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5416 << Fn.getName() << "\n");
5417 return indicatePessimisticFixpoint();
5418 }
5419
5420 // Collect the types that will replace the privatizable type in the function
5421 // signature.
5422 SmallVector<Type *, 16> ReplacementTypes;
5423 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5424
5425 // Register a rewrite of the argument.
5426 Argument *Arg = getAssociatedArgument();
5427 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5428 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5429 return indicatePessimisticFixpoint();
5430 }
5431
5432 unsigned ArgNo = Arg->getArgNo();
5433
5434 // Helper to check if for the given call site the associated argument is
5435 // passed to a callback where the privatization would be different.
5436 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5437 SmallVector<const Use *, 4> CallbackUses;
5438 AbstractCallSite::getCallbackUses(CB, CallbackUses);
5439 for (const Use *U : CallbackUses) {
5440 AbstractCallSite CBACS(U);
5441 assert(CBACS && CBACS.isCallbackCall());
5442 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5443 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5444
5445 LLVM_DEBUG({
5446 dbgs()
5447 << "[AAPrivatizablePtr] Argument " << *Arg
5448 << "check if can be privatized in the context of its parent ("
5449 << Arg->getParent()->getName()
5450 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5451 "callback ("
5452 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5453 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5454 << CBACS.getCallArgOperand(CBArg) << " vs "
5455 << CB.getArgOperand(ArgNo) << "\n"
5456 << "[AAPrivatizablePtr] " << CBArg << " : "
5457 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5458 });
5459
5460 if (CBArgNo != int(ArgNo))
5461 continue;
5462 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5463 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5464 if (CBArgPrivAA.isValidState()) {
5465 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5466 if (!CBArgPrivTy.hasValue())
5467 continue;
5468 if (CBArgPrivTy.getValue() == PrivatizableType)
5469 continue;
5470 }
5471
5472 LLVM_DEBUG({
5473 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5474 << " cannot be privatized in the context of its parent ("
5475 << Arg->getParent()->getName()
5476 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5477 "callback ("
5478 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5479 << ").\n[AAPrivatizablePtr] for which the argument "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument "
<< *Arg << " cannot be privatized in the context of its parent ("
<< Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a "
"callback (" << CBArgNo << "@" << CBACS.getCalledFunction
()->getName() << ").\n[AAPrivatizablePtr] for which the argument "
"privatization is not compatible.\n"; }; } } while (false)
5480 "privatization is not compatible.\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument "
<< *Arg << " cannot be privatized in the context of its parent ("
<< Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a "
"callback (" << CBArgNo << "@" << CBACS.getCalledFunction
()->getName() << ").\n[AAPrivatizablePtr] for which the argument "
"privatization is not compatible.\n"; }; } } while (false)
5481 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument "
<< *Arg << " cannot be privatized in the context of its parent ("
<< Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a "
"callback (" << CBArgNo << "@" << CBACS.getCalledFunction
()->getName() << ").\n[AAPrivatizablePtr] for which the argument "
"privatization is not compatible.\n"; }; } } while (false)
;
5482 return false;
5483 }
5484 }
5485 return true;
5486 };
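A condensed restatement of the compatibility rule enforced above may help: privatization at this call site is treated as compatible only when every callback argument that maps to it has a valid AAPrivatizablePtr state whose privatizable type is either still unknown or identical to the type proposed here; an invalid state or a differing known type makes the lambda return false. The standalone sketch below is illustrative only (the helper name isCompatiblePrivatization and its free-function form are assumptions, not part of AttributorAttributes.cpp):

#include "llvm/ADT/Optional.h"

// Hypothetical restatement of the rule checked in the loop above: a conflict
// exists only when the callback argument's state is invalid, or when it
// already has a deduced privatizable type that differs from the proposed one.
template <typename TypeT>
static bool isCompatiblePrivatization(bool CBArgStateIsValid,
                                      const llvm::Optional<TypeT> &CBArgPrivTy,
                                      const TypeT &ProposedPrivTy) {
  if (!CBArgStateIsValid)
    return false;                 // invalid state: treat as a conflict
  if (!CBArgPrivTy.hasValue())
    return true;                  // nothing deduced yet: no conflict
  return CBArgPrivTy.getValue() == ProposedPrivTy; // types must match exactly
}

In the original loop this decision is made per callback argument CBArg whose callback argument number equals ArgNo, and a single incompatible argument is enough to reject privatization.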
5487
5488 // Helper to check if for the given call site the associated argument is
5489 // passed to a direct call where the privatization would be different.
5490 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5491 CallBase *DC = cast<CallBase>(ACS.getInstruction());