Bug Summary

File: llvm/include/llvm/Analysis/ValueTracking.h
Warning: line 290, column 49
Called C++ object pointer is null
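
This checker fires on execution paths where a pointer that may be null is used as the implicit object of a member call. The sketch below is illustrative only; it is not the code at ValueTracking.h:290, and all names in it are hypothetical.

struct Query {
  bool Valid = true;
  bool isValid() const { return Valid; }
};

// If a caller can pass nullptr, Q->isValid() invokes a member function on a
// null object pointer, which is the defect class this checker reports.
bool check(const Query *Q) { return Q->isValid(); }

int main() { return check(nullptr); }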

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AttributorAttributes.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO/AttributorAttributes.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO/AttributorAttributes.cpp

1//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// See the Attributor.h file comment and the class descriptions in that file for
10// more information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/IPO/Attributor.h"
15
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/SCCIterator.h"
18#include "llvm/ADT/SetOperations.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumeBundleQueries.h"
23#include "llvm/Analysis/AssumptionCache.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/InstructionSimplify.h"
26#include "llvm/Analysis/LazyValueInfo.h"
27#include "llvm/Analysis/MemoryBuiltins.h"
28#include "llvm/Analysis/OptimizationRemarkEmitter.h"
29#include "llvm/Analysis/ScalarEvolution.h"
30#include "llvm/Analysis/TargetTransformInfo.h"
31#include "llvm/Analysis/ValueTracking.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/IRBuilder.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/NoFolder.h"
39#include "llvm/Support/Alignment.h"
40#include "llvm/Support/Casting.h"
41#include "llvm/Support/CommandLine.h"
42#include "llvm/Support/ErrorHandling.h"
43#include "llvm/Support/FileSystem.h"
44#include "llvm/Support/raw_ostream.h"
45#include "llvm/Transforms/IPO/ArgumentPromotion.h"
46#include "llvm/Transforms/Utils/Local.h"
47#include <cassert>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "attributor"
52
53static cl::opt<bool> ManifestInternal(
54 "attributor-manifest-internal", cl::Hidden,
55 cl::desc("Manifest Attributor internal string attributes."),
56 cl::init(false));
57
58static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
59 cl::Hidden);
60
61template <>
62unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
63
64static cl::opt<unsigned, true> MaxPotentialValues(
65 "attributor-max-potential-values", cl::Hidden,
66 cl::desc("Maximum number of potential values to be "
67 "tracked for each position."),
68 cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
69 cl::init(7));
70
71STATISTIC(NumAAs, "Number of abstract attributes created");
72
73// Some helper macros to deal with statistics tracking.
74//
75// Usage:
76// For simple IR attribute tracking overload trackStatistics in the abstract
77// attribute and choose the right STATS_DECLTRACK_********* macro,
78// e.g.,:
79// void trackStatistics() const override {
80// STATS_DECLTRACK_ARG_ATTR(returned)
81// }
82// If there is a single "increment" side one can use the macro
83// STATS_DECLTRACK with a custom message. If there are multiple increment
84// sides, STATS_DECL and STATS_TRACK can also be used separately.
85//
86#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
87 ("Number of " #TYPE " marked '" #NAME "'")
88#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
89#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
90#define STATS_DECL(NAME, TYPE, MSG) \
91 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
92#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
93#define STATS_DECLTRACK(NAME, TYPE, MSG) \
94 { \
95 STATS_DECL(NAME, TYPE, MSG) \
96 STATS_TRACK(NAME, TYPE) \
97 }
98#define STATS_DECLTRACK_ARG_ATTR(NAME) \
99 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
100#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
101 STATS_DECLTRACK(NAME, CSArguments, \
102 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
103#define STATS_DECLTRACK_FN_ATTR(NAME) \
104 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
105#define STATS_DECLTRACK_CS_ATTR(NAME) \
106 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
107#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
108 STATS_DECLTRACK(NAME, FunctionReturn, \
109 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
110#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
111 STATS_DECLTRACK(NAME, CSReturn, \
112 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
113#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
114 STATS_DECLTRACK(NAME, Floating, \
115 ("Number of floating values known to be '" #NAME "'"))
116
117// Specialization of the operator<< for abstract attributes subclasses. This
118// disambiguates situations where multiple operators are applicable.
119namespace llvm {
120#define PIPE_OPERATOR(CLASS) \
121 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
122 return OS << static_cast<const AbstractAttribute &>(AA); \
123 }
124
125PIPE_OPERATOR(AAIsDead)
126PIPE_OPERATOR(AANoUnwind)
127PIPE_OPERATOR(AANoSync)
128PIPE_OPERATOR(AANoRecurse)
129PIPE_OPERATOR(AAWillReturn)
130PIPE_OPERATOR(AANoReturn)
131PIPE_OPERATOR(AAReturnedValues)
132PIPE_OPERATOR(AANonNull)
133PIPE_OPERATOR(AANoAlias)
134PIPE_OPERATOR(AADereferenceable)
135PIPE_OPERATOR(AAAlign)
136PIPE_OPERATOR(AANoCapture)
137PIPE_OPERATOR(AAValueSimplify)
138PIPE_OPERATOR(AANoFree)
139PIPE_OPERATOR(AAHeapToStack)
140PIPE_OPERATOR(AAReachability)
141PIPE_OPERATOR(AAMemoryBehavior)
142PIPE_OPERATOR(AAMemoryLocation)
143PIPE_OPERATOR(AAValueConstantRange)
144PIPE_OPERATOR(AAPrivatizablePtr)
145PIPE_OPERATOR(AAUndefinedBehavior)
146PIPE_OPERATOR(AAPotentialValues)
147PIPE_OPERATOR(AANoUndef)
148PIPE_OPERATOR(AACallEdges)
149PIPE_OPERATOR(AAFunctionReachability)
150PIPE_OPERATOR(AAPointerInfo)
151PIPE_OPERATOR(AAAssumptionInfo)
152
153#undef PIPE_OPERATOR
154
155template <>
156ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
157 const DerefState &R) {
158 ChangeStatus CS0 =
159 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
160 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
161 return CS0 | CS1;
162}
163
164} // namespace llvm
165
166/// Get pointer operand of memory accessing instruction. If \p I is
167/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
168/// is set to false and the instruction is volatile, return nullptr.
169static const Value *getPointerOperand(const Instruction *I,
170 bool AllowVolatile) {
171 if (!AllowVolatile && I->isVolatile())
172 return nullptr;
173
174 if (auto *LI = dyn_cast<LoadInst>(I)) {
175 return LI->getPointerOperand();
176 }
177
178 if (auto *SI = dyn_cast<StoreInst>(I)) {
179 return SI->getPointerOperand();
180 }
181
182 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
183 return CXI->getPointerOperand();
184 }
185
186 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
187 return RMWI->getPointerOperand();
188 }
189
190 return nullptr;
191}
192
193/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
194/// advanced by \p Offset bytes. To aid later analysis the method tries to build
195/// getelement pointer instructions that traverse the natural type of \p Ptr if
196/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
197/// through a cast to i8*.
198///
199/// TODO: This could probably live somewhere more prominently if it doesn't
200/// already exist.
201static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
202 int64_t Offset, IRBuilder<NoFolder> &IRB,
203 const DataLayout &DL) {
204 assert(Offset >= 0 && "Negative offset not supported yet!");
205 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
206 << "-bytes as " << *ResTy << "\n");
207
208 if (Offset) {
209 Type *Ty = PtrElemTy;
210 APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
211 SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
212
213 SmallVector<Value *, 4> ValIndices;
214 std::string GEPName = Ptr->getName().str();
215 for (const APInt &Index : IntIndices) {
216 ValIndices.push_back(IRB.getInt(Index));
217 GEPName += "." + std::to_string(Index.getZExtValue());
218 }
219
220 // Create a GEP for the indices collected above.
221 Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
222
223 // If an offset is left we use byte-wise adjustment.
224 if (IntOffset != 0) {
225 Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
226 Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
227 GEPName + ".b" + Twine(IntOffset.getZExtValue()));
228 }
229 }
230
231 // Ensure the result has the requested type.
232 Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
233
234 LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
235 return Ptr;
236}
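// Illustrative sketch (hypothetical values, not part of the analyzed file):
// for a PtrElemTy of %struct.S = { i32, i32, i64 } and Offset = 8,
// DL.getGEPIndicesForOffset yields the indices {0, 2}, so the helper emits
// roughly "%p.0.2 = getelementptr %struct.S, %struct.S* %p, i64 0, i64 2"
// and no byte-wise i8* adjustment remains; any leftover offset would add an
// extra i8 GEP named "<name>.b<bytes>" as in the code above.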
237
238/// Recursively visit all values that might become \p IRP at some point. This
239/// will be done by looking through cast instructions, selects, phis, and calls
240/// with the "returned" attribute. Once we cannot look through the value any
241/// further, the callback \p VisitValueCB is invoked and passed the current
242/// value, the \p State, and a flag to indicate if we stripped anything.
243/// Stripped means that we unpacked the value associated with \p IRP at least
244/// once. Note that the value used for the callback may still be the value
245/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
246/// we will never visit more values than specified by \p MaxValues.
247template <typename StateTy>
248static bool genericValueTraversal(
249 Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
250 StateTy &State,
251 function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
252 VisitValueCB,
253 const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
254 function_ref<Value *(Value *)> StripCB = nullptr) {
255
256 const AAIsDead *LivenessAA = nullptr;
257 if (IRP.getAnchorScope())
258 LivenessAA = &A.getAAFor<AAIsDead>(
259 QueryingAA,
260 IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
261 DepClassTy::NONE);
262 bool AnyDead = false;
263
264 Value *InitialV = &IRP.getAssociatedValue();
265 using Item = std::pair<Value *, const Instruction *>;
266 SmallSet<Item, 16> Visited;
267 SmallVector<Item, 16> Worklist;
268 Worklist.push_back({InitialV, CtxI});
269
270 int Iteration = 0;
271 do {
272 Item I = Worklist.pop_back_val();
273 Value *V = I.first;
274 CtxI = I.second;
275 if (StripCB)
276 V = StripCB(V);
277
278 // Check if we should process the current value. To prevent endless
279 // recursion keep a record of the values we followed!
280 if (!Visited.insert(I).second)
281 continue;
282
283 // Make sure we limit the compile time for complex expressions.
284 if (Iteration++ >= MaxValues)
285 return false;
286
287 // Explicitly look through calls with a "returned" attribute if we do
288 // not have a pointer as stripPointerCasts only works on them.
289 Value *NewV = nullptr;
290 if (V->getType()->isPointerTy()) {
291 NewV = V->stripPointerCasts();
292 } else {
293 auto *CB = dyn_cast<CallBase>(V);
294 if (CB && CB->getCalledFunction()) {
295 for (Argument &Arg : CB->getCalledFunction()->args())
296 if (Arg.hasReturnedAttr()) {
297 NewV = CB->getArgOperand(Arg.getArgNo());
298 break;
299 }
300 }
301 }
302 if (NewV && NewV != V) {
303 Worklist.push_back({NewV, CtxI});
304 continue;
305 }
306
307 // Look through select instructions, visit assumed potential values.
308 if (auto *SI = dyn_cast<SelectInst>(V)) {
309 bool UsedAssumedInformation = false;
310 Optional<Constant *> C = A.getAssumedConstant(
311 *SI->getCondition(), QueryingAA, UsedAssumedInformation);
312 bool NoValueYet = !C.hasValue();
313 if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
314 continue;
315 if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
316 if (CI->isZero())
317 Worklist.push_back({SI->getFalseValue(), CtxI});
318 else
319 Worklist.push_back({SI->getTrueValue(), CtxI});
320 continue;
321 }
322 // We could not simplify the condition, assume both values.
323 Worklist.push_back({SI->getTrueValue(), CtxI});
324 Worklist.push_back({SI->getFalseValue(), CtxI});
325 continue;
326 }
327
328 // Look through phi nodes, visit all live operands.
329 if (auto *PHI = dyn_cast<PHINode>(V)) {
330 assert(LivenessAA &&
331 "Expected liveness in the presence of instructions!");
332 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
333 BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
334 bool UsedAssumedInformation = false;
335 if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
336 LivenessAA, UsedAssumedInformation,
337 /* CheckBBLivenessOnly */ true)) {
338 AnyDead = true;
339 continue;
340 }
341 Worklist.push_back(
342 {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
343 }
344 continue;
345 }
346
347 if (UseValueSimplify && !isa<Constant>(V)) {
348 bool UsedAssumedInformation = false;
349 Optional<Value *> SimpleV =
350 A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
351 if (!SimpleV.hasValue())
352 continue;
353 if (!SimpleV.getValue())
354 return false;
355 Value *NewV = SimpleV.getValue();
356 if (NewV != V) {
357 Worklist.push_back({NewV, CtxI});
358 continue;
359 }
360 }
361
362 // Once a leaf is reached we inform the user through the callback.
363 if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
364 return false;
365 } while (!Worklist.empty());
366
367 // If we actually used liveness information, we have to record a dependence.
368 if (AnyDead)
369 A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
370
371 // All values have been visited.
372 return true;
373}
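// Hypothetical usage sketch (not part of the analyzed file; the names Flowing,
// A, IRP, and QueryingAA are assumed to be in scope):
//   SmallVector<Value *, 8> Flowing;
//   auto VisitValueCB = [](Value &V, const Instruction *,
//                          SmallVector<Value *, 8> &S, bool Stripped) {
//     S.push_back(&V);
//     return true;
//   };
//   genericValueTraversal<SmallVector<Value *, 8>>(A, IRP, QueryingAA, Flowing,
//                                                  VisitValueCB,
//                                                  /* CtxI */ nullptr);
// AA::getAssumedUnderlyingObjects below is an in-tree caller of this helper.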
374
375bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
376 SmallVectorImpl<Value *> &Objects,
377 const AbstractAttribute &QueryingAA,
378 const Instruction *CtxI) {
379 auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
380 SmallPtrSet<Value *, 8> SeenObjects;
381 auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
382 SmallVectorImpl<Value *> &Objects,
383 bool) -> bool {
384 if (SeenObjects.insert(&Val).second)
385 Objects.push_back(&Val);
386 return true;
387 };
388 if (!genericValueTraversal<decltype(Objects)>(
389 A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
390 true, 32, StripCB))
391 return false;
392 return true;
393}
394
395const Value *stripAndAccumulateMinimalOffsets(
396 Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
397 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
398 bool UseAssumed = false) {
399
400 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
401 const IRPosition &Pos = IRPosition::value(V);
402 // Only track dependence if we are going to use the assumed info.
403 const AAValueConstantRange &ValueConstantRangeAA =
404 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
405 UseAssumed ? DepClassTy::OPTIONAL
406 : DepClassTy::NONE);
407 ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
408 : ValueConstantRangeAA.getKnown();
409 // We can only use the lower part of the range because the upper part can
410 // be higher than what the value can really be.
411 ROffset = Range.getSignedMin();
412 return true;
413 };
414
415 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
416 /* AllowInvariant */ false,
417 AttributorAnalysis);
418}
419
420static const Value *getMinimalBaseOfAccessPointerOperand(
421 Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
422 int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
423 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
424 if (!Ptr)
425 return nullptr;
426 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
427 const Value *Base = stripAndAccumulateMinimalOffsets(
428 A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
429
430 BytesOffset = OffsetAPInt.getSExtValue();
431 return Base;
432}
433
434static const Value *
435getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
436 const DataLayout &DL,
437 bool AllowNonInbounds = false) {
438 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
439 if (!Ptr)
440 return nullptr;
441
442 return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
443 AllowNonInbounds);
444}
445
446/// Clamp the information known for all returned values of a function
447/// (identified by \p QueryingAA) into \p S.
448template <typename AAType, typename StateType = typename AAType::StateType>
449static void clampReturnedValueStates(
450 Attributor &A, const AAType &QueryingAA, StateType &S,
451 const IRPosition::CallBaseContext *CBContext = nullptr) {
452 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
453 << QueryingAA << " into " << S << "\n");
454
455 assert((QueryingAA.getIRPosition().getPositionKind() ==
456 IRPosition::IRP_RETURNED ||
457 QueryingAA.getIRPosition().getPositionKind() ==
458 IRPosition::IRP_CALL_SITE_RETURNED) &&
459 "Can only clamp returned value states for a function returned or call "
460 "site returned position!");
461
462 // Use an optional state as there might not be any return values and we want
463 // to join (IntegerState::operator&) the state of all there are.
464 Optional<StateType> T;
465
466 // Callback for each possibly returned value.
467 auto CheckReturnValue = [&](Value &RV) -> bool {
468 const IRPosition &RVPos = IRPosition::value(RV, CBContext);
469 const AAType &AA =
470 A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
471 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
472 << " @ " << RVPos << "\n");
473 const StateType &AAS = AA.getState();
474 if (T.hasValue())
475 *T &= AAS;
476 else
477 T = AAS;
478 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
479 << "\n");
480 return T->isValidState();
481 };
482
483 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
484 S.indicatePessimisticFixpoint();
485 else if (T.hasValue())
486 S ^= *T;
487}
488
489namespace {
490/// Helper class for generic deduction: return value -> returned position.
491template <typename AAType, typename BaseType,
492 typename StateType = typename BaseType::StateType,
493 bool PropagateCallBaseContext = false>
494struct AAReturnedFromReturnedValues : public BaseType {
495 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
496 : BaseType(IRP, A) {}
497
498 /// See AbstractAttribute::updateImpl(...).
499 ChangeStatus updateImpl(Attributor &A) override {
500 StateType S(StateType::getBestState(this->getState()));
501 clampReturnedValueStates<AAType, StateType>(
502 A, *this, S,
503 PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
504 // TODO: If we know we visited all returned values, thus none are assumed
505 // dead, we can take the known information from the state T.
506 return clampStateAndIndicateChange<StateType>(this->getState(), S);
507 }
508};
509
510/// Clamp the information known at all call sites for a given argument
511/// (identified by \p QueryingAA) into \p S.
512template <typename AAType, typename StateType = typename AAType::StateType>
513static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
514 StateType &S) {
515 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
516 << QueryingAA << " into " << S << "\n");
517
518 assert(QueryingAA.getIRPosition().getPositionKind() ==
519 IRPosition::IRP_ARGUMENT &&
520 "Can only clamp call site argument states for an argument position!");
521
522 // Use an optional state as there might not be any return values and we want
523 // to join (IntegerState::operator&) the state of all there are.
524 Optional<StateType> T;
525
526 // The argument number which is also the call site argument number.
527 unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
528
529 auto CallSiteCheck = [&](AbstractCallSite ACS) {
530 const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
531 // Check if a corresponding argument was found or if it is not associated
532 // (which can happen for callback calls).
533 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
534 return false;
535
536 const AAType &AA =
537 A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
538 LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
539 << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
540 const StateType &AAS = AA.getState();
541 if (T.hasValue())
542 *T &= AAS;
543 else
544 T = AAS;
545 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
546 << "\n");
547 return T->isValidState();
548 };
549
550 bool AllCallSitesKnown;
551 if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
552 AllCallSitesKnown))
553 S.indicatePessimisticFixpoint();
554 else if (T.hasValue())
555 S ^= *T;
556}
557
558/// This function is the bridge between argument position and the call base
559/// context.
560template <typename AAType, typename BaseType,
561 typename StateType = typename AAType::StateType>
562bool getArgumentStateFromCallBaseContext(Attributor &A,
563 BaseType &QueryingAttribute,
564 IRPosition &Pos, StateType &State) {
565 assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
566 "Expected an 'argument' position !");
567 const CallBase *CBContext = Pos.getCallBaseContext();
568 if (!CBContext)
569 return false;
570
571 int ArgNo = Pos.getCallSiteArgNo();
572 assert(ArgNo >= 0 && "Invalid Arg No!");
573
574 const auto &AA = A.getAAFor<AAType>(
575 QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
576 DepClassTy::REQUIRED);
577 const StateType &CBArgumentState =
578 static_cast<const StateType &>(AA.getState());
579
580 LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
581 << "Position:" << Pos << "CB Arg state:" << CBArgumentState
582 << "\n");
583
584 // NOTE: If we want to do call site grouping it should happen here.
585 State ^= CBArgumentState;
586 return true;
587}
588
589/// Helper class for generic deduction: call site argument -> argument position.
590template <typename AAType, typename BaseType,
591 typename StateType = typename AAType::StateType,
592 bool BridgeCallBaseContext = false>
593struct AAArgumentFromCallSiteArguments : public BaseType {
594 AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
595 : BaseType(IRP, A) {}
596
597 /// See AbstractAttribute::updateImpl(...).
598 ChangeStatus updateImpl(Attributor &A) override {
599 StateType S = StateType::getBestState(this->getState());
600
601 if (BridgeCallBaseContext) {
602 bool Success =
603 getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
604 A, *this, this->getIRPosition(), S);
605 if (Success)
606 return clampStateAndIndicateChange<StateType>(this->getState(), S);
607 }
608 clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
609
610 // TODO: If we know we visited all incoming values, thus none are assumed
611 // dead, we can take the known information from the state T.
612 return clampStateAndIndicateChange<StateType>(this->getState(), S);
613 }
614};
615
616/// Helper class for generic replication: function returned -> cs returned.
617template <typename AAType, typename BaseType,
618 typename StateType = typename BaseType::StateType,
619 bool IntroduceCallBaseContext = false>
620struct AACallSiteReturnedFromReturned : public BaseType {
621 AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
622 : BaseType(IRP, A) {}
623
624 /// See AbstractAttribute::updateImpl(...).
625 ChangeStatus updateImpl(Attributor &A) override {
626 assert(this->getIRPosition().getPositionKind() ==
627 IRPosition::IRP_CALL_SITE_RETURNED &&
628 "Can only wrap function returned positions for call site returned "
629 "positions!");
630 auto &S = this->getState();
631
632 const Function *AssociatedFunction =
633 this->getIRPosition().getAssociatedFunction();
634 if (!AssociatedFunction)
635 return S.indicatePessimisticFixpoint();
636
637 CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
638 if (IntroduceCallBaseContext)
639 LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
640 << CBContext << "\n");
641
642 IRPosition FnPos = IRPosition::returned(
643 *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
644 const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
645 return clampStateAndIndicateChange(S, AA.getState());
646 }
647};
648} // namespace
649
650/// Helper function to accumulate uses.
651template <class AAType, typename StateType = typename AAType::StateType>
652static void followUsesInContext(AAType &AA, Attributor &A,
653 MustBeExecutedContextExplorer &Explorer,
654 const Instruction *CtxI,
655 SetVector<const Use *> &Uses,
656 StateType &State) {
657 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
658 for (unsigned u = 0; u < Uses.size(); ++u) {
659 const Use *U = Uses[u];
660 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
661 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
662 if (Found && AA.followUseInMBEC(A, U, UserI, State))
663 for (const Use &Us : UserI->uses())
664 Uses.insert(&Us);
665 }
666 }
667}
668
669/// Use the must-be-executed-context around \p I to add information into \p S.
670/// The AAType class is required to have `followUseInMBEC` method with the
671/// following signature and behaviour:
672///
673/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
674/// U - Underlying use.
675/// I - The user of the \p U.
676/// Returns true if the value should be tracked transitively.
677///
678template <class AAType, typename StateType = typename AAType::StateType>
679static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
680 Instruction &CtxI) {
681
682 // Container for (transitive) uses of the associated value.
683 SetVector<const Use *> Uses;
684 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
685 Uses.insert(&U);
686
687 MustBeExecutedContextExplorer &Explorer =
688 A.getInfoCache().getMustBeExecutedContextExplorer();
689
690 followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
691
692 if (S.isAtFixpoint())
693 return;
694
695 SmallVector<const BranchInst *, 4> BrInsts;
696 auto Pred = [&](const Instruction *I) {
697 if (const BranchInst *Br = dyn_cast<BranchInst>(I))
698 if (Br->isConditional())
699 BrInsts.push_back(Br);
700 return true;
701 };
702
703 // Here, accumulate conditional branch instructions in the context. We
704 // explore the child paths and collect the known states. The disjunction of
705 // those states can be merged to its own state. Let ParentState_i be a state
706 // to indicate the known information for an i-th branch instruction in the
707 // context. ChildStates are created for its successors respectively.
708 //
709 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
710 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
711 // ...
712 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
713 //
714 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
715 //
716 // FIXME: Currently, recursive branches are not handled. For example, we
717 // can't deduce that ptr must be dereferenced in below function.
718 //
719 // void f(int a, int b, int *ptr) {
720 // if(a)
721 // if (b) {
722 // *ptr = 0;
723 // } else {
724 // *ptr = 1;
725 // }
726 // else {
727 // if (b) {
728 // *ptr = 0;
729 // } else {
730 // *ptr = 1;
731 // }
732 // }
733 // }
734
735 Explorer.checkForAllContext(&CtxI, Pred);
736 for (const BranchInst *Br : BrInsts) {
737 StateType ParentState;
738
739 // The known state of the parent state is a conjunction of children's
740 // known states so it is initialized with a best state.
741 ParentState.indicateOptimisticFixpoint();
742
743 for (const BasicBlock *BB : Br->successors()) {
744 StateType ChildState;
745
746 size_t BeforeSize = Uses.size();
747 followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
748
749 // Erase uses which only appear in the child.
750 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
751 It = Uses.erase(It);
752
753 ParentState &= ChildState;
754 }
755
756 // Use only known state.
757 S += ParentState;
758 }
759}
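// Illustrative contract sketch (hypothetical, not part of the analyzed file):
// an AAType usable with followUsesInMBEC provides a member of the form
//   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *UserI,
//                        StateType &State);
// which folds whatever the use at UserI implies into State and returns true if
// UserI's own uses should be followed transitively (see followUsesInContext).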
760
761/// ------------------------ PointerInfo ---------------------------------------
762
763namespace llvm {
764namespace AA {
765namespace PointerInfo {
766
767/// An access kind description as used by AAPointerInfo.
768struct OffsetAndSize;
769
770struct State;
771
772} // namespace PointerInfo
773} // namespace AA
774
775/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
776template <>
777struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
778 using Access = AAPointerInfo::Access;
779 static inline Access getEmptyKey();
780 static inline Access getTombstoneKey();
781 static unsigned getHashValue(const Access &A);
782 static bool isEqual(const Access &LHS, const Access &RHS);
783};
784
785/// Helper that allows OffsetAndSize as a key in a DenseMap.
786template <>
787struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
788 : DenseMapInfo<std::pair<int64_t, int64_t>> {};
789
790/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
791/// but the instruction.
792struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
793 using Base = DenseMapInfo<Instruction *>;
794 using Access = AAPointerInfo::Access;
795 static inline Access getEmptyKey();
796 static inline Access getTombstoneKey();
797 static unsigned getHashValue(const Access &A);
798 static bool isEqual(const Access &LHS, const Access &RHS);
799};
800
801} // namespace llvm
802
803/// Helper to represent an access offset and size, with logic to deal with
804/// uncertainty and check for overlapping accesses.
805struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
806 using BaseTy = std::pair<int64_t, int64_t>;
807 OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
808 OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
809 int64_t getOffset() const { return first; }
810 int64_t getSize() const { return second; }
811 static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
812
813 /// Return true if offset or size are unknown.
814 bool offsetOrSizeAreUnknown() const {
815 return getOffset() == OffsetAndSize::Unknown ||
816 getSize() == OffsetAndSize::Unknown;
817 }
818
819 /// Return true if this offset and size pair might describe an address that
820 /// overlaps with \p OAS.
821 bool mayOverlap(const OffsetAndSize &OAS) const {
822 // Any unknown value and we are giving up -> overlap.
823 if (offsetOrSizeAreUnknown() || OAS.offsetOrSizeAreUnknown())
824 return true;
825
826 // Check if one offset point is in the other interval [offset, offset+size].
827 return OAS.getOffset() + OAS.getSize() > getOffset() &&
828 OAS.getOffset() < getOffset() + getSize();
829 }
830
831 /// Constant used to represent unknown offset or sizes.
832 static constexpr int64_t Unknown = 1 << 31;
833};
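// Worked example (illustrative only, not part of the analyzed file): with
// A = {Offset = 0, Size = 8} and B = {Offset = 4, Size = 4}, A.mayOverlap(B)
// is true because 4 + 4 > 0 and 4 < 0 + 8; with C = {Offset = 8, Size = 4},
// A.mayOverlap(C) is false because 8 < 0 + 8 fails, i.e. the half-open ranges
// [0, 8) and [8, 12) only touch. Any Unknown offset or size conservatively
// makes the query return true.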
834
835/// Implementation of the DenseMapInfo.
836///
837///{
838inline llvm::AccessAsInstructionInfo::Access
839llvm::AccessAsInstructionInfo::getEmptyKey() {
840 return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
841}
842inline llvm::AccessAsInstructionInfo::Access
843llvm::AccessAsInstructionInfo::getTombstoneKey() {
844 return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
845 nullptr);
846}
847unsigned llvm::AccessAsInstructionInfo::getHashValue(
848 const llvm::AccessAsInstructionInfo::Access &A) {
849 return Base::getHashValue(A.getRemoteInst());
850}
851bool llvm::AccessAsInstructionInfo::isEqual(
852 const llvm::AccessAsInstructionInfo::Access &LHS,
853 const llvm::AccessAsInstructionInfo::Access &RHS) {
854 return LHS.getRemoteInst() == RHS.getRemoteInst();
855}
856inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
857llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
858 return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
859 nullptr);
860}
861inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
862llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
863 return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
864 nullptr);
865}
866
867unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
868 const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
869 return detail::combineHashValue(
870 DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
871 (A.isWrittenValueYetUndetermined()
872 ? ~0
873 : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
874 A.getKind();
875}
876
877bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
878 const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
879 const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
880 return LHS == RHS;
881}
882///}
883
884/// A type to track pointer/struct usage and accesses for AAPointerInfo.
885struct AA::PointerInfo::State : public AbstractState {
886
887 /// Return the best possible representable state.
888 static State getBestState(const State &SIS) { return State(); }
889
890 /// Return the worst possible representable state.
891 static State getWorstState(const State &SIS) {
892 State R;
893 R.indicatePessimisticFixpoint();
894 return R;
895 }
896
897 State() {}
898 State(const State &SIS) : AccessBins(SIS.AccessBins) {}
899 State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
900
901 const State &getAssumed() const { return *this; }
902
903 /// See AbstractState::isValidState().
904 bool isValidState() const override { return BS.isValidState(); }
905
906 /// See AbstractState::isAtFixpoint().
907 bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
908
909 /// See AbstractState::indicateOptimisticFixpoint().
910 ChangeStatus indicateOptimisticFixpoint() override {
911 BS.indicateOptimisticFixpoint();
912 return ChangeStatus::UNCHANGED;
913 }
914
915 /// See AbstractState::indicatePessimisticFixpoint().
916 ChangeStatus indicatePessimisticFixpoint() override {
917 BS.indicatePessimisticFixpoint();
918 return ChangeStatus::CHANGED;
919 }
920
921 State &operator=(const State &R) {
922 if (this == &R)
923 return *this;
924 BS = R.BS;
925 AccessBins = R.AccessBins;
926 return *this;
927 }
928
929 State &operator=(State &&R) {
930 if (this == &R)
931 return *this;
932 std::swap(BS, R.BS);
933 std::swap(AccessBins, R.AccessBins);
934 return *this;
935 }
936
937 bool operator==(const State &R) const {
938 if (BS != R.BS)
939 return false;
940 if (AccessBins.size() != R.AccessBins.size())
941 return false;
942 auto It = begin(), RIt = R.begin(), E = end();
943 while (It != E) {
944 if (It->getFirst() != RIt->getFirst())
945 return false;
946 auto &Accs = It->getSecond();
947 auto &RAccs = RIt->getSecond();
948 if (Accs.size() != RAccs.size())
949 return false;
950 auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
951 while (AccIt != AccE) {
952 if (*AccIt != *RAccIt)
953 return false;
954 ++AccIt;
955 ++RAccIt;
956 }
957 ++It;
958 ++RIt;
959 }
960 return true;
961 }
962 bool operator!=(const State &R) const { return !(*this == R); }
963
964 /// We store accesses in a set with the instruction as key.
965 using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
966
967 /// We store all accesses in bins denoted by their offset and size.
968 using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
969
970 AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
971 AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
972
973protected:
974 /// The bins with all the accesses for the associated pointer.
975 DenseMap<OffsetAndSize, Accesses> AccessBins;
976
977 /// Add a new access to the state at offset \p Offset and with size \p Size.
978 /// The access is associated with \p I, writes \p Content (if anything), and
979 /// is of kind \p Kind.
980 /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
981 ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
982 Optional<Value *> Content,
983 AAPointerInfo::AccessKind Kind, Type *Ty,
984 Instruction *RemoteI = nullptr,
985 Accesses *BinPtr = nullptr) {
986 OffsetAndSize Key{Offset, Size};
987 Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
988 AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
989 // Check if we have an access for this instruction in this bin; if not,
990 // simply add it.
991 auto It = Bin.find(Acc);
992 if (It == Bin.end()) {
993 Bin.insert(Acc);
994 return ChangeStatus::CHANGED;
995 }
996 // If the existing access is the same as the new one, nothing changed.
997 AAPointerInfo::Access Before = *It;
998 // The new one will be combined with the existing one.
999 *It &= Acc;
1000 return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1001 }
1002
1003 /// See AAPointerInfo::forallInterferingAccesses.
1004 bool forallInterferingAccesses(
1005 Instruction &I,
1006 function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1007 if (!isValidState())
1008 return false;
1009 // First find the offset and size of I.
1010 OffsetAndSize OAS(-1, -1);
1011 for (auto &It : AccessBins) {
1012 for (auto &Access : It.getSecond()) {
1013 if (Access.getRemoteInst() == &I) {
1014 OAS = It.getFirst();
1015 break;
1016 }
1017 }
1018 if (OAS.getSize() != -1)
1019 break;
1020 }
1021 if (OAS.getSize() == -1)
1022 return true;
1023
1024 // Now that we have an offset and size, find all overlapping ones and use
1025 // the callback on the accesses.
1026 for (auto &It : AccessBins) {
1027 OffsetAndSize ItOAS = It.getFirst();
1028 if (!OAS.mayOverlap(ItOAS))
1029 continue;
1030 bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1031 for (auto &Access : It.getSecond())
1032 if (!CB(Access, IsExact))
1033 return false;
1034 }
1035 return true;
1036 }
1037
1038private:
1039 /// State to track fixpoint and validity.
1040 BooleanState BS;
1041};
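// A minimal standalone sketch of the idea behind the State above (simplified:
// plain std::map instead of DenseMap/DenseSet, and only the access kinds are
// merged, not the written content or type). Accesses are grouped into bins
// keyed by (offset, size), re-adding an access for the same instruction only
// reports a change if it adds information, and an interference query visits
// every bin whose byte range may overlap the queried one.
#include <cstdint>
#include <map>
#include <utility>

namespace sketch {
using Inst = int; // placeholder for an instruction identifier
enum Kind : unsigned { AK_READ = 1, AK_WRITE = 2 };

struct Bin {
  std::map<Inst, unsigned> KindPerInst; // merged access kinds per instruction
};

struct PointerState {
  static constexpr int64_t Unknown = -1;
  std::map<std::pair<int64_t, int64_t>, Bin> Bins; // (offset, size) -> accesses

  // Mirrors addAccess: returns true if the state changed (CHANGED).
  bool addAccess(int64_t Offset, int64_t Size, Inst I, unsigned Kind) {
    unsigned &Merged = Bins[std::make_pair(Offset, Size)].KindPerInst[I];
    unsigned Before = Merged;
    Merged |= Kind; // combine with any existing access of I in this bin
    return Before == 0 || Merged != Before;
  }

  static bool mayOverlap(std::pair<int64_t, int64_t> A,
                         std::pair<int64_t, int64_t> B) {
    if (A.first == Unknown || A.second == Unknown || B.first == Unknown ||
        B.second == Unknown)
      return true; // unknown offset or size: conservatively assume overlap
    return A.first + A.second > B.first && B.first + B.second > A.first;
  }

  // Mirrors forallInterferingAccesses: visit every access whose bin may
  // overlap the byte range [Offset, Offset + Size).
  template <typename CallbackT>
  bool forallInterfering(int64_t Offset, int64_t Size, CallbackT CB) const {
    for (const auto &It : Bins)
      if (mayOverlap(It.first, std::make_pair(Offset, Size)))
        for (const auto &Acc : It.second.KindPerInst)
          if (!CB(Acc.first, Acc.second)) // (instruction, merged kinds)
            return false;
    return true;
  }
};
} // namespace sketch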
1042
1043namespace {
1044struct AAPointerInfoImpl
1045 : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1046 using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1047 AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1048
1049 /// See AbstractAttribute::initialize(...).
1050 void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1051
1052 /// See AbstractAttribute::getAsStr().
1053 const std::string getAsStr() const override {
1054 return std::string("PointerInfo ") +
1055 (isValidState() ? (std::string("#") +
1056 std::to_string(AccessBins.size()) + " bins")
1057 : "<invalid>");
1058 }
1059
1060 /// See AbstractAttribute::manifest(...).
1061 ChangeStatus manifest(Attributor &A) override {
1062 return AAPointerInfo::manifest(A);
1063 }
1064
1065 bool forallInterferingAccesses(
1066 LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1067 const override {
1068 return State::forallInterferingAccesses(LI, CB);
1069 }
1070 bool forallInterferingAccesses(
1071 StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1072 const override {
1073 return State::forallInterferingAccesses(SI, CB);
1074 }
1075
1076 ChangeStatus translateAndAddCalleeState(Attributor &A,
1077 const AAPointerInfo &CalleeAA,
1078 int64_t CallArgOffset, CallBase &CB) {
1079 using namespace AA::PointerInfo;
1080 if (!CalleeAA.getState().isValidState() || !isValidState())
1081 return indicatePessimisticFixpoint();
1082
1083 const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1084 bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1085
1086 // Combine the accesses bin by bin.
1087 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1088 for (auto &It : CalleeImplAA.getState()) {
1089 OffsetAndSize OAS = OffsetAndSize::getUnknown();
1090 if (CallArgOffset != OffsetAndSize::Unknown)
1091 OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1092 It.first.getSize());
1093 Accesses &Bin = AccessBins[OAS];
1094 for (const AAPointerInfo::Access &RAcc : It.second) {
1095 if (IsByval && !RAcc.isRead())
1096 continue;
1097 bool UsedAssumedInformation = false;
1098 Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1099 RAcc.getContent(), CB, *this, UsedAssumedInformation);
1100 AccessKind AK =
1101 AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1102 : AccessKind::AK_READ_WRITE));
1103 Changed =
1104 Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1105 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1106 }
1107 }
1108 return Changed;
1109 }
1110
1111 /// Statistic tracking for all AAPointerInfo implementations.
1112 /// See AbstractAttribute::trackStatistics().
1113 void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1114};
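// A standalone sketch of the offset translation done by
// translateAndAddCalleeState above (simplified, hypothetical types): a callee
// bin at (Offset, Size) becomes a call-site bin at (Offset + CallArgOffset,
// Size), an unknown call-site offset makes the resulting bin unknown, and for
// byval arguments only read accesses are carried over since the callee
// operates on its own copy.
#include <cstdint>
#include <map>
#include <utility>

namespace sketch {
constexpr int64_t Unknown = -1;
enum Kind : unsigned { AK_READ = 1, AK_WRITE = 2 };
using BinKey = std::pair<int64_t, int64_t>; // (offset, size)
using Bins = std::map<BinKey, unsigned>;    // merged access kinds per bin

inline Bins translateCalleeBins(const Bins &Callee, int64_t CallArgOffset,
                                bool IsByval) {
  Bins Result;
  for (const auto &It : Callee) {
    unsigned Kinds = It.second;
    if (IsByval)
      Kinds &= AK_READ; // callee writes to a byval copy never reach the caller
    if (!Kinds)
      continue;
    BinKey Key(Unknown, Unknown);
    if (CallArgOffset != Unknown && It.first.first != Unknown)
      Key = BinKey(It.first.first + CallArgOffset, It.first.second);
    Result[Key] |= Kinds;
  }
  return Result;
}
} // namespace sketch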
1115
1116struct AAPointerInfoFloating : public AAPointerInfoImpl {
1117 using AccessKind = AAPointerInfo::AccessKind;
1118 AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1119 : AAPointerInfoImpl(IRP, A) {}
1120
1121 /// See AbstractAttribute::initialize(...).
1122 void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1123
1124 /// Deal with an access and signal if it was handled successfully.
1125 bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1126 Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1127 ChangeStatus &Changed, Type *Ty,
1128 int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1129 using namespace AA::PointerInfo;
1130 // No need to find a size if one is given or the offset is unknown.
1131 if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1132 Ty) {
1133 const DataLayout &DL = A.getDataLayout();
1134 TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1135 if (!AccessSize.isScalable())
1136 Size = AccessSize.getFixedSize();
1137 }
1138 Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1139 return true;
1140 };
1141
1142 /// Helper struct, will support ranges eventually.
1143 struct OffsetInfo {
1144 int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1145
1146 bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1147 };
1148
1149 /// See AbstractAttribute::updateImpl(...).
1150 ChangeStatus updateImpl(Attributor &A) override {
1151 using namespace AA::PointerInfo;
1152 State S = getState();
1153 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1154 Value &AssociatedValue = getAssociatedValue();
1155
1156 const DataLayout &DL = A.getDataLayout();
1157 DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1158 OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1159
1160 auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1161 bool &Follow) {
1162 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1163 UsrOI = PtrOI;
1164 Follow = true;
1165 return true;
1166 };
1167
1168 const auto *TLI = getAnchorScope()
1169 ? A.getInfoCache().getTargetLibraryInfoForFunction(
1170 *getAnchorScope())
1171 : nullptr;
1172 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1173 Value *CurPtr = U.get();
1174 User *Usr = U.getUser();
1175 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1176 << *Usr << "\n");
1177 assert(OffsetInfoMap.count(CurPtr) &&
1178 "The current pointer offset should have been seeded!");
1179
1180 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1181 if (CE->isCast())
1182 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1183 if (CE->isCompare())
1184 return true;
1185 if (!CE->isGEPWithNoNotionalOverIndexing()) {
1186 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1187 << "\n");
1188 return false;
1189 }
1190 }
1191 if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1192 // Note the order here, the Usr access might change the map, CurPtr is
1193 // already in it though.
1194 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1195 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1196 UsrOI = PtrOI;
1197
1198 // TODO: Use range information.
1199 if (PtrOI.Offset == OffsetAndSize::Unknown ||
1200 !GEP->hasAllConstantIndices()) {
1201 UsrOI.Offset = OffsetAndSize::Unknown;
1202 Follow = true;
1203 return true;
1204 }
1205
1206 SmallVector<Value *, 8> Indices;
1207 for (Use &Idx : GEP->indices()) {
1208 if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1209 Indices.push_back(CIdx);
1210 continue;
1211 }
1212
1213 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1214 << " : " << *Idx << "\n");
1215 return false;
1216 }
1217 UsrOI.Offset = PtrOI.Offset +
1218 DL.getIndexedOffsetInType(
1219 CurPtr->getType()->getPointerElementType(), Indices);
1220 Follow = true;
1221 return true;
1222 }
1223 if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1224 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1225
1226 // For PHIs we need to take care of the recurrence explicitly as the value
1227 // might change while we iterate through a loop. For now, we give up if
1228 // the PHI is not invariant.
1229 if (isa<PHINode>(Usr)) {
1230 // Note the order here, the Usr access might change the map, CurPtr is
1231 // already in it though.
1232 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1233 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1234 // Check if the PHI is invariant (so far).
1235 if (UsrOI == PtrOI)
1236 return true;
1237
1238 // Check if the PHI operand already has an unknown offset as we can't
1239 // improve on that anymore.
1240 if (PtrOI.Offset == OffsetAndSize::Unknown) {
1241 UsrOI = PtrOI;
1242 Follow = true;
1243 return true;
1244 }
1245
1246 // Check if the PHI operand is not dependent on the PHI itself.
1247 // TODO: This is not great as we look at the pointer type. However, it
1248 // is unclear where the Offset size comes from with typeless pointers.
1249 APInt Offset(
1250 DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1251 0);
1252 if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1253 DL, Offset, /* AllowNonInbounds */ true)) {
1254 if (Offset != PtrOI.Offset) {
1255 LLVM_DEBUG(dbgs()
1256 << "[AAPointerInfo] PHI operand pointer offset mismatch "
1257 << *CurPtr << " in " << *Usr << "\n");
1258 return false;
1259 }
1260 return HandlePassthroughUser(Usr, PtrOI, Follow);
1261 }
1262
1263 // TODO: Approximate in case we know the direction of the recurrence.
1264 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1265 << *CurPtr << " in " << *Usr << "\n");
1266 UsrOI = PtrOI;
1267 UsrOI.Offset = OffsetAndSize::Unknown;
1268 Follow = true;
1269 return true;
1270 }
1271
1272 if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1273 return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1274 AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1275 Changed, LoadI->getType());
1276 if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1277 if (StoreI->getValueOperand() == CurPtr) {
1278 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1279 << *StoreI << "\n");
1280 return false;
1281 }
1282 bool UsedAssumedInformation = false;
1283 Optional<Value *> Content = A.getAssumedSimplified(
1284 *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1285 return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1286 OffsetInfoMap[CurPtr].Offset, Changed,
1287 StoreI->getValueOperand()->getType());
1288 }
1289 if (auto *CB = dyn_cast<CallBase>(Usr)) {
1290 if (CB->isLifetimeStartOrEnd())
1291 return true;
1292 if (TLI && isFreeCall(CB, TLI))
1293 return true;
1294 if (CB->isArgOperand(&U)) {
1295 unsigned ArgNo = CB->getArgOperandNo(&U);
1296 const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1297 *this, IRPosition::callsite_argument(*CB, ArgNo),
1298 DepClassTy::REQUIRED);
1299 Changed = translateAndAddCalleeState(
1300 A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
1301 Changed;
1302 return true;
1303 }
1304 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1305 << "\n");
1306 // TODO: Allow some call uses
1307 return false;
1308 }
1309
1310 LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1311 return false;
1312 };
1313 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1314 if (OffsetInfoMap.count(NewU))
1315 return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1316 OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1317 return true;
1318 };
1319 if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1320 /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1321 EquivalentUseCB))
1322 return indicatePessimisticFixpoint();
1323
1324 LLVM_DEBUG({
1325 dbgs() << "Accesses by bin after update:\n";
1326 for (auto &It : AccessBins) {
1327 dbgs() << "[" << It.first.getOffset() << "-"
1328 << It.first.getOffset() + It.first.getSize()
1329 << "] : " << It.getSecond().size() << "\n";
1330 for (auto &Acc : It.getSecond()) {
1331 dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1332 << "\n";
1333 if (Acc.getLocalInst() != Acc.getRemoteInst())
1334 dbgs() << " --> "
1335 << *Acc.getRemoteInst() << "\n";
1336 if (!Acc.isWrittenValueYetUndetermined())
1337 dbgs() << " - " << Acc.getWrittenValue() << "\n";
1338 }
1339 }
1340 });
1341
1342 return Changed;
1343 }
1344
1345 /// See AbstractAttribute::trackStatistics()
1346 void trackStatistics() const override {
1347 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1348 }
1349};
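// A standalone sketch of the offset propagation in
// AAPointerInfoFloating::updateImpl above (simplified, hypothetical IR model):
// casts and selects forward the offset unchanged, GEPs with constant indices
// add a constant byte offset, and anything else (non-constant GEP, variant
// PHI) degrades the offset to Unknown, which addAccess then records as an
// unknown-offset bin.
#include <cstdint>

namespace sketch {
constexpr int64_t Unknown = -1;

enum class UserKind { Cast, Select, ConstantGEP, VariableGEP, VariantPHI };

// Compute the offset seen by a user given the offset of its pointer operand.
inline int64_t propagateOffset(int64_t PtrOffset, UserKind K,
                               int64_t ConstGEPOffsetBytes = 0) {
  if (PtrOffset == Unknown)
    return Unknown;                 // an unknown offset can never be improved
  switch (K) {
  case UserKind::Cast:
  case UserKind::Select:
    return PtrOffset;               // pass-through users keep the offset
  case UserKind::ConstantGEP:
    return PtrOffset + ConstGEPOffsetBytes;
  case UserKind::VariableGEP:
  case UserKind::VariantPHI:
    return Unknown;                 // give up on the exact offset
  }
  return Unknown;
}
} // namespace sketch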
1350
1351struct AAPointerInfoReturned final : AAPointerInfoImpl {
1352 AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1353 : AAPointerInfoImpl(IRP, A) {}
1354
1355 /// See AbstractAttribute::updateImpl(...).
1356 ChangeStatus updateImpl(Attributor &A) override {
1357 return indicatePessimisticFixpoint();
1358 }
1359
1360 /// See AbstractAttribute::trackStatistics()
1361 void trackStatistics() const override {
1362 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1363 }
1364};
1365
1366struct AAPointerInfoArgument final : AAPointerInfoFloating {
1367 AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1368 : AAPointerInfoFloating(IRP, A) {}
1369
1370 /// See AbstractAttribute::initialize(...).
1371 void initialize(Attributor &A) override {
1372 AAPointerInfoFloating::initialize(A);
1373 if (getAnchorScope()->isDeclaration())
1374 indicatePessimisticFixpoint();
1375 }
1376
1377 /// See AbstractAttribute::trackStatistics()
1378 void trackStatistics() const override {
1379 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1380 }
1381};
1382
1383struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1384 AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1385 : AAPointerInfoFloating(IRP, A) {}
1386
1387 /// See AbstractAttribute::updateImpl(...).
1388 ChangeStatus updateImpl(Attributor &A) override {
1389 using namespace AA::PointerInfo;
1390 // We handle memory intrinsics explicitly: at least the first (=
1391 // destination) and second (= source) arguments, as we know how they are
1392 // accessed.
1393 if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1394 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1395 int64_t LengthVal = OffsetAndSize::Unknown;
1396 if (Length)
1397 LengthVal = Length->getSExtValue();
1398 Value &Ptr = getAssociatedValue();
1399 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1400 ChangeStatus Changed;
1401 if (ArgNo == 0) {
1402 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1403 nullptr, LengthVal);
1404 } else if (ArgNo == 1) {
1405 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1406 nullptr, LengthVal);
1407 } else {
1408 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1409 << *MI << "\n");
1410 return indicatePessimisticFixpoint();
1411 }
1412 return Changed;
1413 }
1414
1415 // TODO: Once we have call site specific value information we can provide
1416 // call site specific liveness information and then it makes
1417 // sense to specialize attributes for call site arguments instead of
1418 // redirecting requests to the callee argument.
1419 Argument *Arg = getAssociatedArgument();
1420 if (!Arg)
1421 return indicatePessimisticFixpoint();
1422 const IRPosition &ArgPos = IRPosition::argument(*Arg);
1423 auto &ArgAA =
1424 A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1425 return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1426 }
1427
1428 /// See AbstractAttribute::trackStatistics()
1429 void trackStatistics() const override {
1430 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1431 }
1432};
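// A standalone sketch of the memory intrinsic handling above (simplified,
// hypothetical types): for memcpy/memmove/memset the destination operand
// (argument 0) is recorded as a write and the source operand (argument 1) as
// a read, both at offset 0 with the constant length as the size, or with an
// unknown size if the length is not a compile-time constant.
#include <cstdint>

namespace sketch {
constexpr int64_t Unknown = -1;
enum Kind : unsigned { AK_READ = 1, AK_WRITE = 2 };

struct Access {
  int64_t Offset;
  int64_t Size;
  unsigned K;
};

// ArgNo is the call-site argument number of the pointer being analyzed;
// HasConstantLength/LengthVal model dyn_cast<ConstantInt>(MI->getLength()).
inline bool classifyMemIntrinsicArg(unsigned ArgNo, bool HasConstantLength,
                                    int64_t LengthVal, Access &Out) {
  int64_t Size = HasConstantLength ? LengthVal : Unknown;
  if (ArgNo == 0) {
    Out = {0, Size, AK_WRITE};      // the destination is written
    return true;
  }
  if (ArgNo == 1) {
    Out = {0, Size, AK_READ};       // the source is read
    return true;
  }
  return false;                     // any other operand: give up
}
} // namespace sketch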
1433
1434struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1435 AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1436 : AAPointerInfoFloating(IRP, A) {}
1437
1438 /// See AbstractAttribute::trackStatistics()
1439 void trackStatistics() const override {
1440 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1441 }
1442};
1443
1444/// -----------------------NoUnwind Function Attribute--------------------------
1445
1446struct AANoUnwindImpl : AANoUnwind {
1447 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1448
1449 const std::string getAsStr() const override {
1450 return getAssumed() ? "nounwind" : "may-unwind";
1451 }
1452
1453 /// See AbstractAttribute::updateImpl(...).
1454 ChangeStatus updateImpl(Attributor &A) override {
1455 auto Opcodes = {
1456 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
1457 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
1458 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1459
1460 auto CheckForNoUnwind = [&](Instruction &I) {
1461 if (!I.mayThrow())
1462 return true;
1463
1464 if (const auto *CB = dyn_cast<CallBase>(&I)) {
1465 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1466 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1467 return NoUnwindAA.isAssumedNoUnwind();
1468 }
1469 return false;
1470 };
1471
1472 bool UsedAssumedInformation = false;
1473 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1474 UsedAssumedInformation))
1475 return indicatePessimisticFixpoint();
1476
1477 return ChangeStatus::UNCHANGED;
1478 }
1479};
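// A standalone sketch of the nounwind deduction above (simplified,
// hypothetical instruction model): every potentially throwing instruction
// must either not throw at all or be a call whose callee is assumed
// nounwind; a single violating instruction pushes the state to the
// pessimistic fixpoint (may-unwind).
#include <vector>

namespace sketch {
struct Inst {
  bool MayThrow;
  bool IsCall;
  bool CalleeAssumedNoUnwind; // stands in for querying AANoUnwind at the call
};

inline bool deduceNoUnwind(const std::vector<Inst> &Insts) {
  for (const Inst &I : Insts) {
    if (!I.MayThrow)
      continue;
    if (I.IsCall && I.CalleeAssumedNoUnwind)
      continue;
    return false; // found an instruction that may unwind
  }
  return true;
}
} // namespace sketch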
1480
1481struct AANoUnwindFunction final : public AANoUnwindImpl {
1482 AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1483 : AANoUnwindImpl(IRP, A) {}
1484
1485 /// See AbstractAttribute::trackStatistics()
1486 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1487};
1488
1489/// NoUnwind attribute deduction for a call site.
1490struct AANoUnwindCallSite final : AANoUnwindImpl {
1491 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1492 : AANoUnwindImpl(IRP, A) {}
1493
1494 /// See AbstractAttribute::initialize(...).
1495 void initialize(Attributor &A) override {
1496 AANoUnwindImpl::initialize(A);
1497 Function *F = getAssociatedFunction();
1498 if (!F || F->isDeclaration())
1499 indicatePessimisticFixpoint();
1500 }
1501
1502 /// See AbstractAttribute::updateImpl(...).
1503 ChangeStatus updateImpl(Attributor &A) override {
1504 // TODO: Once we have call site specific value information we can provide
1505 // call site specific liveness information and then it makes
1506 // sense to specialize attributes for call site arguments instead of
1507 // redirecting requests to the callee argument.
1508 Function *F = getAssociatedFunction();
1509 const IRPosition &FnPos = IRPosition::function(*F);
1510 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1511 return clampStateAndIndicateChange(getState(), FnAA.getState());
1512 }
1513
1514 /// See AbstractAttribute::trackStatistics()
1515 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1516};
1517
1518/// --------------------- Function Return Values -------------------------------
1519
1520/// "Attribute" that collects all potential returned values and the return
1521/// instructions that they arise from.
1522///
1523/// If there is a unique returned value R, the manifest method will:
1524/// - mark R with the "returned" attribute, if R is an argument.
1525class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1526
1527 /// Mapping of values potentially returned by the associated function to the
1528 /// return instructions that might return them.
1529 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1530
1531 /// State flags
1532 ///
1533 ///{
1534 bool IsFixed = false;
1535 bool IsValidState = true;
1536 ///}
1537
1538public:
1539 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1540 : AAReturnedValues(IRP, A) {}
1541
1542 /// See AbstractAttribute::initialize(...).
1543 void initialize(Attributor &A) override {
1544 // Reset the state.
1545 IsFixed = false;
1546 IsValidState = true;
1547 ReturnedValues.clear();
1548
1549 Function *F = getAssociatedFunction();
1550 if (!F || F->isDeclaration()) {
1551 indicatePessimisticFixpoint();
1552 return;
1553 }
1554 assert(!F->getReturnType()->isVoidTy() &&
1555 "Did not expect a void return type!");
1556
1557 // The map from instruction opcodes to those instructions in the function.
1558 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1559
1560 // Look through all arguments, if one is marked as returned we are done.
1561 for (Argument &Arg : F->args()) {
1562 if (Arg.hasReturnedAttr()) {
1563 auto &ReturnInstSet = ReturnedValues[&Arg];
1564 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1565 for (Instruction *RI : *Insts)
1566 ReturnInstSet.insert(cast<ReturnInst>(RI));
1567
1568 indicateOptimisticFixpoint();
1569 return;
1570 }
1571 }
1572
1573 if (!A.isFunctionIPOAmendable(*F))
1574 indicatePessimisticFixpoint();
1575 }
1576
1577 /// See AbstractAttribute::manifest(...).
1578 ChangeStatus manifest(Attributor &A) override;
1579
1580 /// See AbstractAttribute::getState(...).
1581 AbstractState &getState() override { return *this; }
1582
1583 /// See AbstractAttribute::getState(...).
1584 const AbstractState &getState() const override { return *this; }
1585
1586 /// See AbstractAttribute::updateImpl(Attributor &A).
1587 ChangeStatus updateImpl(Attributor &A) override;
1588
1589 llvm::iterator_range<iterator> returned_values() override {
1590 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1591 }
1592
1593 llvm::iterator_range<const_iterator> returned_values() const override {
1594 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1595 }
1596
1597 /// Return the number of potential return values, -1 if unknown.
1598 size_t getNumReturnValues() const override {
1599 return isValidState() ? ReturnedValues.size() : -1;
1600 }
1601
1602 /// Return an assumed unique return value if a single candidate is found. If
1603 /// there cannot be one, return a nullptr. If it is not clear yet, return the
1604 /// Optional::NoneType.
1605 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1606
1607 /// See AbstractState::checkForAllReturnedValues(...).
1608 bool checkForAllReturnedValuesAndReturnInsts(
1609 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1610 const override;
1611
1612 /// Pretty print the attribute similar to the IR representation.
1613 const std::string getAsStr() const override;
1614
1615 /// See AbstractState::isAtFixpoint().
1616 bool isAtFixpoint() const override { return IsFixed; }
1617
1618 /// See AbstractState::isValidState().
1619 bool isValidState() const override { return IsValidState; }
1620
1621 /// See AbstractState::indicateOptimisticFixpoint(...).
1622 ChangeStatus indicateOptimisticFixpoint() override {
1623 IsFixed = true;
1624 return ChangeStatus::UNCHANGED;
1625 }
1626
1627 ChangeStatus indicatePessimisticFixpoint() override {
1628 IsFixed = true;
1629 IsValidState = false;
1630 return ChangeStatus::CHANGED;
1631 }
1632};
1633
1634ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1635 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1636
1637 // Bookkeeping.
1638 assert(isValidState());
1639 STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1640 "Number of function with known return values");
1641
1642 // Check if we have an assumed unique return value that we could manifest.
1643 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1644
1645 if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1646 return Changed;
1647
1648 // Bookkeeping.
1649 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1650 "Number of function with unique return");
1651 // If the assumed unique return value is an argument, annotate it.
1652 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1653 if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1654 getAssociatedFunction()->getReturnType())) {
1655 getIRPosition() = IRPosition::argument(*UniqueRVArg);
1656 Changed = IRAttribute::manifest(A);
1657 }
1658 }
1659 return Changed;
1660}
1661
1662const std::string AAReturnedValuesImpl::getAsStr() const {
1663 return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1664 (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1665}
1666
1667Optional<Value *>
1668AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1669 // If checkForAllReturnedValues provides a unique value, ignoring potential
1670 // undef values that can also be present, it is assumed to be the actual
1671 // return value and forwarded to the caller of this method. If there are
1672 // multiple, a nullptr is returned indicating there cannot be a unique
1673 // returned value.
1674 Optional<Value *> UniqueRV;
1675 Type *Ty = getAssociatedFunction()->getReturnType();
1676
1677 auto Pred = [&](Value &RV) -> bool {
1678 UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1679 return UniqueRV != Optional<Value *>(nullptr);
1680 };
1681
1682 if (!A.checkForAllReturnedValues(Pred, *this))
1683 UniqueRV = nullptr;
1684
1685 return UniqueRV;
1686}
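// A standalone sketch of the lattice used by getAssumedUniqueReturnValue
// above (simplified, C++14, no llvm::Optional): start with "no candidate
// yet", keep a single value while every returned value agrees (undef values
// are ignored), and collapse to "multiple" (modeled as a null value) as soon
// as two different non-undef values are seen.
namespace sketch {
struct Value {
  bool IsUndef;
};

struct UniqueRV {
  bool HasValue = false; // "None": nothing seen so far
  Value *V = nullptr;    // nullptr once known to be non-unique

  void combine(Value *RV) {
    if (RV && RV->IsUndef)
      return;            // undef never invalidates uniqueness
    if (!HasValue) {
      HasValue = true;
      V = RV;
      return;
    }
    if (V != RV)
      V = nullptr;       // two different values: no unique return value
  }
};
} // namespace sketch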
1687
1688bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1689 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1690 const {
1691 if (!isValidState())
1692 return false;
1693
1694 // Check all returned values but ignore call sites as long as we have not
1695 // encountered an overdefined one during an update.
1696 for (auto &It : ReturnedValues) {
1697 Value *RV = It.first;
1698 if (!Pred(*RV, It.second))
1699 return false;
1700 }
1701
1702 return true;
1703}
1704
1705ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1706 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1707
1708 auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1709 bool) -> bool {
1710 bool UsedAssumedInformation = false;
1711 Optional<Value *> SimpleRetVal =
1712 A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1713 if (!SimpleRetVal.hasValue())
1714 return true;
1715 if (!SimpleRetVal.getValue())
1716 return false;
1717 Value *RetVal = *SimpleRetVal;
1718 assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1719 "Assumed returned value should be valid in function scope!");
1720 if (ReturnedValues[RetVal].insert(&Ret))
1721 Changed = ChangeStatus::CHANGED;
1722 return true;
1723 };
1724
1725 auto ReturnInstCB = [&](Instruction &I) {
1726 ReturnInst &Ret = cast<ReturnInst>(I);
1727 return genericValueTraversal<ReturnInst>(
1728 A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1729 &I);
1730 };
1731
1732 // Discover returned values from all live returned instructions in the
1733 // associated function.
1734 bool UsedAssumedInformation = false;
1735 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1736 UsedAssumedInformation))
1737 return indicatePessimisticFixpoint();
1738 return Changed;
1739}
1740
1741struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1742 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1743 : AAReturnedValuesImpl(IRP, A) {}
1744
1745 /// See AbstractAttribute::trackStatistics()
1746 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1747};
1748
1749/// Returned values information for a call site.
1750struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1751 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1752 : AAReturnedValuesImpl(IRP, A) {}
1753
1754 /// See AbstractAttribute::initialize(...).
1755 void initialize(Attributor &A) override {
1756 // TODO: Once we have call site specific value information we can provide
1757 // call site specific liveness information and then it makes
1758 // sense to specialize attributes for call sites instead of
1759 // redirecting requests to the callee.
1760 llvm_unreachable("Abstract attributes for returned values are not "
1761 "supported for call sites yet!");
1762 }
1763
1764 /// See AbstractAttribute::updateImpl(...).
1765 ChangeStatus updateImpl(Attributor &A) override {
1766 return indicatePessimisticFixpoint();
1767 }
1768
1769 /// See AbstractAttribute::trackStatistics()
1770 void trackStatistics() const override {}
1771};
1772
1773/// ------------------------ NoSync Function Attribute -------------------------
1774
1775struct AANoSyncImpl : AANoSync {
1776 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1777
1778 const std::string getAsStr() const override {
1779 return getAssumed() ? "nosync" : "may-sync";
1780 }
1781
1782 /// See AbstractAttribute::updateImpl(...).
1783 ChangeStatus updateImpl(Attributor &A) override;
1784
1785 /// Helper function used to determine whether an instruction is a non-relaxed
1786 /// atomic, i.e., an atomic instruction whose ordering is stronger than
1787 /// unordered or monotonic.
1788 static bool isNonRelaxedAtomic(Instruction *I);
1789
1790 /// Helper function specific for intrinsics which are potentially volatile
1791 static bool isNoSyncIntrinsic(Instruction *I);
1792};
1793
1794bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1795 if (!I->isAtomic())
1796 return false;
1797
1798 if (auto *FI = dyn_cast<FenceInst>(I))
1799 // All legal orderings for fence are stronger than monotonic.
1800 return FI->getSyncScopeID() != SyncScope::SingleThread;
1801 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1802 // Unordered is not a legal ordering for cmpxchg.
1803 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1804 AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1805 }
1806
1807 AtomicOrdering Ordering;
1808 switch (I->getOpcode()) {
1809 case Instruction::AtomicRMW:
1810 Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1811 break;
1812 case Instruction::Store:
1813 Ordering = cast<StoreInst>(I)->getOrdering();
1814 break;
1815 case Instruction::Load:
1816 Ordering = cast<LoadInst>(I)->getOrdering();
1817 break;
1818 default:
1819 llvm_unreachable(
1820 "New atomic operations need to be known in the attributor.");
1821 }
1822
1823 return (Ordering != AtomicOrdering::Unordered &&
1824 Ordering != AtomicOrdering::Monotonic);
1825}
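// A standalone sketch of the classification in isNonRelaxedAtomic above
// (simplified enums, not the LLVM classes): unordered and monotonic (C++
// "relaxed") are the only orderings treated as non-synchronizing, a cmpxchg
// is non-relaxed if either its success or failure ordering is stronger than
// monotonic, and a fence is non-relaxed unless it is a single-thread fence.
namespace sketch {
enum class Ordering { Unordered, Monotonic, Acquire, Release, AcqRel, SeqCst };

inline bool isStrongerThanMonotonic(Ordering O) {
  return O != Ordering::Unordered && O != Ordering::Monotonic;
}

inline bool cmpXchgIsNonRelaxed(Ordering Success, Ordering Failure) {
  return Success != Ordering::Monotonic || Failure != Ordering::Monotonic;
}

inline bool fenceIsNonRelaxed(bool SingleThreadScope) {
  return !SingleThreadScope; // all legal fence orderings are > monotonic
}
} // namespace sketch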
1826
1827/// Return true if this intrinsic is nosync. This is only used for intrinsics
1828/// which would be nosync except that they have a volatile flag. All other
1829/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1830bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1831 if (auto *MI = dyn_cast<MemIntrinsic>(I))
1832 return !MI->isVolatile();
1833 return false;
1834}
1835
1836ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1837
1838 auto CheckRWInstForNoSync = [&](Instruction &I) {
1839 /// We are looking for volatile instructions or Non-Relaxed atomics.
1840
1841 if (const auto *CB = dyn_cast<CallBase>(&I)) {
1842 if (CB->hasFnAttr(Attribute::NoSync))
1843 return true;
1844
1845 if (isNoSyncIntrinsic(&I))
1846 return true;
1847
1848 const auto &NoSyncAA = A.getAAFor<AANoSync>(
1849 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1850 return NoSyncAA.isAssumedNoSync();
1851 }
1852
1853 if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1854 return true;
1855
1856 return false;
1857 };
1858
1859 auto CheckForNoSync = [&](Instruction &I) {
1860 // At this point we handled all read/write effects and they are all
1861 // nosync, so they can be skipped.
1862 if (I.mayReadOrWriteMemory())
1863 return true;
1864
1865 // non-convergent and readnone imply nosync.
1866 return !cast<CallBase>(I).isConvergent();
1867 };
1868
1869 bool UsedAssumedInformation = false;
1870 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1871 UsedAssumedInformation) ||
1872 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1873 UsedAssumedInformation))
1874 return indicatePessimisticFixpoint();
1875
1876 return ChangeStatus::UNCHANGED;
1877}
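// A standalone sketch of the two checks in AANoSyncImpl::updateImpl above
// (simplified instruction model): a read/write instruction is acceptable if
// it is neither volatile nor a non-relaxed atomic (calls defer to the
// callee's nosync), and every other call-like instruction is acceptable as
// long as it is not convergent.
namespace sketch {
struct RWInst {
  bool IsCall;
  bool CalleeAssumedNoSync; // stands in for hasFnAttr(NoSync) / AANoSync
  bool IsVolatile;
  bool IsNonRelaxedAtomic;
};

inline bool rwInstIsNoSync(const RWInst &I) {
  if (I.IsCall)
    return I.CalleeAssumedNoSync;
  return !I.IsVolatile && !I.IsNonRelaxedAtomic;
}

inline bool callLikeIsNoSync(bool MayReadOrWriteMemory, bool IsConvergent) {
  // Memory effects were already handled by rwInstIsNoSync.
  return MayReadOrWriteMemory || !IsConvergent;
}
} // namespace sketch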
1878
1879struct AANoSyncFunction final : public AANoSyncImpl {
1880 AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1881 : AANoSyncImpl(IRP, A) {}
1882
1883 /// See AbstractAttribute::trackStatistics()
1884 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1885};
1886
1887/// NoSync attribute deduction for a call site.
1888struct AANoSyncCallSite final : AANoSyncImpl {
1889 AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1890 : AANoSyncImpl(IRP, A) {}
1891
1892 /// See AbstractAttribute::initialize(...).
1893 void initialize(Attributor &A) override {
1894 AANoSyncImpl::initialize(A);
1895 Function *F = getAssociatedFunction();
1896 if (!F || F->isDeclaration())
1897 indicatePessimisticFixpoint();
1898 }
1899
1900 /// See AbstractAttribute::updateImpl(...).
1901 ChangeStatus updateImpl(Attributor &A) override {
1902 // TODO: Once we have call site specific value information we can provide
1903 // call site specific liveness information and then it makes
1904 // sense to specialize attributes for call site arguments instead of
1905 // redirecting requests to the callee argument.
1906 Function *F = getAssociatedFunction();
1907 const IRPosition &FnPos = IRPosition::function(*F);
1908 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1909 return clampStateAndIndicateChange(getState(), FnAA.getState());
1910 }
1911
1912 /// See AbstractAttribute::trackStatistics()
1913 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1914};
1915
1916/// ------------------------ No-Free Attributes ----------------------------
1917
1918struct AANoFreeImpl : public AANoFree {
1919 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1920
1921 /// See AbstractAttribute::updateImpl(...).
1922 ChangeStatus updateImpl(Attributor &A) override {
1923 auto CheckForNoFree = [&](Instruction &I) {
1924 const auto &CB = cast<CallBase>(I);
1925 if (CB.hasFnAttr(Attribute::NoFree))
1926 return true;
1927
1928 const auto &NoFreeAA = A.getAAFor<AANoFree>(
1929 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1930 return NoFreeAA.isAssumedNoFree();
1931 };
1932
1933 bool UsedAssumedInformation = false;
1934 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1935 UsedAssumedInformation))
1936 return indicatePessimisticFixpoint();
1937 return ChangeStatus::UNCHANGED;
1938 }
1939
1940 /// See AbstractAttribute::getAsStr().
1941 const std::string getAsStr() const override {
1942 return getAssumed() ? "nofree" : "may-free";
1943 }
1944};
1945
1946struct AANoFreeFunction final : public AANoFreeImpl {
1947 AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1948 : AANoFreeImpl(IRP, A) {}
1949
1950 /// See AbstractAttribute::trackStatistics()
1951 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1952};
1953
1954/// NoFree attribute deduction for a call site.
1955struct AANoFreeCallSite final : AANoFreeImpl {
1956 AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1957 : AANoFreeImpl(IRP, A) {}
1958
1959 /// See AbstractAttribute::initialize(...).
1960 void initialize(Attributor &A) override {
1961 AANoFreeImpl::initialize(A);
1962 Function *F = getAssociatedFunction();
1963 if (!F || F->isDeclaration())
1964 indicatePessimisticFixpoint();
1965 }
1966
1967 /// See AbstractAttribute::updateImpl(...).
1968 ChangeStatus updateImpl(Attributor &A) override {
1969 // TODO: Once we have call site specific value information we can provide
1970 // call site specific liveness information and then it makes
1971 // sense to specialize attributes for call site arguments instead of
1972 // redirecting requests to the callee argument.
1973 Function *F = getAssociatedFunction();
1974 const IRPosition &FnPos = IRPosition::function(*F);
1975 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1976 return clampStateAndIndicateChange(getState(), FnAA.getState());
1977 }
1978
1979 /// See AbstractAttribute::trackStatistics()
1980 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1981};
1982
1983/// NoFree attribute for floating values.
1984struct AANoFreeFloating : AANoFreeImpl {
1985 AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1986 : AANoFreeImpl(IRP, A) {}
1987
1988 /// See AbstractAttribute::trackStatistics()
1989 void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1990
1991 /// See Abstract Attribute::updateImpl(...).
1992 ChangeStatus updateImpl(Attributor &A) override {
1993 const IRPosition &IRP = getIRPosition();
1994
1995 const auto &NoFreeAA = A.getAAFor<AANoFree>(
1996 *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1997 if (NoFreeAA.isAssumedNoFree())
1998 return ChangeStatus::UNCHANGED;
1999
2000 Value &AssociatedValue = getIRPosition().getAssociatedValue();
2001 auto Pred = [&](const Use &U, bool &Follow) -> bool {
2002 Instruction *UserI = cast<Instruction>(U.getUser());
2003 if (auto *CB = dyn_cast<CallBase>(UserI)) {
2004 if (CB->isBundleOperand(&U))
2005 return false;
2006 if (!CB->isArgOperand(&U))
2007 return true;
2008 unsigned ArgNo = CB->getArgOperandNo(&U);
2009
2010 const auto &NoFreeArg = A.getAAFor<AANoFree>(
2011 *this, IRPosition::callsite_argument(*CB, ArgNo),
2012 DepClassTy::REQUIRED);
2013 return NoFreeArg.isAssumedNoFree();
2014 }
2015
2016 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2017 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2018 Follow = true;
2019 return true;
2020 }
2021 if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2022 isa<ReturnInst>(UserI))
2023 return true;
2024
2025 // Unknown user.
2026 return false;
2027 };
2028 if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2029 return indicatePessimisticFixpoint();
2030
2031 return ChangeStatus::UNCHANGED;
2032 }
2033};
2034
2035/// NoFree attribute for a function argument.
2036struct AANoFreeArgument final : AANoFreeFloating {
2037 AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2038 : AANoFreeFloating(IRP, A) {}
2039
2040 /// See AbstractAttribute::trackStatistics()
2041 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2042};
2043
2044/// NoFree attribute for call site arguments.
2045struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2046 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2047 : AANoFreeFloating(IRP, A) {}
2048
2049 /// See AbstractAttribute::updateImpl(...).
2050 ChangeStatus updateImpl(Attributor &A) override {
2051 // TODO: Once we have call site specific value information we can provide
2052 // call site specific liveness information and then it makes
2053 // sense to specialize attributes for call site arguments instead of
2054 // redirecting requests to the callee argument.
2055 Argument *Arg = getAssociatedArgument();
2056 if (!Arg)
2057 return indicatePessimisticFixpoint();
2058 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2059 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2060 return clampStateAndIndicateChange(getState(), ArgAA.getState());
2061 }
2062
2063 /// See AbstractAttribute::trackStatistics()
2064 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2065};
2066
2067/// NoFree attribute for function return value.
2068struct AANoFreeReturned final : AANoFreeFloating {
2069 AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2070 : AANoFreeFloating(IRP, A) {
2071 llvm_unreachable("NoFree is not applicable to function returns!");
2072 }
2073
2074 /// See AbstractAttribute::initialize(...).
2075 void initialize(Attributor &A) override {
2076 llvm_unreachable("NoFree is not applicable to function returns!");
2077 }
2078
2079 /// See AbstractAttribute::updateImpl(...).
2080 ChangeStatus updateImpl(Attributor &A) override {
2081 llvm_unreachable("NoFree is not applicable to function returns!");
2082 }
2083
2084 /// See AbstractAttribute::trackStatistics()
2085 void trackStatistics() const override {}
2086};
2087
2088/// NoFree attribute deduction for a call site return value.
2089struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2090 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2091 : AANoFreeFloating(IRP, A) {}
2092
2093 ChangeStatus manifest(Attributor &A) override {
2094 return ChangeStatus::UNCHANGED;
2095 }
2096 /// See AbstractAttribute::trackStatistics()
2097 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2098};
2099
2100/// ------------------------ NonNull Argument Attribute ------------------------
2101static int64_t getKnownNonNullAndDerefBytesForUse(
2102 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2103 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2104 TrackUse = false;
2105
2106 const Value *UseV = U->get();
2107 if (!UseV->getType()->isPointerTy())
2108 return 0;
2109
2110 // We need to follow common pointer manipulation uses to the accesses they
2111 // feed into. We can try to be smart to avoid looking through things we do not
2112 // like for now, e.g., non-inbounds GEPs.
2113 if (isa<CastInst>(I)) {
2114 TrackUse = true;
2115 return 0;
2116 }
2117
2118 if (isa<GetElementPtrInst>(I)) {
2119 TrackUse = true;
2120 return 0;
2121 }
2122
2123 Type *PtrTy = UseV->getType();
2124 const Function *F = I->getFunction();
2125 bool NullPointerIsDefined =
2126 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2127 const DataLayout &DL = A.getInfoCache().getDL();
2128 if (const auto *CB = dyn_cast<CallBase>(I)) {
2129 if (CB->isBundleOperand(U)) {
2130 if (RetainedKnowledge RK = getKnowledgeFromUse(
2131 U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2132 IsNonNull |=
2133 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2134 return RK.ArgValue;
2135 }
2136 return 0;
2137 }
2138
2139 if (CB->isCallee(U)) {
2140 IsNonNull |= !NullPointerIsDefined;
2141 return 0;
2142 }
2143
2144 unsigned ArgNo = CB->getArgOperandNo(U);
2145 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2146 // As long as we only use known information there is no need to track
2147 // dependences here.
2148 auto &DerefAA =
2149 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2150 IsNonNull |= DerefAA.isKnownNonNull();
2151 return DerefAA.getKnownDereferenceableBytes();
2152 }
2153
2154 int64_t Offset;
2155 const Value *Base =
2156 getMinimalBaseOfAccessPointerOperand(A, QueryingAA, I, Offset, DL);
2157 if (Base) {
2158 if (Base == &AssociatedValue &&
2159 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2160 int64_t DerefBytes =
2161 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2162
2163 IsNonNull |= !NullPointerIsDefined;
2164 return std::max(int64_t(0), DerefBytes);
2165 }
2166 }
2167
2168 /// Corner case when an offset is 0.
2169 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2170 /*AllowNonInbounds*/ true);
2171 if (Base) {
2172 if (Offset == 0 && Base == &AssociatedValue &&
2173 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2174 int64_t DerefBytes =
2175 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2176 IsNonNull |= !NullPointerIsDefined;
2177 return std::max(int64_t(0), DerefBytes);
2178 }
2179 }
2180
2181 return 0;
2182}
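
A hedged illustration of the kind of use this helper reasons about (the function below is an assumed example, not part of the LLVM sources). Assuming the default address space, where a null pointer is not a valid access target, the load implies, on any execution that reaches it, that 'p' is nonnull and dereferenceable for at least sizeof(int) bytes:

int readThroughPointer(int *p) {
  return *p; // the pointer operand of this load is 'p' itself, so the access
             // yields both known nonnull and known dereferenceable bytes
}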
2183
2184struct AANonNullImpl : AANonNull {
2185 AANonNullImpl(const IRPosition &IRP, Attributor &A)
2186 : AANonNull(IRP, A),
2187 NullIsDefined(NullPointerIsDefined(
2188 getAnchorScope(),
2189 getAssociatedValue().getType()->getPointerAddressSpace())) {}
2190
2191 /// See AbstractAttribute::initialize(...).
2192 void initialize(Attributor &A) override {
2193 Value &V = getAssociatedValue();
2194 if (!NullIsDefined &&
2195 hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2196 /* IgnoreSubsumingPositions */ false, &A)) {
2197 indicateOptimisticFixpoint();
2198 return;
2199 }
2200
2201 if (isa<ConstantPointerNull>(V)) {
2202 indicatePessimisticFixpoint();
2203 return;
2204 }
2205
2206 AANonNull::initialize(A);
2207
2208 bool CanBeNull, CanBeFreed;
2209 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2210 CanBeFreed)) {
2211 if (!CanBeNull) {
2212 indicateOptimisticFixpoint();
2213 return;
2214 }
2215 }
2216
2217 if (isa<GlobalValue>(&getAssociatedValue())) {
2218 indicatePessimisticFixpoint();
2219 return;
2220 }
2221
2222 if (Instruction *CtxI = getCtxI())
2223 followUsesInMBEC(*this, A, getState(), *CtxI);
2224 }
2225
2226 /// See followUsesInMBEC
2227 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2228 AANonNull::StateType &State) {
2229 bool IsNonNull = false;
2230 bool TrackUse = false;
2231 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2232 IsNonNull, TrackUse);
2233 State.setKnown(IsNonNull);
2234 return TrackUse;
2235 }
2236
2237 /// See AbstractAttribute::getAsStr().
2238 const std::string getAsStr() const override {
2239 return getAssumed() ? "nonnull" : "may-null";
2240 }
2241
2242 /// Flag to determine if the underlying value can be null and still allow
2243 /// valid accesses.
2244 const bool NullIsDefined;
2245};
2246
2247/// NonNull attribute for a floating value.
2248struct AANonNullFloating : public AANonNullImpl {
2249 AANonNullFloating(const IRPosition &IRP, Attributor &A)
2250 : AANonNullImpl(IRP, A) {}
2251
2252 /// See AbstractAttribute::updateImpl(...).
2253 ChangeStatus updateImpl(Attributor &A) override {
2254 const DataLayout &DL = A.getDataLayout();
2255
2256 DominatorTree *DT = nullptr;
2257 AssumptionCache *AC = nullptr;
2258 InformationCache &InfoCache = A.getInfoCache();
2259 if (const Function *Fn = getAnchorScope()) {
2260 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2261 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2262 }
2263
2264 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2265 AANonNull::StateType &T, bool Stripped) -> bool {
2266 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2267 DepClassTy::REQUIRED);
2268 if (!Stripped && this == &AA) {
2269 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2270 T.indicatePessimisticFixpoint();
2271 } else {
2272 // Use abstract attribute information.
2273 const AANonNull::StateType &NS = AA.getState();
2274 T ^= NS;
2275 }
2276 return T.isValidState();
2277 };
2278
2279 StateType T;
2280 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2281 VisitValueCB, getCtxI()))
2282 return indicatePessimisticFixpoint();
2283
2284 return clampStateAndIndicateChange(getState(), T);
2285 }
2286
2287 /// See AbstractAttribute::trackStatistics()
2288 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2289};
2290
2291/// NonNull attribute for function return value.
2292struct AANonNullReturned final
2293 : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2294 AANonNullReturned(const IRPosition &IRP, Attributor &A)
2295 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2296
2297 /// See AbstractAttribute::getAsStr().
2298 const std::string getAsStr() const override {
2299 return getAssumed() ? "nonnull" : "may-null";
2300 }
2301
2302 /// See AbstractAttribute::trackStatistics()
2303 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2304};
2305
2306/// NonNull attribute for function argument.
2307struct AANonNullArgument final
2308 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2309 AANonNullArgument(const IRPosition &IRP, Attributor &A)
2310 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2311
2312 /// See AbstractAttribute::trackStatistics()
2313 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2314};
2315
2316struct AANonNullCallSiteArgument final : AANonNullFloating {
2317 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2318 : AANonNullFloating(IRP, A) {}
2319
2320 /// See AbstractAttribute::trackStatistics()
2321 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2322};
2323
2324/// NonNull attribute for a call site return position.
2325struct AANonNullCallSiteReturned final
2326 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2327 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2328 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2329
2330 /// See AbstractAttribute::trackStatistics()
2331 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2332};
2333
2334/// ------------------------ No-Recurse Attributes ----------------------------
2335
2336struct AANoRecurseImpl : public AANoRecurse {
2337 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2338
2339 /// See AbstractAttribute::getAsStr()
2340 const std::string getAsStr() const override {
2341 return getAssumed() ? "norecurse" : "may-recurse";
2342 }
2343};
2344
2345struct AANoRecurseFunction final : AANoRecurseImpl {
2346 AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2347 : AANoRecurseImpl(IRP, A) {}
2348
2349 /// See AbstractAttribute::initialize(...).
2350 void initialize(Attributor &A) override {
2351 AANoRecurseImpl::initialize(A);
2352 // TODO: We should build a call graph ourselves to enable this in the module
2353 // pass as well.
2354 if (const Function *F = getAnchorScope())
2355 if (A.getInfoCache().getSccSize(*F) != 1)
2356 indicatePessimisticFixpoint();
2357 }
2358
2359 /// See AbstractAttribute::updateImpl(...).
2360 ChangeStatus updateImpl(Attributor &A) override {
2361
2362 // If all live call sites are known to be no-recurse, we are as well.
2363 auto CallSitePred = [&](AbstractCallSite ACS) {
2364 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2365 *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2366 DepClassTy::NONE);
2367 return NoRecurseAA.isKnownNoRecurse();
2368 };
2369 bool AllCallSitesKnown;
2370 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2371 // If we know all call sites and all are known no-recurse, we are done.
2372 // If all known call sites, which might not be all that exist, are known
2373 // to be no-recurse, we are not done but we can continue to assume
2374 // no-recurse. If one of the call sites we have not visited will become
2375 // live, another update is triggered.
2376 if (AllCallSitesKnown)
2377 indicateOptimisticFixpoint();
2378 return ChangeStatus::UNCHANGED;
2379 }
2380
2381 // If the above check does not hold anymore we look at the calls.
2382 auto CheckForNoRecurse = [&](Instruction &I) {
2383 const auto &CB = cast<CallBase>(I);
2384 if (CB.hasFnAttr(Attribute::NoRecurse))
2385 return true;
2386
2387 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2388 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2389 if (!NoRecurseAA.isAssumedNoRecurse())
2390 return false;
2391
2392 // Recursion to the same function
2393 if (CB.getCalledFunction() == getAnchorScope())
2394 return false;
2395
2396 return true;
2397 };
2398
2399 bool UsedAssumedInformation = false;
2400 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2401 UsedAssumedInformation))
2402 return indicatePessimisticFixpoint();
2403 return ChangeStatus::UNCHANGED;
2404 }
2405
2406 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2407};
2408
2409/// NoRecurse attribute deduction for call sites.
2410struct AANoRecurseCallSite final : AANoRecurseImpl {
2411 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2412 : AANoRecurseImpl(IRP, A) {}
2413
2414 /// See AbstractAttribute::initialize(...).
2415 void initialize(Attributor &A) override {
2416 AANoRecurseImpl::initialize(A);
2417 Function *F = getAssociatedFunction();
2418 if (!F || F->isDeclaration())
2419 indicatePessimisticFixpoint();
2420 }
2421
2422 /// See AbstractAttribute::updateImpl(...).
2423 ChangeStatus updateImpl(Attributor &A) override {
2424 // TODO: Once we have call site specific value information we can provide
2425 // call site specific liveness information and then it makes
2426 // sense to specialize attributes for call site arguments instead of
2427 // redirecting requests to the callee argument.
2428 Function *F = getAssociatedFunction();
2429 const IRPosition &FnPos = IRPosition::function(*F);
2430 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2431 return clampStateAndIndicateChange(getState(), FnAA.getState());
2432 }
2433
2434 /// See AbstractAttribute::trackStatistics()
2435 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2436};
2437
2438/// -------------------- Undefined-Behavior Attributes ------------------------
2439
2440struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2441 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2442 : AAUndefinedBehavior(IRP, A) {}
2443
2444 /// See AbstractAttribute::updateImpl(...).
2445 // through a pointer (i.e. also branches etc.)
2446 ChangeStatus updateImpl(Attributor &A) override {
2447 const size_t UBPrevSize = KnownUBInsts.size();
2448 const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2449
2450 auto InspectMemAccessInstForUB = [&](Instruction &I) {
2451 // Lang ref now states volatile stores are not UB, so let's skip them.
2452 if (I.isVolatile() && I.mayWriteToMemory())
2453 return true;
2454
2455 // Skip instructions that are already saved.
2456 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2457 return true;
2458
2459 // If we reach here, we know we have an instruction
2460 // that accesses memory through a pointer operand,
2461 // for which getPointerOperand() should give it to us.
2462 Value *PtrOp =
2463 const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2464 assert(PtrOp &&
2465 "Expected pointer operand of memory accessing instruction");
2466
2467 // Either we stopped and the appropriate action was taken,
2468 // or we got back a simplified value to continue.
2469 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2470 if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2471 return true;
2472 const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2473
2474 // A memory access through a pointer is considered UB
2475 // only if the pointer has constant null value.
2476 // TODO: Expand it to not only check constant values.
2477 if (!isa<ConstantPointerNull>(PtrOpVal)) {
2478 AssumedNoUBInsts.insert(&I);
2479 return true;
2480 }
2481 const Type *PtrTy = PtrOpVal->getType();
2482
2483 // Because we only consider instructions inside functions,
2484 // assume that a parent function exists.
2485 const Function *F = I.getFunction();
2486
2487 // A memory access using constant null pointer is only considered UB
2488 // if null pointer is _not_ defined for the target platform.
2489 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2490 AssumedNoUBInsts.insert(&I);
2491 else
2492 KnownUBInsts.insert(&I);
2493 return true;
2494 };
2495
2496 auto InspectBrInstForUB = [&](Instruction &I) {
2497 // A conditional branch instruction is considered UB if it has `undef`
2498 // condition.
2499
2500 // Skip instructions that are already saved.
2501 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2502 return true;
2503
2504 // We know we have a branch instruction.
2505 auto *BrInst = cast<BranchInst>(&I);
2506
2507 // Unconditional branches are never considered UB.
2508 if (BrInst->isUnconditional())
2509 return true;
2510
2511 // Either we stopped and the appropriate action was taken,
2512 // or we got back a simplified value to continue.
2513 Optional<Value *> SimplifiedCond =
2514 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2515 if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2516 return true;
2517 AssumedNoUBInsts.insert(&I);
2518 return true;
2519 };
2520
2521 auto InspectCallSiteForUB = [&](Instruction &I) {
2522 // Check whether a callsite always causes UB or not.
2523
2524 // Skip instructions that are already saved.
2525 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2526 return true;
2527
2528 // Check nonnull and noundef argument attribute violation for each
2529 // callsite.
2530 CallBase &CB = cast<CallBase>(I);
2531 Function *Callee = CB.getCalledFunction();
2532 if (!Callee)
2533 return true;
2534 for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2535 // If current argument is known to be simplified to null pointer and the
2536 // corresponding argument position is known to have nonnull attribute,
2537 // the argument is poison. Furthermore, if the argument is poison and
2538 // the position is known to have noundef attribute, this callsite is
2539 // considered UB.
2540 if (idx >= Callee->arg_size())
2541 break;
2542 Value *ArgVal = CB.getArgOperand(idx);
2543 if (!ArgVal)
2544 continue;
2545 // Here, we handle three cases.
2546 // (1) Not having a value means it is dead. (we can replace the value
2547 // with undef)
2548 // (2) Simplified to undef. The argument violates the noundef attribute.
2549 // (3) Simplified to null pointer where known to be nonnull.
2550 // The argument is a poison value and violates the noundef attribute.
2551 IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2552 auto &NoUndefAA =
2553 A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2554 if (!NoUndefAA.isKnownNoUndef())
2555 continue;
2556 bool UsedAssumedInformation = false;
2557 Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2558 IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2559 if (UsedAssumedInformation)
2560 continue;
2561 if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2562 return true;
2563 if (!SimplifiedVal.hasValue() ||
2564 isa<UndefValue>(*SimplifiedVal.getValue())) {
2565 KnownUBInsts.insert(&I);
2566 continue;
2567 }
2568 if (!ArgVal->getType()->isPointerTy() ||
2569 !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2570 continue;
2571 auto &NonNullAA =
2572 A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2573 if (NonNullAA.isKnownNonNull())
2574 KnownUBInsts.insert(&I);
2575 }
2576 return true;
2577 };
2578
2579 auto InspectReturnInstForUB =
2580 [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2581 // Check if a return instruction always causes UB or not.
2582 // Note: It is guaranteed that the returned position of the anchor
2583 // scope has noundef attribute when this is called.
2584 // We also ensure the return position is not "assumed dead"
2585 // because the returned value was then potentially simplified to
2586 // `undef` in AAReturnedValues without removing the `noundef`
2587 // attribute yet.
2588
2589 // When the returned position has noundef attribute, UB occurs in the
2590 // following cases.
2591 // (1) Returned value is known to be undef.
2592 // (2) The value is known to be a null pointer and the returned
2593 // position has nonnull attribute (because the returned value is
2594 // poison).
2595 bool FoundUB = false;
2596 if (isa<UndefValue>(V)) {
2597 FoundUB = true;
2598 } else {
2599 if (isa<ConstantPointerNull>(V)) {
2600 auto &NonNullAA = A.getAAFor<AANonNull>(
2601 *this, IRPosition::returned(*getAnchorScope()),
2602 DepClassTy::NONE);
2603 if (NonNullAA.isKnownNonNull())
2604 FoundUB = true;
2605 }
2606 }
2607
2608 if (FoundUB)
2609 for (ReturnInst *RI : RetInsts)
2610 KnownUBInsts.insert(RI);
2611 return true;
2612 };
2613
2614 bool UsedAssumedInformation = false;
2615 A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2616 {Instruction::Load, Instruction::Store,
2617 Instruction::AtomicCmpXchg,
2618 Instruction::AtomicRMW},
2619 UsedAssumedInformation,
2620 /* CheckBBLivenessOnly */ true);
2621 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2622 UsedAssumedInformation,
2623 /* CheckBBLivenessOnly */ true);
2624 A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2625 UsedAssumedInformation);
2626
2627 // If the returned position of the anchor scope has noundef attribute, check
2628 // all returned instructions.
2629 if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2630 const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2631 if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2632 auto &RetPosNoUndefAA =
2633 A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2634 if (RetPosNoUndefAA.isKnownNoUndef())
2635 A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2636 *this);
2637 }
2638 }
2639
2640 if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2641 UBPrevSize != KnownUBInsts.size())
2642 return ChangeStatus::CHANGED;
2643 return ChangeStatus::UNCHANGED;
2644 }
2645
2646 bool isKnownToCauseUB(Instruction *I) const override {
2647 return KnownUBInsts.count(I);
2648 }
2649
2650 bool isAssumedToCauseUB(Instruction *I) const override {
2651 // In simple words, if an instruction is not in the set of instructions
2652 // assumed to _not_ cause UB, then it is assumed to cause UB (that
2653 // includes those in the KnownUBInsts set). The rest of the boilerplate
2654 // ensures that it is one of the instructions we test
2655 // for UB.
2656
2657 switch (I->getOpcode()) {
2658 case Instruction::Load:
2659 case Instruction::Store:
2660 case Instruction::AtomicCmpXchg:
2661 case Instruction::AtomicRMW:
2662 return !AssumedNoUBInsts.count(I);
2663 case Instruction::Br: {
2664 auto BrInst = cast<BranchInst>(I);
2665 if (BrInst->isUnconditional())
2666 return false;
2667 return !AssumedNoUBInsts.count(I);
2668 } break;
2669 default:
2670 return false;
2671 }
2672 return false;
2673 }
2674
2675 ChangeStatus manifest(Attributor &A) override {
2676 if (KnownUBInsts.empty())
2677 return ChangeStatus::UNCHANGED;
2678 for (Instruction *I : KnownUBInsts)
2679 A.changeToUnreachableAfterManifest(I);
2680 return ChangeStatus::CHANGED;
2681 }
2682
2683 /// See AbstractAttribute::getAsStr()
2684 const std::string getAsStr() const override {
2685 return getAssumed() ? "undefined-behavior" : "no-ub";
2686 }
2687
2688 /// Note: The correctness of this analysis depends on the fact that the
2689 /// following 2 sets will stop changing after some point.
2690 /// "Change" here means that their size changes.
2691 /// The size of each set is monotonically increasing
2692 /// (we only add items to them) and it is upper bounded by the number of
2693 /// instructions in the processed function (we can never save more
2694 /// elements in either set than this number). Hence, at some point,
2695 /// they will stop increasing.
2696 /// Consequently, at some point, both sets will have stopped
2697 /// changing, effectively making the analysis reach a fixpoint.
2698
2699 /// Note: These 2 sets are disjoint and an instruction can be considered
2700 /// one of 3 things:
2701 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2702 /// the KnownUBInsts set.
2703 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2704 /// has a reason to assume it).
2705 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2706 /// could not find a reason to assume or prove that it can cause UB,
2707 /// hence it assumes it doesn't. We have a set for these instructions
2708 /// so that we don't reprocess them in every update.
2709 /// Note however that instructions in this set may cause UB.
2710
2711protected:
2712 /// A set of all live instructions _known_ to cause UB.
2713 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2714
2715private:
2716 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2717 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2718
2719 // Should be called during updates when we're processing an instruction
2720 // \p I that depends on a value \p V; one of the following has to happen:
2721 // - If the value is assumed, then stop.
2722 // - If the value is known but undef, then consider it UB.
2723 // - Otherwise, do specific processing with the simplified value.
2724 // We return None in the first 2 cases to signify that an appropriate
2725 // action was taken and the caller should stop.
2726 // Otherwise, we return the simplified value that the caller should
2727 // use for specific processing.
2728 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2729 Instruction *I) {
2730 bool UsedAssumedInformation = false;
2731 Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2732 IRPosition::value(*V), *this, UsedAssumedInformation);
2733 if (!UsedAssumedInformation) {
2734 // Don't depend on assumed values.
2735 if (!SimplifiedV.hasValue()) {
2736 // If it is known (which we tested above) but it doesn't have a value,
2737 // then we can assume `undef` and hence the instruction is UB.
2738 KnownUBInsts.insert(I);
2739 return llvm::None;
2740 }
2741 if (!SimplifiedV.getValue())
2742 return nullptr;
2743 V = *SimplifiedV;
2744 }
2745 if (isa<UndefValue>(V)) {
2746 KnownUBInsts.insert(I);
2747 return llvm::None;
2748 }
2749 return V;
2750 }
2751};
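
A concrete, hedged example of the classification described in the note above (the function below is illustrative only and assumes the default address space, where accessing a null pointer is undefined):

void classifyStores(int *p) {
  int *np = nullptr;
  *np = 1; // the pointer operand simplifies to a constant null value, so this
           // store would land in KnownUBInsts and later be turned into
           // 'unreachable' by manifest()
  *p = 2;  // no reason to assume UB is found here, so the store lands in
           // AssumedNoUBInsts and is not re-inspected on every update
}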
2752
2753struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2754 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2755 : AAUndefinedBehaviorImpl(IRP, A) {}
2756
2757 /// See AbstractAttribute::trackStatistics()
2758 void trackStatistics() const override {
2759 STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2760 "Number of instructions known to have UB");
2761 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2762 KnownUBInsts.size();
2763 }
2764};
2765
2766/// ------------------------ Will-Return Attributes ----------------------------
2767
2768// Helper function that checks whether a function has any cycle that we
2769// cannot prove to be bounded.
2770// Loops with a known maximum trip count are considered bounded; any other cycle is not.
2771static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2772 ScalarEvolution *SE =
2773 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2774 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2775 // If either SCEV or LoopInfo is not available for the function, we assume
2776 // any cycle to be an unbounded cycle.
2777 // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2778 // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2779 if (!SE || !LI) {
2780 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2781 if (SCCI.hasCycle())
2782 return true;
2783 return false;
2784 }
2785
2786 // If there's irreducible control, the function may contain non-loop cycles.
2787 if (mayContainIrreducibleControl(F, LI))
2788 return true;
2789
2790 // Any loop that does not have a max trip count is considered an unbounded cycle.
2791 for (auto *L : LI->getLoopsInPreorder()) {
2792 if (!SE->getSmallConstantMaxTripCount(L))
2793 return true;
2794 }
2795 return false;
2796}
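
For illustration, a hedged sketch of the bounded/unbounded distinction this helper draws (both functions are assumed examples, not part of the LLVM sources):

int boundedSum(const int *a) {
  int sum = 0;
  for (int i = 0; i < 16; ++i) // SCEV can compute a small constant maximum
    sum += a[i];               // trip count, so this cycle counts as bounded
  return sum;
}

int spinUntilCleared(volatile int *flag) {
  int spins = 0;
  while (*flag)  // no maximum trip count can be computed, so this cycle is
    ++spins;     // treated as unbounded and the function is not proven
  return spins;  // willreturn by this check
}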
2797
2798struct AAWillReturnImpl : public AAWillReturn {
2799 AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2800 : AAWillReturn(IRP, A) {}
2801
2802 /// See AbstractAttribute::initialize(...).
2803 void initialize(Attributor &A) override {
2804 AAWillReturn::initialize(A);
2805
2806 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2807 indicateOptimisticFixpoint();
2808 return;
2809 }
2810 }
2811
2812 /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2813 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2814 // Check for `mustprogress` in the scope and the associated function which
2815 // might be different if this is a call site.
2816 if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2817 (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2818 return false;
2819
2820 const auto &MemAA =
2821 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2822 if (!MemAA.isAssumedReadOnly())
2823 return false;
2824 if (KnownOnly && !MemAA.isKnownReadOnly())
2825 return false;
2826 if (!MemAA.isKnownReadOnly())
2827 A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2828
2829 return true;
2830 }
2831
2832 /// See AbstractAttribute::updateImpl(...).
2833 ChangeStatus updateImpl(Attributor &A) override {
2834 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2835 return ChangeStatus::UNCHANGED;
2836
2837 auto CheckForWillReturn = [&](Instruction &I) {
2838 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2839 const auto &WillReturnAA =
2840 A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2841 if (WillReturnAA.isKnownWillReturn())
2842 return true;
2843 if (!WillReturnAA.isAssumedWillReturn())
2844 return false;
2845 const auto &NoRecurseAA =
2846 A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2847 return NoRecurseAA.isAssumedNoRecurse();
2848 };
2849
2850 bool UsedAssumedInformation = false;
2851 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2852 UsedAssumedInformation))
2853 return indicatePessimisticFixpoint();
2854
2855 return ChangeStatus::UNCHANGED;
2856 }
2857
2858 /// See AbstractAttribute::getAsStr()
2859 const std::string getAsStr() const override {
2860 return getAssumed() ? "willreturn" : "may-noreturn";
2861 }
2862};
2863
2864struct AAWillReturnFunction final : AAWillReturnImpl {
2865 AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2866 : AAWillReturnImpl(IRP, A) {}
2867
2868 /// See AbstractAttribute::initialize(...).
2869 void initialize(Attributor &A) override {
2870 AAWillReturnImpl::initialize(A);
2871
2872 Function *F = getAnchorScope();
2873 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2874 indicatePessimisticFixpoint();
2875 }
2876
2877 /// See AbstractAttribute::trackStatistics()
2878 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2879};
2880
2881/// WillReturn attribute deduction for call sites.
2882struct AAWillReturnCallSite final : AAWillReturnImpl {
2883 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2884 : AAWillReturnImpl(IRP, A) {}
2885
2886 /// See AbstractAttribute::initialize(...).
2887 void initialize(Attributor &A) override {
2888 AAWillReturnImpl::initialize(A);
2889 Function *F = getAssociatedFunction();
2890 if (!F || !A.isFunctionIPOAmendable(*F))
2891 indicatePessimisticFixpoint();
2892 }
2893
2894 /// See AbstractAttribute::updateImpl(...).
2895 ChangeStatus updateImpl(Attributor &A) override {
2896 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2897 return ChangeStatus::UNCHANGED;
2898
2899 // TODO: Once we have call site specific value information we can provide
2900 // call site specific liveness information and then it makes
2901 // sense to specialize attributes for call site arguments instead of
2902 // redirecting requests to the callee argument.
2903 Function *F = getAssociatedFunction();
2904 const IRPosition &FnPos = IRPosition::function(*F);
2905 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2906 return clampStateAndIndicateChange(getState(), FnAA.getState());
2907 }
2908
2909 /// See AbstractAttribute::trackStatistics()
2910 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2911};
2912
2913/// -------------------AAReachability Attribute--------------------------
2914
2915struct AAReachabilityImpl : AAReachability {
2916 AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2917 : AAReachability(IRP, A) {}
2918
2919 const std::string getAsStr() const override {
2920 // TODO: Return the number of reachable queries.
2921 return "reachable";
2922 }
2923
2924 /// See AbstractAttribute::updateImpl(...).
2925 ChangeStatus updateImpl(Attributor &A) override {
2926 return ChangeStatus::UNCHANGED;
2927 }
2928};
2929
2930struct AAReachabilityFunction final : public AAReachabilityImpl {
2931 AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2932 : AAReachabilityImpl(IRP, A) {}
2933
2934 /// See AbstractAttribute::trackStatistics()
2935 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2936};
2937
2938/// ------------------------ NoAlias Argument Attribute ------------------------
2939
2940struct AANoAliasImpl : AANoAlias {
2941 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2942 assert(getAssociatedType()->isPointerTy() &&
2943 "Noalias is a pointer attribute");
2944 }
2945
2946 const std::string getAsStr() const override {
2947 return getAssumed() ? "noalias" : "may-alias";
2948 }
2949};
2950
2951/// NoAlias attribute for a floating value.
2952struct AANoAliasFloating final : AANoAliasImpl {
2953 AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2954 : AANoAliasImpl(IRP, A) {}
2955
2956 /// See AbstractAttribute::initialize(...).
2957 void initialize(Attributor &A) override {
2958 AANoAliasImpl::initialize(A);
2959 Value *Val = &getAssociatedValue();
2960 do {
2961 CastInst *CI = dyn_cast<CastInst>(Val);
2962 if (!CI)
2963 break;
2964 Value *Base = CI->getOperand(0);
2965 if (!Base->hasOneUse())
2966 break;
2967 Val = Base;
2968 } while (true);
2969
2970 if (!Val->getType()->isPointerTy()) {
2971 indicatePessimisticFixpoint();
2972 return;
2973 }
2974
2975 if (isa<AllocaInst>(Val))
2976 indicateOptimisticFixpoint();
2977 else if (isa<ConstantPointerNull>(Val) &&
2978 !NullPointerIsDefined(getAnchorScope(),
2979 Val->getType()->getPointerAddressSpace()))
2980 indicateOptimisticFixpoint();
2981 else if (Val != &getAssociatedValue()) {
2982 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2983 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2984 if (ValNoAliasAA.isKnownNoAlias())
2985 indicateOptimisticFixpoint();
2986 }
2987 }
2988
2989 /// See AbstractAttribute::updateImpl(...).
2990 ChangeStatus updateImpl(Attributor &A) override {
2991 // TODO: Implement this.
2992 return indicatePessimisticFixpoint();
2993 }
2994
2995 /// See AbstractAttribute::trackStatistics()
2996 void trackStatistics() const override {
2997 STATS_DECLTRACK_FLOATING_ATTR(noalias)
2998 }
2999};
3000
3001/// NoAlias attribute for an argument.
3002struct AANoAliasArgument final
3003 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3004 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3005 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3006
3007 /// See AbstractAttribute::initialize(...).
3008 void initialize(Attributor &A) override {
3009 Base::initialize(A);
3010 // See callsite argument attribute and callee argument attribute.
3011 if (hasAttr({Attribute::ByVal}))
3012 indicateOptimisticFixpoint();
3013 }
3014
3015 /// See AbstractAttribute::updateImpl(...).
3016 ChangeStatus updateImpl(Attributor &A) override {
3017 // We have to make sure no-alias on the argument does not break
3018 // synchronization when this is a callback argument, see also [1] below.
3019 // If synchronization cannot be affected, we delegate to the base updateImpl
3020 // function, otherwise we give up for now.
3021
3022 // If the function is no-sync, no-alias cannot break synchronization.
3023 const auto &NoSyncAA =
3024 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3025 DepClassTy::OPTIONAL);
3026 if (NoSyncAA.isAssumedNoSync())
3027 return Base::updateImpl(A);
3028
3029 // If the argument is read-only, no-alias cannot break synchronization.
3030 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3031 *this, getIRPosition(), DepClassTy::OPTIONAL);
3032 if (MemBehaviorAA.isAssumedReadOnly())
3033 return Base::updateImpl(A);
3034
3035 // If the argument is never passed through callbacks, no-alias cannot break
3036 // synchronization.
3037 bool AllCallSitesKnown;
3038 if (A.checkForAllCallSites(
3039 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3040 true, AllCallSitesKnown))
3041 return Base::updateImpl(A);
3042
3043 // TODO: add no-alias but make sure it doesn't break synchronization by
3044 // introducing fake uses. See:
3045 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3046 // International Workshop on OpenMP 2018,
3047 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3048
3049 return indicatePessimisticFixpoint();
3050 }
3051
3052 /// See AbstractAttribute::trackStatistics()
3053 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3054};
3055
3056struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3057 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3058 : AANoAliasImpl(IRP, A) {}
3059
3060 /// See AbstractAttribute::initialize(...).
3061 void initialize(Attributor &A) override {
3062 // See callsite argument attribute and callee argument attribute.
3063 const auto &CB = cast<CallBase>(getAnchorValue());
3064 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3065 indicateOptimisticFixpoint();
3066 Value &Val = getAssociatedValue();
3067 if (isa<ConstantPointerNull>(Val) &&
3068 !NullPointerIsDefined(getAnchorScope(),
3069 Val.getType()->getPointerAddressSpace()))
3070 indicateOptimisticFixpoint();
3071 }
3072
3073 /// Determine if the underlying value may alias with the call site argument
3074 /// \p OtherArgNo of \p CB (= the underlying call site).
3075 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3076 const AAMemoryBehavior &MemBehaviorAA,
3077 const CallBase &CB, unsigned OtherArgNo) {
3078 // We do not need to worry about aliasing with the underlying IRP.
3079 if (this->getCalleeArgNo() == (int)OtherArgNo)
3080 return false;
3081
3082 // If it is not a pointer or pointer vector we do not alias.
3083 const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3084 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3085 return false;
3086
3087 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3088 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3089
3090 // If the argument is readnone, there is no read-write aliasing.
3091 if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3092 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3093 return false;
3094 }
3095
3096 // If the argument is readonly and the underlying value is readonly, there
3097 // is no read-write aliasing.
3098 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3099 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3100 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3101 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3102 return false;
3103 }
3104
3105 // We have to utilize actual alias analysis queries so we need the object.
3106 if (!AAR)
3107 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3108
3109 // Try to rule it out at the call site.
3110 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3111 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3112 "callsite arguments: "
3113 << getAssociatedValue() << " " << *ArgOp << " => "
3114 << (IsAliasing ? "" : "no-") << "alias \n");
3115
3116 return IsAliasing;
3117 }
3118
3119 bool
3120 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3121 const AAMemoryBehavior &MemBehaviorAA,
3122 const AANoAlias &NoAliasAA) {
3123 // We can deduce "noalias" if the following conditions hold.
3124 // (i) Associated value is assumed to be noalias in the definition.
3125 // (ii) Associated value is assumed to be no-capture in all the uses
3126 // possibly executed before this callsite.
3127 // (iii) There is no other pointer argument which could alias with the
3128 // value.
3129
3130 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3131 if (!AssociatedValueIsNoAliasAtDef) {
3132 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3133 << " is not no-alias at the definition\n");
3134 return false;
3135 }
3136
3137 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3138
3139 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3140 const Function *ScopeFn = VIRP.getAnchorScope();
3141 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3142 // Check whether the value is captured in the scope using AANoCapture.
3143 // Look at CFG and check only uses possibly executed before this
3144 // callsite.
3145 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3146 Instruction *UserI = cast<Instruction>(U.getUser());
3147
3148 // If UserI is the current instruction and there is a single potential use of
3149 // the value in UserI we allow the use.
3150 // TODO: We should inspect the operands and allow those that cannot alias
3151 // with the value.
3152 if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3153 return true;
3154
3155 if (ScopeFn) {
3156 const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3157 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3158
3159 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3160 return true;
3161
3162 if (auto *CB = dyn_cast<CallBase>(UserI)) {
3163 if (CB->isArgOperand(&U)) {
3164
3165 unsigned ArgNo = CB->getArgOperandNo(&U);
3166
3167 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3168 *this, IRPosition::callsite_argument(*CB, ArgNo),
3169 DepClassTy::OPTIONAL);
3170
3171 if (NoCaptureAA.isAssumedNoCapture())
3172 return true;
3173 }
3174 }
3175 }
3176
3177 // For cases which can potentially have more users
3178 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3179 isa<SelectInst>(U)) {
3180 Follow = true;
3181 return true;
3182 }
3183
3184 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3185 return false;
3186 };
3187
3188 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3189 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3190 LLVM_DEBUG(
3191 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3192 << " cannot be noalias as it is potentially captured\n");
3193 return false;
3194 }
3195 }
3196 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3197
3198 // Check there is no other pointer argument which could alias with the
3199 // value passed at this call site.
3200 // TODO: AbstractCallSite
3201 const auto &CB = cast<CallBase>(getAnchorValue());
3202 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3203 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3204 return false;
3205
3206 return true;
3207 }
3208
3209 /// See AbstractAttribute::updateImpl(...).
3210 ChangeStatus updateImpl(Attributor &A) override {
3211 // If the argument is readnone we are done as there are no accesses via the
3212 // argument.
3213 auto &MemBehaviorAA =
3214 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3215 if (MemBehaviorAA.isAssumedReadNone()) {
3216 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3217 return ChangeStatus::UNCHANGED;
3218 }
3219
3220 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3221 const auto &NoAliasAA =
3222 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3223
3224 AAResults *AAR = nullptr;
3225 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3226 NoAliasAA)) {
3227 LLVM_DEBUG(
3228 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3229 return ChangeStatus::UNCHANGED;
3230 }
3231
3232 return indicatePessimisticFixpoint();
3233 }
3234
3235 /// See AbstractAttribute::trackStatistics()
3236 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3237};
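
A hedged example of the three conditions (i)-(iii) checked in isKnownNoAliasDueToNoAliasPreservation above (caller and callee below are assumed examples, not part of the LLVM sources):

#include <cstdlib>

void callee(int *a, int *b) { *a = *b; }

void caller() {
  // (i) malloc returns a noalias pointer, so the associated value is noalias
  //     at its definition
  int *p = static_cast<int *>(std::malloc(sizeof(int)));
  int local = 0;
  // (ii) 'p' is not captured by any use that may execute before the call, and
  // (iii) the only other pointer argument, '&local', cannot alias it, so the
  //       first call site argument can be marked noalias
  callee(p, &local);
  std::free(p);
}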
3238
3239/// NoAlias attribute for function return value.
3240struct AANoAliasReturned final : AANoAliasImpl {
3241 AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3242 : AANoAliasImpl(IRP, A) {}
3243
3244 /// See AbstractAttribute::initialize(...).
3245 void initialize(Attributor &A) override {
3246 AANoAliasImpl::initialize(A);
3247 Function *F = getAssociatedFunction();
3248 if (!F || F->isDeclaration())
3249 indicatePessimisticFixpoint();
3250 }
3251
3252 /// See AbstractAttribute::updateImpl(...).
3253 virtual ChangeStatus updateImpl(Attributor &A) override {
3254
3255 auto CheckReturnValue = [&](Value &RV) -> bool {
3256 if (Constant *C = dyn_cast<Constant>(&RV))
3257 if (C->isNullValue() || isa<UndefValue>(C))
3258 return true;
3259
3260 /// For now, we can only deduce noalias if we have call sites.
3261 /// FIXME: add more support.
3262 if (!isa<CallBase>(&RV))
3263 return false;
3264
3265 const IRPosition &RVPos = IRPosition::value(RV);
3266 const auto &NoAliasAA =
3267 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3268 if (!NoAliasAA.isAssumedNoAlias())
3269 return false;
3270
3271 const auto &NoCaptureAA =
3272 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3273 return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3274 };
3275
3276 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3277 return indicatePessimisticFixpoint();
3278
3279 return ChangeStatus::UNCHANGED;
3280 }
3281
3282 /// See AbstractAttribute::trackStatistics()
3283 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3284};
3285
3286/// NoAlias attribute deduction for a call site return value.
3287struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3288 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3289 : AANoAliasImpl(IRP, A) {}
3290
3291 /// See AbstractAttribute::initialize(...).
3292 void initialize(Attributor &A) override {
3293 AANoAliasImpl::initialize(A);
3294 Function *F = getAssociatedFunction();
3295 if (!F || F->isDeclaration())
3296 indicatePessimisticFixpoint();
3297 }
3298
3299 /// See AbstractAttribute::updateImpl(...).
3300 ChangeStatus updateImpl(Attributor &A) override {
3301 // TODO: Once we have call site specific value information we can provide
3302 // call site specific liveness information and then it makes
3303 // sense to specialize attributes for call site arguments instead of
3304 // redirecting requests to the callee argument.
3305 Function *F = getAssociatedFunction();
3306 const IRPosition &FnPos = IRPosition::returned(*F);
3307 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3308 return clampStateAndIndicateChange(getState(), FnAA.getState());
3309 }
3310
3311 /// See AbstractAttribute::trackStatistics()
3312 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3313};
3314
3315/// -------------------AAIsDead Function Attribute-----------------------
3316
3317struct AAIsDeadValueImpl : public AAIsDead {
3318 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3319
3320 /// See AAIsDead::isAssumedDead().
3321 bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3322
3323 /// See AAIsDead::isKnownDead().
3324 bool isKnownDead() const override { return isKnown(IS_DEAD); }
3325
3326 /// See AAIsDead::isAssumedDead(BasicBlock *).
3327 bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3328
3329 /// See AAIsDead::isKnownDead(BasicBlock *).
3330 bool isKnownDead(const BasicBlock *BB) const override { return false; }
3331
3332 /// See AAIsDead::isAssumedDead(Instruction *I).
3333 bool isAssumedDead(const Instruction *I) const override {
3334 return I == getCtxI() && isAssumedDead();
3335 }
3336
3337 /// See AAIsDead::isKnownDead(Instruction *I).
3338 bool isKnownDead(const Instruction *I) const override {
3339 return isAssumedDead(I) && isKnownDead();
3340 }
3341
3342 /// See AbstractAttribute::getAsStr().
3343 const std::string getAsStr() const override {
3344 return isAssumedDead() ? "assumed-dead" : "assumed-live";
3345 }
3346
3347 /// Check if all uses are assumed dead.
3348 bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3349 // Callers might not check the type, void has no uses.
3350 if (V.getType()->isVoidTy())
3351 return true;
3352
3353 // If we replace a value with a constant there are no uses left afterwards.
3354 if (!isa<Constant>(V)) {
3355 bool UsedAssumedInformation = false;
3356 Optional<Constant *> C =
3357 A.getAssumedConstant(V, *this, UsedAssumedInformation);
3358 if (!C.hasValue() || *C)
3359 return true;
3360 }
3361
3362 auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3363 // Explicitly set the dependence class to required because we want a long
3364 // chain of N dependent instructions to be considered live as soon as one is
3365 // without going through N update cycles. This is not required for
3366 // correctness.
3367 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3368 DepClassTy::REQUIRED);
3369 }
3370
3371 /// Determine if \p I is assumed to be side-effect free.
3372 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3373 if (!I || wouldInstructionBeTriviallyDead(I))
3374 return true;
3375
3376 auto *CB = dyn_cast<CallBase>(I);
3377 if (!CB || isa<IntrinsicInst>(CB))
3378 return false;
3379
3380 const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3381 const auto &NoUnwindAA =
3382 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3383 if (!NoUnwindAA.isAssumedNoUnwind())
3384 return false;
3385 if (!NoUnwindAA.isKnownNoUnwind())
3386 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3387
3388 const auto &MemBehaviorAA =
3389 A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3390 if (MemBehaviorAA.isAssumedReadOnly()) {
3391 if (!MemBehaviorAA.isKnownReadOnly())
3392 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3393 return true;
3394 }
3395 return false;
3396 }
3397};
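
Aside: the side-effect-free test above only accepts a call that is both assumed nounwind and assumed readonly (or that would be trivially dead anyway). A minimal standalone C++ sketch of the kind of call this describes; the function name and attributes are illustrative, not taken from the source:

#include <cstddef>

// Reads memory but never writes it and never unwinds; if the result of a call
// to it is unused, the call has no observable effect and can be removed.
__attribute__((pure, nothrow)) static int sumArray(const int *A, size_t N) {
  int S = 0;
  for (size_t I = 0; I != N; ++I)
    S += A[I];
  return S;
}

int demo(const int *A, size_t N) {
  (void)sumArray(A, N); // result unused: a dead, side-effect-free call
  return 0;
}
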
3398
3399struct AAIsDeadFloating : public AAIsDeadValueImpl {
3400 AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3401 : AAIsDeadValueImpl(IRP, A) {}
3402
3403 /// See AbstractAttribute::initialize(...).
3404 void initialize(Attributor &A) override {
3405 if (isa<UndefValue>(getAssociatedValue())) {
3406 indicatePessimisticFixpoint();
3407 return;
3408 }
3409
3410 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3411 if (!isAssumedSideEffectFree(A, I)) {
3412 if (!isa_and_nonnull<StoreInst>(I))
3413 indicatePessimisticFixpoint();
3414 else
3415 removeAssumedBits(HAS_NO_EFFECT);
3416 }
3417 }
3418
3419 bool isDeadStore(Attributor &A, StoreInst &SI) {
3420 // Lang ref now states volatile stores are not UB/dead, let's skip them.
3421 if (SI.isVolatile())
3422 return false;
3423
3424 bool UsedAssumedInformation = false;
3425 SmallSetVector<Value *, 4> PotentialCopies;
3426 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3427 UsedAssumedInformation))
3428 return false;
3429 return llvm::all_of(PotentialCopies, [&](Value *V) {
3430 return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3431 UsedAssumedInformation);
3432 });
3433 }
3434
3435 /// See AbstractAttribute::updateImpl(...).
3436 ChangeStatus updateImpl(Attributor &A) override {
3437 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3438 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3439 if (!isDeadStore(A, *SI))
3440 return indicatePessimisticFixpoint();
3441 } else {
3442 if (!isAssumedSideEffectFree(A, I))
3443 return indicatePessimisticFixpoint();
3444 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3445 return indicatePessimisticFixpoint();
3446 }
3447 return ChangeStatus::UNCHANGED;
3448 }
3449
3450 /// See AbstractAttribute::manifest(...).
3451 ChangeStatus manifest(Attributor &A) override {
3452 Value &V = getAssociatedValue();
3453 if (auto *I = dyn_cast<Instruction>(&V)) {
3454 // If we get here we basically know the users are all dead. We still check
3455 // that isAssumedSideEffectFree returns true here, because it might not be
3456 // the case: only the users may be dead while the instruction (a call) is
3457 // still needed.
3458 if (isa<StoreInst>(I) ||
3459 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3460 A.deleteAfterManifest(*I);
3461 return ChangeStatus::CHANGED;
3462 }
3463 }
3464 if (V.use_empty())
3465 return ChangeStatus::UNCHANGED;
3466
3467 bool UsedAssumedInformation = false;
3468 Optional<Constant *> C =
3469 A.getAssumedConstant(V, *this, UsedAssumedInformation);
3470 if (C.hasValue() && C.getValue())
3471 return ChangeStatus::UNCHANGED;
3472
3473 // Replace the value with undef as it is dead but keep droppable uses around
3474 // as they provide information we don't want to give up on just yet.
3475 UndefValue &UV = *UndefValue::get(V.getType());
3476 bool AnyChange =
3477 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3478 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3479 }
3480
3481 /// See AbstractAttribute::trackStatistics()
3482 void trackStatistics() const override {
3483 STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3484 }
3485};
3486
3487struct AAIsDeadArgument : public AAIsDeadFloating {
3488 AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3489 : AAIsDeadFloating(IRP, A) {}
3490
3491 /// See AbstractAttribute::initialize(...).
3492 void initialize(Attributor &A) override {
3493 if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3494 indicatePessimisticFixpoint();
3495 }
3496
3497 /// See AbstractAttribute::manifest(...).
3498 ChangeStatus manifest(Attributor &A) override {
3499 ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3500 Argument &Arg = *getAssociatedArgument();
3501 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3502 if (A.registerFunctionSignatureRewrite(
3503 Arg, /* ReplacementTypes */ {},
3504 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3505 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3506 Arg.dropDroppableUses();
3507 return ChangeStatus::CHANGED;
3508 }
3509 return Changed;
3510 }
3511
3512 /// See AbstractAttribute::trackStatistics()
3513 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3514};
3515
3516struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3517 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3518 : AAIsDeadValueImpl(IRP, A) {}
3519
3520 /// See AbstractAttribute::initialize(...).
3521 void initialize(Attributor &A) override {
3522 if (isa<UndefValue>(getAssociatedValue()))
3523 indicatePessimisticFixpoint();
3524 }
3525
3526 /// See AbstractAttribute::updateImpl(...).
3527 ChangeStatus updateImpl(Attributor &A) override {
3528 // TODO: Once we have call site specific value information we can provide
3529 // call site specific liveness information and then it makes
3530 // sense to specialize attributes for call sites arguments instead of
3531 // redirecting requests to the callee argument.
3532 Argument *Arg = getAssociatedArgument();
3533 if (!Arg)
3534 return indicatePessimisticFixpoint();
3535 const IRPosition &ArgPos = IRPosition::argument(*Arg);
3536 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3537 return clampStateAndIndicateChange(getState(), ArgAA.getState());
3538 }
3539
3540 /// See AbstractAttribute::manifest(...).
3541 ChangeStatus manifest(Attributor &A) override {
3542 CallBase &CB = cast<CallBase>(getAnchorValue());
3543 Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3544 assert(!isa<UndefValue>(U.get()) &&
3545        "Expected undef values to be filtered out!");
3546 UndefValue &UV = *UndefValue::get(U->getType());
3547 if (A.changeUseAfterManifest(U, UV))
3548 return ChangeStatus::CHANGED;
3549 return ChangeStatus::UNCHANGED;
3550 }
3551
3552 /// See AbstractAttribute::trackStatistics()
3553 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3554};
3555
3556struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3557 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3558 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3559
3560 /// See AAIsDead::isAssumedDead().
3561 bool isAssumedDead() const override {
3562 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3563 }
3564
3565 /// See AbstractAttribute::initialize(...).
3566 void initialize(Attributor &A) override {
3567 if (isa<UndefValue>(getAssociatedValue())) {
3568 indicatePessimisticFixpoint();
3569 return;
3570 }
3571
3572 // We track this separately as a secondary state.
3573 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3574 }
3575
3576 /// See AbstractAttribute::updateImpl(...).
3577 ChangeStatus updateImpl(Attributor &A) override {
3578 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3579 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3580 IsAssumedSideEffectFree = false;
3581 Changed = ChangeStatus::CHANGED;
3582 }
3583 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3584 return indicatePessimisticFixpoint();
3585 return Changed;
3586 }
3587
3588 /// See AbstractAttribute::trackStatistics()
3589 void trackStatistics() const override {
3590 if (IsAssumedSideEffectFree)
3591 STATS_DECLTRACK_CSRET_ATTR(IsDead)
3592 else
3593 STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3594 }
3595
3596 /// See AbstractAttribute::getAsStr().
3597 const std::string getAsStr() const override {
3598 return isAssumedDead()
3599 ? "assumed-dead"
3600 : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3601 }
3602
3603private:
3604 bool IsAssumedSideEffectFree;
3605};
3606
3607struct AAIsDeadReturned : public AAIsDeadValueImpl {
3608 AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3609 : AAIsDeadValueImpl(IRP, A) {}
3610
3611 /// See AbstractAttribute::updateImpl(...).
3612 ChangeStatus updateImpl(Attributor &A) override {
3613
3614 bool UsedAssumedInformation = false;
3615 A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3616 {Instruction::Ret}, UsedAssumedInformation);
3617
3618 auto PredForCallSite = [&](AbstractCallSite ACS) {
3619 if (ACS.isCallbackCall() || !ACS.getInstruction())
3620 return false;
3621 return areAllUsesAssumedDead(A, *ACS.getInstruction());
3622 };
3623
3624 bool AllCallSitesKnown;
3625 if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3626 AllCallSitesKnown))
3627 return indicatePessimisticFixpoint();
3628
3629 return ChangeStatus::UNCHANGED;
3630 }
3631
3632 /// See AbstractAttribute::manifest(...).
3633 ChangeStatus manifest(Attributor &A) override {
3634 // TODO: Rewrite the signature to return void?
3635 bool AnyChange = false;
3636 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3637 auto RetInstPred = [&](Instruction &I) {
3638 ReturnInst &RI = cast<ReturnInst>(I);
3639 if (!isa<UndefValue>(RI.getReturnValue()))
3640 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3641 return true;
3642 };
3643 bool UsedAssumedInformation = false;
3644 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3645 UsedAssumedInformation);
3646 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3647 }
3648
3649 /// See AbstractAttribute::trackStatistics()
3650 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3651};
3652
3653struct AAIsDeadFunction : public AAIsDead {
3654 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3655
3656 /// See AbstractAttribute::initialize(...).
3657 void initialize(Attributor &A) override {
3658 const Function *F = getAnchorScope();
3659 if (F && !F->isDeclaration()) {
3660 // We only want to compute liveness once. If the function is not part of
3661 // the SCC, skip it.
3662 if (A.isRunOn(*const_cast<Function *>(F))) {
3663 ToBeExploredFrom.insert(&F->getEntryBlock().front());
3664 assumeLive(A, F->getEntryBlock());
3665 } else {
3666 indicatePessimisticFixpoint();
3667 }
3668 }
3669 }
3670
3671 /// See AbstractAttribute::getAsStr().
3672 const std::string getAsStr() const override {
3673 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3674 std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3675 std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3676 std::to_string(KnownDeadEnds.size()) + "]";
3677 }
3678
3679 /// See AbstractAttribute::manifest(...).
3680 ChangeStatus manifest(Attributor &A) override {
3681 assert(getState().isValidState() &&
3682        "Attempted to manifest an invalid state!");
3683
3684 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3685 Function &F = *getAnchorScope();
3686
3687 if (AssumedLiveBlocks.empty()) {
3688 A.deleteAfterManifest(F);
3689 return ChangeStatus::CHANGED;
3690 }
3691
3692 // Flag to determine if we can change an invoke to a call assuming the
3693 // callee is nounwind. This is not possible if the personality of the
3694 // function allows catching asynchronous exceptions.
3695 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3696
3697 KnownDeadEnds.set_union(ToBeExploredFrom);
3698 for (const Instruction *DeadEndI : KnownDeadEnds) {
3699 auto *CB = dyn_cast<CallBase>(DeadEndI);
3700 if (!CB)
3701 continue;
3702 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3703 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3704 bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3705 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3706 continue;
3707
3708 if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3709 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3710 else
3711 A.changeToUnreachableAfterManifest(
3712 const_cast<Instruction *>(DeadEndI->getNextNode()));
3713 HasChanged = ChangeStatus::CHANGED;
3714 }
3715
3716 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.")
3717 for (BasicBlock &BB : F)
3718 if (!AssumedLiveBlocks.count(&BB)) {
3719 A.deleteAfterManifest(BB);
3720 ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3721 }
3722
3723 return HasChanged;
3724 }
3725
3726 /// See AbstractAttribute::updateImpl(...).
3727 ChangeStatus updateImpl(Attributor &A) override;
3728
3729 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3730 return !AssumedLiveEdges.count(std::make_pair(From, To));
3731 }
3732
3733 /// See AbstractAttribute::trackStatistics()
3734 void trackStatistics() const override {}
3735
3736 /// Returns true if the function is assumed dead.
3737 bool isAssumedDead() const override { return false; }
3738
3739 /// See AAIsDead::isKnownDead().
3740 bool isKnownDead() const override { return false; }
3741
3742 /// See AAIsDead::isAssumedDead(BasicBlock *).
3743 bool isAssumedDead(const BasicBlock *BB) const override {
3744 assert(BB->getParent() == getAnchorScope() &&
3745        "BB must be in the same anchor scope function.");
3746
3747 if (!getAssumed())
3748 return false;
3749 return !AssumedLiveBlocks.count(BB);
3750 }
3751
3752 /// See AAIsDead::isKnownDead(BasicBlock *).
3753 bool isKnownDead(const BasicBlock *BB) const override {
3754 return getKnown() && isAssumedDead(BB);
3755 }
3756
3757 /// See AAIsDead::isAssumed(Instruction *I).
3758 bool isAssumedDead(const Instruction *I) const override {
3759 assert(I->getParent()->getParent() == getAnchorScope() &&
3760        "Instruction must be in the same anchor scope function.");
3761
3762 if (!getAssumed())
3763 return false;
3764
3765 // If it is not in AssumedLiveBlocks then it is for sure dead.
3766 // Otherwise, it can still be after a noreturn call in a live block.
3767 if (!AssumedLiveBlocks.count(I->getParent()))
3768 return true;
3769
3770 // If it is not after a liveness barrier it is live.
3771 const Instruction *PrevI = I->getPrevNode();
3772 while (PrevI) {
3773 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3774 return true;
3775 PrevI = PrevI->getPrevNode();
3776 }
3777 return false;
3778 }
3779
3780 /// See AAIsDead::isKnownDead(Instruction *I).
3781 bool isKnownDead(const Instruction *I) const override {
3782 return getKnown() && isAssumedDead(I);
3783 }
3784
3785 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3786 /// that internal functions called from \p BB should now be looked at.
3787 bool assumeLive(Attributor &A, const BasicBlock &BB) {
3788 if (!AssumedLiveBlocks.insert(&BB).second)
3789 return false;
3790
3791 // We assume that all of BB is (probably) live now and if there are calls to
3792 // internal functions we will assume that those are now live as well. This
3793 // is a performance optimization for blocks with calls to a lot of internal
3794 // functions. It can however cause dead functions to be treated as live.
3795 for (const Instruction &I : BB)
3796 if (const auto *CB = dyn_cast<CallBase>(&I))
3797 if (const Function *F = CB->getCalledFunction())
3798 if (F->hasLocalLinkage())
3799 A.markLiveInternalFunction(*F);
3800 return true;
3801 }
3802
3803 /// Collection of instructions that need to be explored again, e.g., we
3804 /// did assume they do not transfer control to (one of their) successors.
3805 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3806
3807 /// Collection of instructions that are known to not transfer control.
3808 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3809
3810 /// Collection of all assumed live edges
3811 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3812
3813 /// Collection of all assumed live BasicBlocks.
3814 DenseSet<const BasicBlock *> AssumedLiveBlocks;
3815};
3816
3817static bool
3818identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3819 AbstractAttribute &AA,
3820 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3821 const IRPosition &IPos = IRPosition::callsite_function(CB);
3822
3823 const auto &NoReturnAA =
3824 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3825 if (NoReturnAA.isAssumedNoReturn())
3826 return !NoReturnAA.isKnownNoReturn();
3827 if (CB.isTerminator())
3828 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3829 else
3830 AliveSuccessors.push_back(CB.getNextNode());
3831 return false;
3832}
3833
3834static bool
3835identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3836 AbstractAttribute &AA,
3837 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3838 bool UsedAssumedInformation =
3839 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3840
3841 // First, determine if we can change an invoke to a call assuming the
3842 // callee is nounwind. This is not possible if the personality of the
3843 // function allows catching asynchronous exceptions.
3844 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3845 AliveSuccessors.push_back(&II.getUnwindDest()->front());
3846 } else {
3847 const IRPosition &IPos = IRPosition::callsite_function(II);
3848 const auto &AANoUnw =
3849 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3850 if (AANoUnw.isAssumedNoUnwind()) {
3851 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3852 } else {
3853 AliveSuccessors.push_back(&II.getUnwindDest()->front());
3854 }
3855 }
3856 return UsedAssumedInformation;
3857}
3858
3859static bool
3860identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3861 AbstractAttribute &AA,
3862 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3863 bool UsedAssumedInformation = false;
3864 if (BI.getNumSuccessors() == 1) {
3865 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3866 } else {
3867 Optional<Constant *> C =
3868 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3869 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3870 // No value yet, assume both edges are dead.
3871 } else if (isa_and_nonnull<ConstantInt>(*C)) {
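          // For a conditional branch, successor 0 is the "true" destination,
          // so a condition folded to 1 selects successor 1 - 1 == 0 and a
          // condition folded to 0 selects successor 1.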
3872 const BasicBlock *SuccBB =
3873 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3874 AliveSuccessors.push_back(&SuccBB->front());
3875 } else {
3876 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3877 AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3878 UsedAssumedInformation = false;
3879 }
3880 }
3881 return UsedAssumedInformation;
3882}
3883
3884static bool
3885identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3886 AbstractAttribute &AA,
3887 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3888 bool UsedAssumedInformation = false;
3889 Optional<Constant *> C =
3890 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3891 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3892 // No value yet, assume all edges are dead.
3893 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3894 for (auto &CaseIt : SI.cases()) {
3895 if (CaseIt.getCaseValue() == C.getValue()) {
3896 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3897 return UsedAssumedInformation;
3898 }
3899 }
3900 AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3901 return UsedAssumedInformation;
3902 } else {
3903 for (const BasicBlock *SuccBB : successors(SI.getParent()))
3904 AliveSuccessors.push_back(&SuccBB->front());
3905 }
3906 return UsedAssumedInformation;
3907}
3908
3909ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3910 ChangeStatus Change = ChangeStatus::UNCHANGED;
3911
3912 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3913                   << getAnchorScope()->size() << "] BBs and "
3914                   << ToBeExploredFrom.size() << " exploration points and "
3915                   << KnownDeadEnds.size() << " known dead ends\n");
3916
3917 // Copy and clear the list of instructions we need to explore from. It is
3918 // refilled with instructions the next update has to look at.
3919 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3920 ToBeExploredFrom.end());
3921 decltype(ToBeExploredFrom) NewToBeExploredFrom;
3922
3923 SmallVector<const Instruction *, 8> AliveSuccessors;
3924 while (!Worklist.empty()) {
3925 const Instruction *I = Worklist.pop_back_val();
3926 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3927
3928 // Fast forward for uninteresting instructions. We could look for UB here
3929 // though.
3930 while (!I->isTerminator() && !isa<CallBase>(I))
3931 I = I->getNextNode();
3932
3933 AliveSuccessors.clear();
3934
3935 bool UsedAssumedInformation = false;
3936 switch (I->getOpcode()) {
3937 // TODO: look for (assumed) UB to backwards propagate "deadness".
3938 default:
3939 assert(I->isTerminator() &&
3940        "Expected non-terminators to be handled already!");
3941 for (const BasicBlock *SuccBB : successors(I->getParent()))
3942 AliveSuccessors.push_back(&SuccBB->front());
3943 break;
3944 case Instruction::Call:
3945 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3946 *this, AliveSuccessors);
3947 break;
3948 case Instruction::Invoke:
3949 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3950 *this, AliveSuccessors);
3951 break;
3952 case Instruction::Br:
3953 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3954 *this, AliveSuccessors);
3955 break;
3956 case Instruction::Switch:
3957 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3958 *this, AliveSuccessors);
3959 break;
3960 }
3961
3962 if (UsedAssumedInformation) {
3963 NewToBeExploredFrom.insert(I);
3964 } else if (AliveSuccessors.empty() ||
3965 (I->isTerminator() &&
3966 AliveSuccessors.size() < I->getNumSuccessors())) {
3967 if (KnownDeadEnds.insert(I))
3968 Change = ChangeStatus::CHANGED;
3969 }
3970
3971 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3972                   << AliveSuccessors.size() << " UsedAssumedInformation: "
3973                   << UsedAssumedInformation << "\n");
3974
3975 for (const Instruction *AliveSuccessor : AliveSuccessors) {
3976 if (!I->isTerminator()) {
3977 assert(AliveSuccessors.size() == 1 &&
3978        "Non-terminator expected to have a single successor!");
3979 Worklist.push_back(AliveSuccessor);
3980 } else {
3981 // record the assumed live edge
3982 auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3983 if (AssumedLiveEdges.insert(Edge).second)
3984 Change = ChangeStatus::CHANGED;
3985 if (assumeLive(A, *AliveSuccessor->getParent()))
3986 Worklist.push_back(AliveSuccessor);
3987 }
3988 }
3989 }
3990
3991 // Check if the content of ToBeExploredFrom changed, ignore the order.
3992 if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3993 llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3994 return !ToBeExploredFrom.count(I);
3995 })) {
3996 Change = ChangeStatus::CHANGED;
3997 ToBeExploredFrom = std::move(NewToBeExploredFrom);
3998 }
3999
4000 // If we know everything is live there is no need to query for liveness.
4001 // Instead, indicating a pessimistic fixpoint will cause the state to be
4002 // "invalid" and all queries to be answered conservatively without lookups.
4003 // To be in this state we have to have (1) finished the exploration,
4004 // (2) not ruled unreachable code dead, and (3) not discovered any
4005 // non-trivial dead end.
4006 if (ToBeExploredFrom.empty() &&
4007 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4008 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4009 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4010 }))
4011 return indicatePessimisticFixpoint();
4012 return Change;
4013}
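
The update above is essentially a forward reachability walk: start from the current exploration points, follow only successors that are assumed reachable, and every block that is never visited stays out of AssumedLiveBlocks. A compressed standalone sketch of that pattern, using a toy CFG rather than the Attributor data structures:

#include <cstdio>
#include <set>
#include <vector>

int main() {
  // Toy CFG: block index -> successor indices. Block 3 is never reached from
  // the entry (think: its only predecessor ends in a call assumed noreturn).
  std::vector<std::vector<int>> Succs = {{1, 2}, {}, {}, {}};
  std::set<int> Live;
  std::vector<int> Worklist = {0}; // start exploring from the entry block

  while (!Worklist.empty()) {
    int BB = Worklist.back();
    Worklist.pop_back();
    if (!Live.insert(BB).second)
      continue; // already assumed live
    for (int Succ : Succs[BB])
      Worklist.push_back(Succ);
  }

  for (int BB = 0; BB != 4; ++BB)
    std::printf("block %d is assumed %s\n", BB, Live.count(BB) ? "live" : "dead");
}
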
4014
4015/// Liveness information for a call site.
4016struct AAIsDeadCallSite final : AAIsDeadFunction {
4017 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4018 : AAIsDeadFunction(IRP, A) {}
4019
4020 /// See AbstractAttribute::initialize(...).
4021 void initialize(Attributor &A) override {
4022 // TODO: Once we have call site specific value information we can provide
4023 // call site specific liveness information and then it makes
4024 // sense to specialize attributes for call sites instead of
4025 // redirecting requests to the callee.
4026 llvm_unreachable("Abstract attributes for liveness are not "
4027                  "supported for call sites yet!");
4028 }
4029
4030 /// See AbstractAttribute::updateImpl(...).
4031 ChangeStatus updateImpl(Attributor &A) override {
4032 return indicatePessimisticFixpoint();
4033 }
4034
4035 /// See AbstractAttribute::trackStatistics()
4036 void trackStatistics() const override {}
4037};
4038
4039/// -------------------- Dereferenceable Argument Attribute --------------------
4040
4041struct AADereferenceableImpl : AADereferenceable {
4042 AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4043 : AADereferenceable(IRP, A) {}
4044 using StateType = DerefState;
4045
4046 /// See AbstractAttribute::initialize(...).
4047 void initialize(Attributor &A) override {
4048 SmallVector<Attribute, 4> Attrs;
4049 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4050 Attrs, /* IgnoreSubsumingPositions */ false, &A);
4051 for (const Attribute &Attr : Attrs)
4052 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4053
4054 const IRPosition &IRP = this->getIRPosition();
4055 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4056
4057 bool CanBeNull, CanBeFreed;
4058 takeKnownDerefBytesMaximum(
4059 IRP.getAssociatedValue().getPointerDereferenceableBytes(
4060 A.getDataLayout(), CanBeNull, CanBeFreed));
4061
4062 bool IsFnInterface = IRP.isFnInterfaceKind();
4063 Function *FnScope = IRP.getAnchorScope();
4064 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4065 indicatePessimisticFixpoint();
4066 return;
4067 }
4068
4069 if (Instruction *CtxI = getCtxI())
4070 followUsesInMBEC(*this, A, getState(), *CtxI);
4071 }
4072
4073 /// See AbstractAttribute::getState()
4074 /// {
4075 StateType &getState() override { return *this; }
4076 const StateType &getState() const override { return *this; }
4077 /// }
4078
4079 /// Helper function for collecting accessed bytes in must-be-executed-context
4080 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4081 DerefState &State) {
4082 const Value *UseV = U->get();
4083 if (!UseV->getType()->isPointerTy())
4084 return;
4085
4086 Type *PtrTy = UseV->getType();
4087 const DataLayout &DL = A.getDataLayout();
4088 int64_t Offset;
4089 if (const Value *Base = getBasePointerOfAccessPointerOperand(
4090 I, Offset, DL, /*AllowNonInbounds*/ true)) {
4091 if (Base == &getAssociatedValue() &&
4092 getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4093 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4094 State.addAccessedBytes(Offset, Size);
4095 }
4096 }
4097 }
4098
4099 /// See followUsesInMBEC
4100 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4101 AADereferenceable::StateType &State) {
4102 bool IsNonNull = false;
4103 bool TrackUse = false;
4104 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4105 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4106 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4107                   << " for instruction " << *I << "\n");
4108
4109 addAccessedBytesForUse(A, U, I, State);
4110 State.takeKnownDerefBytesMaximum(DerefBytes);
4111 return TrackUse;
4112 }
4113
4114 /// See AbstractAttribute::manifest(...).
4115 ChangeStatus manifest(Attributor &A) override {
4116 ChangeStatus Change = AADereferenceable::manifest(A);
4117 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4118 removeAttrs({Attribute::DereferenceableOrNull});
4119 return ChangeStatus::CHANGED;
4120 }
4121 return Change;
4122 }
4123
4124 void getDeducedAttributes(LLVMContext &Ctx,
4125 SmallVectorImpl<Attribute> &Attrs) const override {
4126 // TODO: Add *_globally support
4127 if (isAssumedNonNull())
4128 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4129 Ctx, getAssumedDereferenceableBytes()));
4130 else
4131 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4132 Ctx, getAssumedDereferenceableBytes()));
4133 }
4134
4135 /// See AbstractAttribute::getAsStr().
4136 const std::string getAsStr() const override {
4137 if (!getAssumedDereferenceableBytes())
4138 return "unknown-dereferenceable";
4139 return std::string("dereferenceable") +
4140 (isAssumedNonNull() ? "" : "_or_null") +
4141 (isAssumedGlobal() ? "_globally" : "") + "<" +
4142 std::to_string(getKnownDereferenceableBytes()) + "-" +
4143 std::to_string(getAssumedDereferenceableBytes()) + ">";
4144 }
4145};
4146
4147/// Dereferenceable attribute for a floating value.
4148struct AADereferenceableFloating : AADereferenceableImpl {
4149 AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4150 : AADereferenceableImpl(IRP, A) {}
4151
4152 /// See AbstractAttribute::updateImpl(...).
4153 ChangeStatus updateImpl(Attributor &A) override {
4154 const DataLayout &DL = A.getDataLayout();
4155
4156 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4157 bool Stripped) -> bool {
4158 unsigned IdxWidth =
4159 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4160 APInt Offset(IdxWidth, 0);
4161 const Value *Base =
4162 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4163
4164 const auto &AA = A.getAAFor<AADereferenceable>(
4165 *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4166 int64_t DerefBytes = 0;
4167 if (!Stripped && this == &AA) {
4168 // Use IR information if we did not strip anything.
4169 // TODO: track globally.
4170 bool CanBeNull, CanBeFreed;
4171 DerefBytes =
4172 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4173 T.GlobalState.indicatePessimisticFixpoint();
4174 } else {
4175 const DerefState &DS = AA.getState();
4176 DerefBytes = DS.DerefBytesState.getAssumed();
4177 T.GlobalState &= DS.GlobalState;
4178 }
4179
4180 // For now we do not try to "increase" dereferenceability due to negative
4181 // indices as we first have to come up with code to deal with loops and
4182 // for overflows of the dereferenceable bytes.
4183 int64_t OffsetSExt = Offset.getSExtValue();
4184 if (OffsetSExt < 0)
4185 OffsetSExt = 0;
4186
4187 T.takeAssumedDerefBytesMinimum(
4188 std::max(int64_t(0), DerefBytes - OffsetSExt));
4189
4190 if (this == &AA) {
4191 if (!Stripped) {
4192 // If nothing was stripped IR information is all we got.
4193 T.takeKnownDerefBytesMaximum(
4194 std::max(int64_t(0), DerefBytes - OffsetSExt));
4195 T.indicatePessimisticFixpoint();
4196 } else if (OffsetSExt > 0) {
4197 // If something was stripped but there is circular reasoning we look
4198 // for the offset. If it is positive we basically decrease the
4199 // dereferenceable bytes in a circular loop now, which will simply
4200 // drive them down to the known value in a very slow way which we
4201 // can accelerate.
4202 T.indicatePessimisticFixpoint();
4203 }
4204 }
4205
4206 return T.isValidState();
4207 };
4208
4209 DerefState T;
4210 if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4211 VisitValueCB, getCtxI()))
4212 return indicatePessimisticFixpoint();
4213
4214 return clampStateAndIndicateChange(getState(), T);
4215 }
4216
4217 /// See AbstractAttribute::trackStatistics()
4218 void trackStatistics() const override {
4219 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4220 }
4221};
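
A quick worked example of the offset clamping in the traversal above (the numbers are made up): if the stripped base is dereferenceable for 16 bytes and the accumulated offset is 4, the visited value is assumed dereferenceable for max(0, 16 - 4) = 12 bytes; negative offsets are clamped to 0 first, so they never increase the count. A standalone sketch of just that arithmetic:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors the max(int64_t(0), DerefBytes - OffsetSExt) clamp used above.
static int64_t derivedDerefBytes(int64_t BaseDerefBytes, int64_t Offset) {
  if (Offset < 0)
    Offset = 0; // do not "increase" dereferenceability via negative indices
  return std::max<int64_t>(0, BaseDerefBytes - Offset);
}

int main() {
  std::printf("%lld\n", (long long)derivedDerefBytes(16, 4));  // 12
  std::printf("%lld\n", (long long)derivedDerefBytes(16, -8)); // 16
  std::printf("%lld\n", (long long)derivedDerefBytes(16, 32)); // 0
}
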
4222
4223/// Dereferenceable attribute for a return value.
4224struct AADereferenceableReturned final
4225 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4226 AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4227 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4228 IRP, A) {}
4229
4230 /// See AbstractAttribute::trackStatistics()
4231 void trackStatistics() const override {
4232 STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4233 }
4234};
4235
4236/// Dereferenceable attribute for an argument
4237struct AADereferenceableArgument final
4238 : AAArgumentFromCallSiteArguments<AADereferenceable,
4239 AADereferenceableImpl> {
4240 using Base =
4241 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4242 AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4243 : Base(IRP, A) {}
4244
4245 /// See AbstractAttribute::trackStatistics()
4246 void trackStatistics() const override {
4247 STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4248 }
4249};
4250
4251/// Dereferenceable attribute for a call site argument.
4252struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4253 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4254 : AADereferenceableFloating(IRP, A) {}
4255
4256 /// See AbstractAttribute::trackStatistics()
4257 void trackStatistics() const override {
4258 STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4259 }
4260};
4261
4262/// Dereferenceable attribute deduction for a call site return value.
4263struct AADereferenceableCallSiteReturned final
4264 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4265 using Base =
4266 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4267 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4268 : Base(IRP, A) {}
4269
4270 /// See AbstractAttribute::trackStatistics()
4271 void trackStatistics() const override {
4272 STATS_DECLTRACK_CS_ATTR(dereferenceable);
4273 }
4274};
4275
4276// ------------------------ Align Argument Attribute ------------------------
4277
4278static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4279 Value &AssociatedValue, const Use *U,
4280 const Instruction *I, bool &TrackUse) {
4281 // We need to follow common pointer manipulation uses to the accesses they
4282 // feed into.
4283 if (isa<CastInst>(I)) {
  Step 1: Assuming 'I' is not a 'CastInst'
  Step 2: Taking false branch
4284 // Follow all but ptr2int casts.
4285 TrackUse = !isa<PtrToIntInst>(I);
4286 return 0;
4287 }
4288 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
  Step 3: Assuming 'I' is not a 'GetElementPtrInst'
  Step 3.1: 'GEP' is null
  Step 4: Taking false branch
4289 if (GEP->hasAllConstantIndices())
4290 TrackUse = true;
4291 return 0;
4292 }
4293
4294 MaybeAlign MA;
4295 if (const auto *CB = dyn_cast<CallBase>(I)) {
  Step 5: Assuming 'I' is not a 'CallBase'
  Step 5.1: 'CB' is null
  Step 6: Taking false branch
4296 if (CB->isBundleOperand(U) || CB->isCallee(U))
4297 return 0;
4298
4299 unsigned ArgNo = CB->getArgOperandNo(U);
4300 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4301 // As long as we only use known information there is no need to track
4302 // dependences here.
4303 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4304 MA = MaybeAlign(AlignAA.getKnownAlign());
4305 }
4306
4307 const DataLayout &DL = A.getDataLayout();
4308 const Value *UseV = U->get();
  Step 7: 'UseV' initialized here
4309 if (auto *SI = dyn_cast<StoreInst>(I)) {
  Step 8: Assuming 'I' is a 'StoreInst'
  Step 8.1: 'SI' is non-null
  Step 9: Taking true branch
4310 if (SI->getPointerOperand() == UseV)
  Step 10: Assuming pointer value is null
  Step 11: Taking true branch
4311 MA = SI->getAlign();
4312 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4313 if (LI->getPointerOperand() == UseV)
4314 MA = LI->getAlign();
4315 }
4316
4317 if (!MA || *MA <= QueryingAA.getKnownAlign())
  Step 12: Calling 'Optional::operator bool'
  Step 20: Returning from 'Optional::operator bool'
  Step 21: Calling 'operator<='
  Step 26: Returning from 'operator<='
  Step 27: Taking false branch
4318 return 0;
4319
4320 unsigned Alignment = MA->value();
4321 int64_t Offset;
4322
4323 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
  Step 28: Passing null pointer value via 1st parameter 'Ptr'
  Step 29: Calling 'GetPointerBaseWithConstantOffset'
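    // The call on line 4323 above is where the flagged path leaves this
    // function: per steps 7-11, 'UseV' comes from 'U->get()' and is assumed
    // null, and steps 28-29 show that null value being passed as 'Ptr' into
    // GetPointerBaseWithConstantOffset, the callee the path descends into for
    // the diagnosed null-pointer use. Note the analyzer only *assumes* the
    // store's pointer operand is null here; the report does not show that
    // such a value can actually reach this use.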
4324 if (Base == &AssociatedValue) {
4325 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4326 // So we can say that the maximum power of two which is a divisor of
4327 // gcd(Offset, Alignment) is an alignment.
4328
4329 uint32_t gcd =
4330 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4331 Alignment = llvm::PowerOf2Floor(gcd);
4332 }
4333 }
4334
4335 return Alignment;
4336}
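
The gcd step at the end of the function is worth a worked example: if the base address is a multiple of Alignment and the use sits at BaseAddr + Offset, that address is a multiple of gcd(|Offset|, Alignment); since Alignment is a power of two, the gcd is itself a power of two and can be used directly as the known alignment. A standalone sketch with illustrative values:

#include <cstdint>
#include <cstdio>
#include <numeric>

// Largest power of two dividing N (N > 0). Because the base alignment is a
// power of two, gcd(|Offset|, Alignment) is already a power of two, so this
// matches what llvm::PowerOf2Floor(gcd) yields in the function above.
static uint32_t lowestSetBit(uint32_t N) { return N & (~N + 1); }

// Known alignment of (Base + Offset) when Base is known aligned to BaseAlign:
// both Base and Offset are multiples of gcd(|Offset|, BaseAlign).
static uint32_t alignAtOffset(uint32_t BaseAlign, int64_t Offset) {
  if (Offset == 0)
    return BaseAlign;
  uint32_t AbsOff = uint32_t(Offset < 0 ? -Offset : Offset);
  return lowestSetBit(std::gcd(AbsOff, BaseAlign));
}

int main() {
  std::printf("%u\n", alignAtOffset(16, 4));  // 4
  std::printf("%u\n", alignAtOffset(16, 24)); // 8
  std::printf("%u\n", alignAtOffset(16, -6)); // 2
}
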
4337
4338struct AAAlignImpl : AAAlign {
4339 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4340
4341 /// See AbstractAttribute::initialize(...).
4342 void initialize(Attributor &A) override {
4343 SmallVector<Attribute, 4> Attrs;
4344 getAttrs({Attribute::Alignment}, Attrs);
4345 for (const Attribute &Attr : Attrs)
4346 takeKnownMaximum(Attr.getValueAsInt());
4347
4348 Value &V = getAssociatedValue();
4349 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4350 // use of the function pointer. This was caused by D73131. We want to
4351 // avoid this for function pointers especially because we iterate
4352 // their uses and int2ptr is not handled. It is not a correctness
4353 // problem though!
4354 if (!V.getType()->getPointerElementType()->isFunctionTy())
4355 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4356
4357 if (getIRPosition().isFnInterfaceKind() &&
4358 (!getAnchorScope() ||
4359 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4360 indicatePessimisticFixpoint();
4361 return;
4362 }
4363
4364 if (Instruction *CtxI = getCtxI())
4365 followUsesInMBEC(*this, A, getState(), *CtxI);
4366 }
4367
4368 /// See AbstractAttribute::manifest(...).
4369 ChangeStatus manifest(Attributor &A) override {
4370 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4371
4372 // Check for users that allow alignment annotations.
4373 Value &AssociatedValue = getAssociatedValue();
4374 for (const Use &U : AssociatedValue.uses()) {
4375 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4376 if (SI->getPointerOperand() == &AssociatedValue)
4377 if (SI->getAlignment() < getAssumedAlign()) {
4378 STATS_DECLTRACK(AAAlign, Store,
4379                 "Number of times alignment added to a store");
4380 SI->setAlignment(Align(getAssumedAlign()));
4381 LoadStoreChanged = ChangeStatus::CHANGED;
4382 }
4383 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4384 if (LI->getPointerOperand() == &AssociatedValue)
4385 if (LI->getAlignment() < getAssumedAlign()) {
4386 LI->setAlignment(Align(getAssumedAlign()));
4387 STATS_DECLTRACK(AAAlign, Load,
4388                 "Number of times alignment added to a load");
4389 LoadStoreChanged = ChangeStatus::CHANGED;
4390 }
4391 }
4392 }
4393
4394 ChangeStatus Changed = AAAlign::manifest(A);
4395
4396 Align InheritAlign =
4397 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4398 if (InheritAlign >= getAssumedAlign())
4399 return LoadStoreChanged;
4400 return Changed | LoadStoreChanged;
4401 }
4402
4403 // TODO: Provide a helper to determine the implied ABI alignment, and check
4404 //       that value in the existing manifest method and a new one for AAAlignImpl,
4405 //       to avoid making the alignment explicit if it did not improve.
4406
4407 /// See AbstractAttribute::getDeducedAttributes
4408 virtual void
4409 getDeducedAttributes(LLVMContext &Ctx,
4410 SmallVectorImpl<Attribute> &Attrs) const override {
4411 if (getAssumedAlign() > 1)
4412 Attrs.emplace_back(
4413 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4414 }
4415
4416 /// See followUsesInMBEC
4417 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4418 AAAlign::StateType &State) {
4419 bool TrackUse = false;
4420
4421 unsigned int KnownAlign =
4422 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4423 State.takeKnownMaximum(KnownAlign);
4424
4425 return TrackUse;
4426 }
4427
4428 /// See AbstractAttribute::getAsStr().
4429 const std::string getAsStr() const override {
4430 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4431 "-" + std::to_string(getAssumedAlign()) + ">")
4432 : "unknown-align";
4433 }
4434};
4435
4436/// Align attribute for a floating value.
4437struct AAAlignFloating : AAAlignImpl {
4438 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4439
4440 /// See AbstractAttribute::updateImpl(...).
4441 ChangeStatus updateImpl(Attributor &A) override {
4442 const DataLayout &DL = A.getDataLayout();
4443
4444 auto VisitValueCB = [&](Value &V, const Instruction *,
4445 AAAlign::StateType &T, bool Stripped) -> bool {
4446 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4447 DepClassTy::REQUIRED);
4448 if (!Stripped && this == &AA) {
4449 int64_t Offset;
4450 unsigned Alignment = 1;
4451 if (const Value *Base =
4452 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4453 Align PA = Base->getPointerAlignment(DL);
4454 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4455 // So we can say that the maximum power of two which is a divisor of
4456 // gcd(Offset, Alignment) is an alignment.
4457
4458 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4459 uint32_t(PA.value()));
4460 Alignment = llvm::PowerOf2Floor(gcd);
4461 } else {
4462 Alignment = V.getPointerAlignment(DL).value();
4463 }
4464 // Use only IR information if we did not strip anything.
4465 T.takeKnownMaximum(Alignment);
4466 T.indicatePessimisticFixpoint();
4467 } else {
4468 // Use abstract attribute information.
4469 const AAAlign::StateType &DS = AA.getState();
4470 T ^= DS;
4471 }
4472 return T.isValidState();
4473 };
4474
4475 StateType T;
4476 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4477 VisitValueCB, getCtxI()))
4478 return indicatePessimisticFixpoint();
4479
4480 // TODO: If we know we visited all incoming values, and thus none are assumed
4481 // dead, we can take the known information from the state T.
4482 return clampStateAndIndicateChange(getState(), T);
4483 }
4484
4485 /// See AbstractAttribute::trackStatistics()
4486 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4487};
4488
4489/// Align attribute for function return value.
4490struct AAAlignReturned final
4491 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4492 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4493 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4494
4495 /// See AbstractAttribute::initialize(...).
4496 void initialize(Attributor &A) override {
4497 Base::initialize(A);
4498 Function *F = getAssociatedFunction();
4499 if (!F || F->isDeclaration())
4500 indicatePessimisticFixpoint();
4501 }
4502
4503 /// See AbstractAttribute::trackStatistics()
4504 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4505};
4506
4507/// Align attribute for function argument.
4508struct AAAlignArgument final
4509 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4510 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4511 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4512
4513 /// See AbstractAttribute::manifest(...).
4514 ChangeStatus manifest(Attributor &A) override {
4515 // If the associated argument is involved in a must-tail call we give up
4516 // because we would need to keep the argument alignments of caller and
4517 // callee in-sync. Just does not seem worth the trouble right now.
4518 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4519 return ChangeStatus::UNCHANGED;
4520 return Base::manifest(A);
4521 }
4522
4523 /// See AbstractAttribute::trackStatistics()
4524 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4525};
4526
4527struct AAAlignCallSiteArgument final : AAAlignFloating {
4528 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4529 : AAAlignFloating(IRP, A) {}
4530
4531 /// See AbstractAttribute::manifest(...).
4532 ChangeStatus manifest(Attributor &A) override {
4533 // If the associated argument is involved in a must-tail call we give up
4534 // because we would need to keep the argument alignments of caller and
4535 // callee in-sync. Just does not seem worth the trouble right now.
4536 if (Argument *Arg = getAssociatedArgument())
4537 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4538 return ChangeStatus::UNCHANGED;
4539 ChangeStatus Changed = AAAlignImpl::manifest(A);
4540 Align InheritAlign =
4541 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4542 if (InheritAlign >= getAssumedAlign())
4543 Changed = ChangeStatus::UNCHANGED;
4544 return Changed;
4545 }
4546
4547 /// See AbstractAttribute::updateImpl(Attributor &A).
4548 ChangeStatus updateImpl(Attributor &A) override {
4549 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4550 if (Argument *Arg = getAssociatedArgument()) {
4551 // We only take known information from the argument
4552 // so we do not need to track a dependence.
4553 const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4554 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4555 takeKnownMaximum(ArgAlignAA.getKnownAlign());
4556 }
4557 return Changed;
4558 }
4559
4560 /// See AbstractAttribute::trackStatistics()
4561   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4562};
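The manifest logic above only emits an align attribute when the deduced alignment improves on what the IR already implies, while updateImpl folds new facts in via takeKnownMaximum. Below is a minimal standalone sketch of that clamping idea in plain C++; the type and helper names are illustrative stand-ins, not the Attributor's real alignment state API.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative only: a tiny alignment lattice in the spirit of the code above.
struct AlignState {
  uint64_t Known = 1;    // alignment proven so far (bytes)
  uint64_t Assumed = 1;  // optimistic assumption, always >= Known

  // takeKnownMaximum-style update: new facts can only raise the known value.
  void takeKnownMaximum(uint64_t A) {
    Known = std::max(Known, A);
    Assumed = std::max(Assumed, Known);
  }
};

// Mirrors the "InheritAlign >= getAssumedAlign()" early-out in manifest():
// only emit an attribute if the deduced alignment beats the IR-implied one.
bool shouldAnnotate(uint64_t InheritAlign, uint64_t AssumedAlign) {
  return AssumedAlign > InheritAlign;
}

int main() {
  AlignState S;
  S.takeKnownMaximum(8);
  S.takeKnownMaximum(4);  // weaker facts do not regress the state
  assert(S.Known == 8);
  std::cout << shouldAnnotate(/*InheritAlign=*/16, S.Assumed) << "\n"; // 0
  std::cout << shouldAnnotate(/*InheritAlign=*/4, S.Assumed) << "\n";  // 1
}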
4563
4564/// Align attribute deduction for a call site return value.
4565struct AAAlignCallSiteReturned final
4566 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4567 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4568 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4569 : Base(IRP, A) {}
4570
4571 /// See AbstractAttribute::initialize(...).
4572 void initialize(Attributor &A) override {
4573 Base::initialize(A);
4574 Function *F = getAssociatedFunction();
4575 if (!F || F->isDeclaration())
4576 indicatePessimisticFixpoint();
4577 }
4578
4579 /// See AbstractAttribute::trackStatistics()
4580   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4581};
4582
4583/// ------------------ Function No-Return Attribute ----------------------------
4584struct AANoReturnImpl : public AANoReturn {
4585 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4586
4587 /// See AbstractAttribute::initialize(...).
4588 void initialize(Attributor &A) override {
4589 AANoReturn::initialize(A);
4590 Function *F = getAssociatedFunction();
4591 if (!F || F->isDeclaration())
4592 indicatePessimisticFixpoint();
4593 }
4594
4595 /// See AbstractAttribute::getAsStr().
4596 const std::string getAsStr() const override {
4597 return getAssumed() ? "noreturn" : "may-return";
4598 }
4599
4600 /// See AbstractAttribute::updateImpl(Attributor &A).
4601 virtual ChangeStatus updateImpl(Attributor &A) override {
4602 auto CheckForNoReturn = [](Instruction &) { return false; };
4603 bool UsedAssumedInformation = false;
4604 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4605 {(unsigned)Instruction::Ret},
4606 UsedAssumedInformation))
4607 return indicatePessimisticFixpoint();
4608 return ChangeStatus::UNCHANGED;
4609 }
4610};
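The update rule above relies on a small trick: the callback passed to checkForAllInstructions always returns false, so the check can only succeed when there is no live return instruction left to visit, which is exactly when noreturn may be assumed. A standalone sketch of that reasoning, using simplified stand-in types rather than LLVM's:

#include <functional>
#include <iostream>
#include <vector>

enum class Opcode { Ret, Call, Other };
struct Inst { Opcode Op; bool Live; };

// Simplified stand-in for checkForAllInstructions: true only if the predicate
// holds for every *live* instruction with a matching opcode.
bool checkForAll(const std::vector<Inst> &Insts, Opcode Filter,
                 const std::function<bool(const Inst &)> &Pred) {
  for (const Inst &I : Insts)
    if (I.Live && I.Op == Filter && !Pred(I))
      return false;
  return true;
}

int main() {
  // The predicate always fails, so the check succeeds iff no live 'ret' exists.
  auto AlwaysFalse = [](const Inst &) { return false; };

  std::vector<Inst> NoLiveRet = {{Opcode::Call, true}, {Opcode::Ret, false}};
  std::vector<Inst> HasRet = {{Opcode::Call, true}, {Opcode::Ret, true}};

  std::cout << "noreturn? " << checkForAll(NoLiveRet, Opcode::Ret, AlwaysFalse)
            << "\n";  // 1: keep assuming noreturn
  std::cout << "noreturn? " << checkForAll(HasRet, Opcode::Ret, AlwaysFalse)
            << "\n";  // 0: pessimistic fixpoint
}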
4611
4612struct AANoReturnFunction final : AANoReturnImpl {
4613 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4614 : AANoReturnImpl(IRP, A) {}
4615
4616 /// See AbstractAttribute::trackStatistics()
4617   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4618};
4619
4620/// NoReturn attribute deduction for a call site.
4621struct AANoReturnCallSite final : AANoReturnImpl {
4622 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4623 : AANoReturnImpl(IRP, A) {}
4624
4625 /// See AbstractAttribute::initialize(...).
4626 void initialize(Attributor &A) override {
4627 AANoReturnImpl::initialize(A);
4628 if (Function *F = getAssociatedFunction()) {
4629 const IRPosition &FnPos = IRPosition::function(*F);
4630 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4631 if (!FnAA.isAssumedNoReturn())
4632 indicatePessimisticFixpoint();
4633 }
4634 }
4635
4636 /// See AbstractAttribute::updateImpl(...).
4637 ChangeStatus updateImpl(Attributor &A) override {
4638 // TODO: Once we have call site specific value information we can provide
4639 // call site specific liveness information and then it makes
4640     //       sense to specialize attributes for call site arguments instead of
4641 // redirecting requests to the callee argument.
4642 Function *F = getAssociatedFunction();
4643 const IRPosition &FnPos = IRPosition::function(*F);
4644 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4645 return clampStateAndIndicateChange(getState(), FnAA.getState());
4646 }
4647
4648 /// See AbstractAttribute::trackStatistics()
4649   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4650};
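Call-site variants like the one above mostly forward the query to the callee position and then clamp their own state against the callee's answer, reporting CHANGED only if the clamp lost information. A minimal sketch of that clamp-and-report pattern, with illustrative names rather than the real clampStateAndIndicateChange template:

#include <iostream>

enum class ChangeStatus { UNCHANGED, CHANGED };

// Boolean "assumed noreturn" state: true is the optimistic assumption.
struct BooleanState {
  bool Assumed = true;
  bool operator==(const BooleanState &O) const { return Assumed == O.Assumed; }
};

// Clamp our state by the callee's and report whether anything changed.
ChangeStatus clampStateAndIndicateChange(BooleanState &S,
                                         const BooleanState &Callee) {
  BooleanState Before = S;
  S.Assumed = S.Assumed && Callee.Assumed;  // clamping can only lose information
  return S == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
}

int main() {
  BooleanState CallSite;       // optimistically noreturn
  BooleanState Callee{false};  // callee may return
  auto R = clampStateAndIndicateChange(CallSite, Callee);
  std::cout << (R == ChangeStatus::CHANGED) << "\n";  // 1
  std::cout << CallSite.Assumed << "\n";              // 0
}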
4651
4652/// ----------------------- Variable Capturing ---------------------------------
4653
4654/// A class to hold the state for no-capture attributes.
4655struct AANoCaptureImpl : public AANoCapture {
4656 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4657
4658 /// See AbstractAttribute::initialize(...).
4659 void initialize(Attributor &A) override {
4660 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4661 indicateOptimisticFixpoint();
4662 return;
4663 }
4664 Function *AnchorScope = getAnchorScope();
4665 if (isFnInterfaceKind() &&
4666 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4667 indicatePessimisticFixpoint();
4668 return;
4669 }
4670
4671 // You cannot "capture" null in the default address space.
4672 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4673 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4674 indicateOptimisticFixpoint();
4675 return;
4676 }
4677
4678 const Function *F =
4679 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4680
4681 // Check what state the associated function can actually capture.
4682 if (F)
4683 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4684 else
4685 indicatePessimisticFixpoint();
4686 }
4687
4688 /// See AbstractAttribute::updateImpl(...).
4689 ChangeStatus updateImpl(Attributor &A) override;
4690
4691 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4692 virtual void
4693 getDeducedAttributes(LLVMContext &Ctx,
4694 SmallVectorImpl<Attribute> &Attrs) const override {
4695 if (!isAssumedNoCaptureMaybeReturned())
4696 return;
4697
4698 if (isArgumentPosition()) {
4699 if (isAssumedNoCapture())
4700 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4701 else if (ManifestInternal)
4702 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4703 }
4704 }
4705
4706   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4707 /// depending on the ability of the function associated with \p IRP to capture
4708 /// state in memory and through "returning/throwing", respectively.
4709 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4710 const Function &F,
4711 BitIntegerState &State) {
4712 // TODO: Once we have memory behavior attributes we should use them here.
4713
4714 // If we know we cannot communicate or write to memory, we do not care about
4715 // ptr2int anymore.
4716 if (F.onlyReadsMemory() && F.doesNotThrow() &&
4717 F.getReturnType()->isVoidTy()) {
4718 State.addKnownBits(NO_CAPTURE);
4719 return;
4720 }
4721
4722     // A function cannot capture state in memory if it only reads memory; it
4723     // can, however, return/throw state, and the state might be influenced by
4724     // the pointer value, e.g., loading from a returned pointer might reveal a bit.
4725 if (F.onlyReadsMemory())
4726 State.addKnownBits(NOT_CAPTURED_IN_MEM);
4727
4728     // A function cannot communicate state back if it does not throw
4729     // exceptions and does not return values.
4730 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4731 State.addKnownBits(NOT_CAPTURED_IN_RET);
4732
4733 // Check existing "returned" attributes.
4734 int ArgNo = IRP.getCalleeArgNo();
4735 if (F.doesNotThrow() && ArgNo >= 0) {
4736 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4737 if (F.hasParamAttribute(u, Attribute::Returned)) {
4738 if (u == unsigned(ArgNo))
4739 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4740 else if (F.onlyReadsMemory())
4741 State.addKnownBits(NO_CAPTURE);
4742 else
4743 State.addKnownBits(NOT_CAPTURED_IN_RET);
4744 break;
4745 }
4746 }
4747 }
4748
4749 /// See AbstractState::getAsStr().
4750 const std::string getAsStr() const override {
4751 if (isKnownNoCapture())
4752 return "known not-captured";
4753 if (isAssumedNoCapture())
4754 return "assumed not-captured";
4755 if (isKnownNoCaptureMaybeReturned())
4756 return "known not-captured-maybe-returned";
4757 if (isAssumedNoCaptureMaybeReturned())
4758 return "assumed not-captured-maybe-returned";
4759 return "assumed-captured";
4760 }
4761};
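determineFunctionCaptureCapabilities above treats memory, integers (ptr2int), and returns/exceptions as independent escape channels and encodes them as bits. A small standalone illustration of how the readonly / nothrow / void-return combination maps onto those bits; the enum values here are illustrative, not the real AANoCapture encoding.

#include <cstdio>

// Illustrative bit layout only; the real values live in AANoCapture.
enum : unsigned {
  NOT_CAPTURED_IN_MEM = 1u << 0,
  NOT_CAPTURED_IN_INT = 1u << 1,
  NOT_CAPTURED_IN_RET = 1u << 2,
  NO_CAPTURE = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
};

unsigned captureCapabilities(bool OnlyReadsMemory, bool DoesNotThrow,
                             bool ReturnsVoid) {
  unsigned Known = 0;
  // Readonly + nothrow + void return: nothing can escape at all.
  if (OnlyReadsMemory && DoesNotThrow && ReturnsVoid)
    return NO_CAPTURE;
  // Readonly alone rules out escapes through memory.
  if (OnlyReadsMemory)
    Known |= NOT_CAPTURED_IN_MEM;
  // Nothrow + void return rules out escapes through returns/exceptions.
  if (DoesNotThrow && ReturnsVoid)
    Known |= NOT_CAPTURED_IN_RET;
  return Known;
}

int main() {
  std::printf("%u\n", captureCapabilities(true, true, true));   // NO_CAPTURE (7)
  std::printf("%u\n", captureCapabilities(true, false, false)); // mem bit only (1)
}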
4762
4763/// Attributor-aware capture tracker.
4764struct AACaptureUseTracker final : public CaptureTracker {
4765
4766 /// Create a capture tracker that can lookup in-flight abstract attributes
4767 /// through the Attributor \p A.
4768 ///
4769 /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4770 /// search is stopped. If a use leads to a return instruction,
4771 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4772 /// If a use leads to a ptr2int which may capture the value,
4773 /// \p CapturedInInteger is set. If a use is found that is currently assumed
4774 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4775 /// set. All values in \p PotentialCopies are later tracked as well. For every
4776 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4777 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4778 /// conservatively set to true.
4779 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4780 const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4781 SmallSetVector<Value *, 4> &PotentialCopies,
4782 unsigned &RemainingUsesToExplore)
4783 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4784 PotentialCopies(PotentialCopies),
4785 RemainingUsesToExplore(RemainingUsesToExplore) {}
4786
4787   /// Determine if \p V may be captured. *Also updates the state!*
4788 bool valueMayBeCaptured(const Value *V) {
4789 if (V->getType()->isPointerTy()) {
4790 PointerMayBeCaptured(V, this);
4791 } else {
4792 State.indicatePessimisticFixpoint();
4793 }
4794 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4795 }
4796
4797 /// See CaptureTracker::tooManyUses().
4798 void tooManyUses() override {
4799 State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4800 }
4801
4802 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4803 if (CaptureTracker::isDereferenceableOrNull(O, DL))
4804 return true;
4805 const auto &DerefAA = A.getAAFor<AADereferenceable>(
4806 NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4807 return DerefAA.getAssumedDereferenceableBytes();
4808 }
4809
4810 /// See CaptureTracker::captured(...).
4811 bool captured(const Use *U) override {
4812 Instruction *UInst = cast<Instruction>(U->getUser());
4813     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4814                       << "\n");
4815
4816 // Because we may reuse the tracker multiple times we keep track of the
4817 // number of explored uses ourselves as well.
4818 if (RemainingUsesToExplore-- == 0) {
4819       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4820 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4821 /* Return */ true);
4822 }
4823
4824 // Deal with ptr2int by following uses.
4825 if (isa<PtrToIntInst>(UInst)) {
4826       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4827 return valueMayBeCaptured(UInst);
4828 }
4829
4830 // For stores we check if we can follow the value through memory or not.
4831 if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4832 if (SI->isVolatile())
4833 return isCapturedIn(/* Memory */ true, /* Integer */ false,
4834 /* Return */ false);
4835 bool UsedAssumedInformation = false;
4836 if (!AA::getPotentialCopiesOfStoredValue(
4837 A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4838 return isCapturedIn(/* Memory */ true, /* Integer */ false,
4839 /* Return */ false);
4840 // Not captured directly, potential copies will be checked.
4841 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4842 /* Return */ false);
4843 }
4844
4845 // Explicitly catch return instructions.
4846 if (isa<ReturnInst>(UInst)) {
4847 if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4848 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4849 /* Return */ true);
4850 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4851 /* Return */ true);
4852 }
4853
4854 // For now we only use special logic for call sites. However, the tracker
4855 // itself knows about a lot of other non-capturing cases already.
4856 auto *CB = dyn_cast<CallBase>(UInst);
4857 if (!CB || !CB->isArgOperand(U))
4858 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4859 /* Return */ true);
4860
4861 unsigned ArgNo = CB->getArgOperandNo(U);
4862 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4863     // If we have an abstract no-capture attribute for the argument we can use
4864 // it to justify a non-capture attribute here. This allows recursion!
4865 auto &ArgNoCaptureAA =
4866 A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4867 if (ArgNoCaptureAA.isAssumedNoCapture())
4868 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4869 /* Return */ false);
4870 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4871 addPotentialCopy(*CB);
4872 return isCapturedIn(/* Memory */ false, /* Integer */ false,
4873 /* Return */ false);
4874 }
4875
4876     // Lastly, we could not find a reason why no-capture can be assumed, so we don't.
4877 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4878 /* Return */ true);
4879 }
4880
4881 /// Register \p CS as potential copy of the value we are checking.
4882 void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4883
4884 /// See CaptureTracker::shouldExplore(...).
4885 bool shouldExplore(const Use *U) override {
4886 // Check liveness and ignore droppable users.
4887 bool UsedAssumedInformation = false;
4888 return !U->getUser()->isDroppable() &&
4889 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4890 UsedAssumedInformation);
4891 }
4892
4893 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4894 /// \p CapturedInRet, then return the appropriate value for use in the
4895 /// CaptureTracker::captured() interface.
4896 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4897 bool CapturedInRet) {
4898     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4899                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4900 if (CapturedInMem)
4901 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4902 if (CapturedInInt)
4903 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4904 if (CapturedInRet)
4905 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4906 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4907 }
4908
4909private:
4910 /// The attributor providing in-flight abstract attributes.
4911 Attributor &A;
4912
4913 /// The abstract attribute currently updated.
4914 AANoCapture &NoCaptureAA;
4915
4916 /// The abstract liveness state.
4917 const AAIsDead &IsDeadAA;
4918
4919 /// The state currently updated.
4920 AANoCapture::StateType &State;
4921
4922 /// Set of potential copies of the tracked value.
4923 SmallSetVector<Value *, 4> &PotentialCopies;
4924
4925 /// Global counter to limit the number of explored uses.
4926 unsigned &RemainingUsesToExplore;
4927};
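The tracker above classifies every explored use, clears the matching "not captured in ..." bit, and gives up on all channels once the use budget is exhausted. The sketch below mirrors that control flow with plain C++ stand-ins; it is not the CaptureTracker interface, and it simplifies the budget and state handling.

#include <cstdio>

// Illustrative bit names; the real ones live in AANoCapture.
enum : unsigned {
  NOT_IN_MEM = 1u << 0,
  NOT_IN_INT = 1u << 1,
  NOT_IN_RET = 1u << 2,
};

enum class UseKind { Store, PtrToInt, Return, Benign };

// Classify one use by clearing the matching "not captured in ..." bit and
// report whether the search has to stop, mirroring captured()/isCapturedIn().
bool visitUse(unsigned &Assumed, UseKind K, unsigned &Budget) {
  if (Budget == 0) {  // too many uses: give up on every channel
    Assumed = 0;
    return true;
  }
  --Budget;
  switch (K) {
  case UseKind::Store:    Assumed &= ~NOT_IN_MEM; break;
  case UseKind::PtrToInt: Assumed &= ~NOT_IN_INT; break;
  case UseKind::Return:   Assumed &= ~NOT_IN_RET; break;
  case UseKind::Benign:   break;  // e.g. a nocapture call site argument
  }
  // Mirrors "!isAssumed(NO_CAPTURE_MAYBE_RETURNED)": stop once the value may
  // escape through memory or integers; escaping via a return is tolerated.
  return (Assumed & (NOT_IN_MEM | NOT_IN_INT)) != (NOT_IN_MEM | NOT_IN_INT);
}

int main() {
  unsigned Assumed = NOT_IN_MEM | NOT_IN_INT | NOT_IN_RET;
  unsigned Budget = 16;
  visitUse(Assumed, UseKind::Benign, Budget);  // no bit lost
  visitUse(Assumed, UseKind::Return, Budget);  // only the "ret" bit is lost
  std::printf("assumed bits after a return use: %u\n", Assumed);  // 3
  visitUse(Assumed, UseKind::Store, Budget);   // memory escape: search stops
  std::printf("assumed bits after a store use: %u\n", Assumed);   // 2
}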
4928
4929ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4930 const IRPosition &IRP = getIRPosition();
4931 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4932 : &IRP.getAssociatedValue();
4933 if (!V)
4934 return indicatePessimisticFixpoint();
4935
4936 const Function *F =
4937 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4938   assert(F && "Expected a function!");
4939 const IRPosition &FnPos = IRPosition::function(*F);
4940 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4941
4942 AANoCapture::StateType T;
4943
4944 // Readonly means we cannot capture through memory.
4945 const auto &FnMemAA =
4946 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4947 if (FnMemAA.isAssumedReadOnly()) {
4948 T.addKnownBits(NOT_CAPTURED_IN_MEM);
4949 if (FnMemAA.isKnownReadOnly())
4950 addKnownBits(NOT_CAPTURED_IN_MEM);
4951 else
4952 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4953 }
4954
4955   // Make sure all returned values are different from the underlying value.
4956 // TODO: we could do this in a more sophisticated way inside
4957 // AAReturnedValues, e.g., track all values that escape through returns
4958 // directly somehow.
4959 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4960 bool SeenConstant = false;
4961 for (auto &It : RVAA.returned_values()) {
4962 if (isa<Constant>(It.first)) {
4963 if (SeenConstant)
4964 return false;
4965 SeenConstant = true;
4966 } else if (!isa<Argument>(It.first) ||
4967 It.first == getAssociatedArgument())
4968 return false;
4969 }
4970 return true;
4971 };
4972
4973 const auto &NoUnwindAA =
4974 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4975 if (NoUnwindAA.isAssumedNoUnwind()) {
4976 bool IsVoidTy = F->getReturnType()->isVoidTy();
4977 const AAReturnedValues *RVAA =
4978 IsVoidTy ? nullptr
4979 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4980
4981 DepClassTy::OPTIONAL);
4982 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4983 T.addKnownBits(NOT_CAPTURED_IN_RET);
4984 if (T.isKnown(NOT_CAPTURED_IN_MEM))
4985 return ChangeStatus::UNCHANGED;
4986 if (NoUnwindAA.isKnownNoUnwind() &&
4987 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4988 addKnownBits(NOT_CAPTURED_IN_RET);
4989 if (isKnown(NOT_CAPTURED_IN_MEM))
4990 return indicateOptimisticFixpoint();
4991 }
4992 }
4993 }
4994
4995 // Use the CaptureTracker interface and logic with the specialized tracker,
4996 // defined in AACaptureUseTracker, that can look at in-flight abstract
4997   // attributes and directly update the assumed state.
4998 SmallSetVector<Value *, 4> PotentialCopies;
4999 unsigned RemainingUsesToExplore =
5000 getDefaultMaxUsesToExploreForCaptureTracking();
5001 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
5002 RemainingUsesToExplore);
5003
5004 // Check all potential copies of the associated value until we can assume
5005 // none will be captured or we have to assume at least one might be.
5006 unsigned Idx = 0;
5007 PotentialCopies.insert(V);
5008 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
5009 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
5010
5011 AANoCapture::StateType &S = getState();
5012 auto Assumed = S.getAssumed();
5013 S.intersectAssumedBits(T.getAssumed());
5014 if (!isAssumedNoCaptureMaybeReturned())
5015 return indicatePessimisticFixpoint();
5016 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5017 : ChangeStatus::CHANGED;
5018}
5019
5020/// NoCapture attribute for function arguments.
5021struct AANoCaptureArgument final : AANoCaptureImpl {
5022 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5023 : AANoCaptureImpl(IRP, A) {}
5024
5025 /// See AbstractAttribute::trackStatistics()
5026   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5027};
5028
5029/// NoCapture attribute for call site arguments.
5030struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5031 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5032 : AANoCaptureImpl(IRP, A) {}
5033
5034 /// See AbstractAttribute::initialize(...).
5035 void initialize(Attributor &A) override {
5036 if (Argument *Arg = getAssociatedArgument())
5037 if (Arg->hasByValAttr())
5038 indicateOptimisticFixpoint();
5039 AANoCaptureImpl::initialize(A);
5040 }
5041
5042 /// See AbstractAttribute::updateImpl(...).
5043 ChangeStatus updateImpl(Attributor &A) override {
5044 // TODO: Once we have call site specific value information we can provide
5045 // call site specific liveness information and then it makes
5046     //       sense to specialize attributes for call site arguments instead of
5047 // redirecting requests to the callee argument.
5048 Argument *Arg = getAssociatedArgument();
5049 if (!Arg)
5050 return indicatePessimisticFixpoint();
5051 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5052 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5053 return clampStateAndIndicateChange(getState(), ArgAA.getState());
5054 }
5055
5056 /// See AbstractAttribute::trackStatistics()
5057   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5058};
5059
5060/// NoCapture attribute for floating values.
5061struct AANoCaptureFloating final : AANoCaptureImpl {
5062 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5063 : AANoCaptureImpl(IRP, A) {}
5064
5065 /// See AbstractAttribute::trackStatistics()
5066 void trackStatistics() const override {
5067     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5068 }
5069};
5070
5071/// NoCapture attribute for function return value.
5072struct AANoCaptureReturned final : AANoCaptureImpl {
5073 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5074 : AANoCaptureImpl(IRP, A) {
5075     llvm_unreachable("NoCapture is not applicable to function returns!");
5076 }
5077
5078 /// See AbstractAttribute::initialize(...).
5079 void initialize(Attributor &A) override {
5080     llvm_unreachable("NoCapture is not applicable to function returns!");
5081 }
5082
5083 /// See AbstractAttribute::updateImpl(...).
5084 ChangeStatus updateImpl(Attributor &A) override {
5085     llvm_unreachable("NoCapture is not applicable to function returns!");
5086 }
5087
5088 /// See AbstractAttribute::trackStatistics()
5089 void trackStatistics() const override {}
5090};
5091
5092/// NoCapture attribute deduction for a call site return value.
5093struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5094 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5095 : AANoCaptureImpl(IRP, A) {}
5096
5097 /// See AbstractAttribute::initialize(...).
5098 void initialize(Attributor &A) override {
5099 const Function *F = getAnchorScope();
5100 // Check what state the associated function can actually capture.
5101 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5102 }
5103
5104 /// See AbstractAttribute::trackStatistics()
5105 void trackStatistics() const override {
5106     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5107 }
5108};
5109} // namespace
5110
5111/// ------------------ Value Simplify Attribute ----------------------------
5112
5113bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5114   // FIXME: Add typecast support.
5115 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5116 SimplifiedAssociatedValue, Other, Ty);
5117 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5118 return false;
5119
5120   LLVM_DEBUG({
5121     if (SimplifiedAssociatedValue.hasValue())
5122       dbgs() << "[ValueSimplify] is assumed to be "
5123              << **SimplifiedAssociatedValue << "\n";
5124     else
5125       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5126   });
5127 return true;
5128}
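unionAssumed above folds simplification candidates through a small lattice: no value yet, a single agreed-upon value, or nullptr once candidates conflict, at which point the state becomes invalid. A standalone sketch of such a meet operation using std::optional; the combine rules here are a paraphrase under that assumption, not LLVM's exact combineOptionalValuesInAAValueLatice.

#include <iostream>
#include <optional>

// Illustrative stand-in for Optional<Value *>: nullopt = not yet determined,
// a non-null pointer = the single simplified value, nullptr = contradiction.
using Simplified = std::optional<const int *>;

Simplified combine(Simplified A, Simplified B) {
  if (!B.has_value())
    return A;       // nothing new learned
  if (!A.has_value())
    return B;       // first candidate
  if (*A == *B)
    return A;       // candidates still agree
  return nullptr;   // conflicting candidates: invalid state
}

int main() {
  static const int X = 42, Y = 7;
  Simplified S;  // "assumed to be <none>" in the debug output above
  S = combine(S, &X);
  S = combine(S, &X);
  bool Valid = !(S.has_value() && *S == nullptr);
  std::cout << "valid after agreeing candidates: " << Valid << "\n";  // 1
  S = combine(S, &Y);
  Valid = !(S.has_value() && *S == nullptr);
  std::cout << "valid after a conflict: " << Valid << "\n";           // 0
}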
5129
5130namespace {
5131struct AAValueSimplifyImpl : AAValueSimplify {
5132 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5133 : AAValueSimplify(IRP, A) {}
5134
5135 /// See AbstractAttribute::initialize(...).
5136 void initialize(Attributor &A) override {
5137 if (getAssociatedValue().getType()->isVoidTy())
5138 indicatePessimisticFixpoint();
5139 if (A.hasSimplificationCallback(getIRPosition()))
5140 indicatePessimisticFixpoint();
5141 }
5142
5143 /// See AbstractAttribute::getAsStr().
5144 const std::string getAsStr() const override {
5145     LLVM_DEBUG({
5146       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5147       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5148         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5149     });
5150 return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5151 : "not-simple";
5152 }
5153
5154 /// See AbstractAttribute::trackStatistics()
5155 void trackStatistics() const override {}
5156
5157 /// See AAValueSimplify::getAssumedSimplifiedValue()
5158 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5159 return SimplifiedAssociatedValue;
5160 }
5161
5162 /// Return a value we can use as replacement for the associated one, or
5163 /// nullptr if we don't have one that makes sense.
5164 Value *getReplacementValue(Attributor &A) const {
5165 Value *NewV;
5166 NewV = SimplifiedAssociatedValue.hasValue()
5167 ? SimplifiedAssociatedValue.getValue()
5168 : UndefValue::get(getAssociatedType());
5169 if (!NewV)
5170 return nullptr;
5171 NewV = AA::getWithType(*NewV, *getAssociatedType());
5172 if (!NewV || NewV == &getAssociatedValue())
5173 return nullptr;
5174 const Instruction *CtxI = getCtxI();
5175 if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5176 return nullptr;
5177 if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5178 return nullptr;
5179 return NewV;
5180 }
5181
5182   /// Helper function for querying AAValueSimplify and updating candidate.
5183 /// \param IRP The value position we are trying to unify with SimplifiedValue
5184 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5185 const IRPosition &IRP, bool Simplify = true) {
5186 bool UsedAssumedInformation = false;
5187 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5188 if (Simplify)
5189 QueryingValueSimplified =
5190 A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5191 return unionAssumed(QueryingValueSimplified);
5192 }
5193
5194   /// Returns true if a candidate was found, false otherwise.
5195 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5196 if (!getAssociatedValue().getType()->isIntegerTy())
5197 return false;
5198
5199 // This will also pass the call base context.
5200 const auto &AA =
5201 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5202
5203 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5204
5205 if (!COpt.hasValue()) {
5206 SimplifiedAssociatedValue = llvm::None;
5207 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5208 return true;
5209 }
5210 if (auto *C = COpt.getValue()) {
5211 SimplifiedAssociatedValue = C;
5212 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5213 return true;
5214 }
5215 return false;
5216 }
5217
5218 bool askSimplifiedValueForOtherAAs(Attributor &A) {
5219 if (askSimplifiedValueFor<AAValueConstantRange>(A))
5220 return true;
5221 if (askSimplifiedValueFor<AAPotentialValues>(A))
5222 return true;
5223 return false;
5224 }
5225
5226 /// See AbstractAttribute::manifest(...).
5227 ChangeStatus manifest(Attributor &A) override {
5228 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5229 if (getAssociatedValue().user_empty())
5230 return Changed;
5231
5232 if (auto *NewV = getReplacementValue(A)) {
5233       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5234                         << *NewV << " :: " << *this << "\n");
5235 if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5236 Changed = ChangeStatus::CHANGED;
5237 }
5238
5239 return Changed | AAValueSimplify::manifest(A);
5240 }
5241
5242 /// See AbstractState::indicatePessimisticFixpoint(...).
5243 ChangeStatus indicatePessimisticFixpoint() override {
5244 SimplifiedAssociatedValue = &getAssociatedValue();
5245 return AAValueSimplify::indicatePessimisticFixpoint();
5246 }
5247
5248 static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5249 LoadInst &L, function_ref<bool(Value &)> Union) {
5250 auto UnionWrapper = [&](Value &V, Value &Obj) {
5251 if (isa<AllocaInst>(Obj))
5252 return Union(V);
5253 if (!AA::isDynamicallyUnique(A, AA, V))
5254 return false;
5255 if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5256 return false;
5257 return Union(V);
5258 };
5259
5260 Value &Ptr = *L.getPointerOperand();
5261 SmallVector<Value *, 8> Objects;
5262 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5263 return false;
5264
5265 const auto *TLI =
5266 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
5267 for (Value *Obj : Objects) {
5268       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5269 if (isa<UndefValue>(Obj))
5270 continue;
5271 if (isa<ConstantPointerNull>(Obj)) {
5272 // A null pointer access can be undefined but any offset from null may
5273 // be OK. We do not try to optimize the latter.
5274 bool UsedAssumedInformation = false;
5275 if (!NullPointerIsDefined(L.getFunction(),
5276 Ptr.getType()->getPointerAddressSpace()) &&
5277 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5278 continue;
5279 return false;
5280 }
5281 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
5282 if (!InitialVal || !Union(*InitialVal))
5283 return false;
5284
5285       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5286                            "propagation, checking accesses next.\n");
5287
5288 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5289         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5290 if (!Acc.isWrite())
5291 return true;
5292 if (Acc.isWrittenValueYetUndetermined())
5293 return true;
5294 Value *Content = Acc.getWrittenValue();
5295 if (!Content)
5296 return false;
5297 Value *CastedContent =
5298 AA::getWithType(*Content, *AA.getAssociatedType());
5299 if (!CastedContent)
5300 return false;
5301 if (IsExact)
5302 return UnionWrapper(*CastedContent, *Obj);
5303 if (auto *C = dyn_cast<Constant>(CastedContent))
5304 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5305 return UnionWrapper(*CastedContent, *Obj);
5306 return false;
5307 };
5308
5309 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5310 DepClassTy::REQUIRED);
5311 if (!PI.forallInterferingAccesses(L, CheckAccess))
5312 return false;
5313 }
5314 return true;
5315 }
5316};
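handleLoad above walks the underlying objects of the loaded pointer, seeds each with its initial value, and unions in every interfering written value; a conflicting or unknown write defeats the propagation (the real code can defer still-undetermined writes optimistically). A self-contained sketch of that idea over a toy object model, not the AAPointerInfo interface:

#include <iostream>
#include <optional>
#include <vector>

// Toy model: an object starts with an initial value and records every value
// written to it. A load can be simplified iff all of these agree.
struct ObjectModel {
  int InitialValue = 0;
  std::vector<std::optional<int>> Writes;  // nullopt = written value unknown
};

std::optional<int> simplifyLoad(const std::vector<ObjectModel> &Objects) {
  std::optional<int> Candidate;
  auto Union = [&](int V) {
    if (!Candidate) { Candidate = V; return true; }
    return *Candidate == V;  // conflicting values defeat the propagation
  };
  for (const ObjectModel &Obj : Objects) {
    if (!Union(Obj.InitialValue))
      return std::nullopt;
    for (const std::optional<int> &W : Obj.Writes) {
      if (!W)                // unknown written value: be conservative here
        return std::nullopt;
      if (!Union(*W))
        return std::nullopt;
    }
  }
  return Candidate;
}

int main() {
  ObjectModel A{0, {0, 0}};  // initialized to 0, only 0 ever stored
  ObjectModel B{0, {0, 1}};  // a store of 1 breaks the agreement
  std::cout << simplifyLoad({A}).has_value() << "\n";  // 1: load folds to 0
  std::cout << simplifyLoad({B}).has_value() << "\n";  // 0: cannot simplify
}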
5317
5318struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5319 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5320 : AAValueSimplifyImpl(IRP, A) {}
5321
5322 void initialize(Attributor &A) override {
5323 AAValueSimplifyImpl::initialize(A);
5324 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5325 indicatePessimisticFixpoint();
5326 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5327 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5328 /* IgnoreSubsumingPositions */ true))
5329 indicatePessimisticFixpoint();
5330
5331     // FIXME: This is a hack to prevent us from propagating function pointers in
5332 // the new pass manager CGSCC pass as it creates call edges the
5333 // CallGraphUpdater cannot handle yet.
5334 Value &V = getAssociatedValue();
5335 if (V.getType()->isPointerTy() &&
5336 V.getType()->getPointerElementType()->isFunctionTy() &&
5337 !A.isModulePass())
5338 indicatePessimisticFixpoint();
5339 }
5340
5341 /// See AbstractAttribute::updateImpl(...).
5342 ChangeStatus updateImpl(Attributor &A) override {
5343     // Byval is only replaceable if it is readonly; otherwise we would write into
5344 // the replaced value and not the copy that byval creates implicitly.
5345 Argument *Arg = getAssociatedArgument();
5346 if (Arg->hasByValAttr()) {
5347 // TODO: We probably need to verify synchronization is not an issue, e.g.,
5348 // there is no race by not copying a constant byval.
5349 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5350 DepClassTy::REQUIRED);
5351 if (!MemAA.isAssumedReadOnly())
5352 return indicatePessimisticFixpoint();
5353 }
5354
5355 auto Before = SimplifiedAssociatedValue;
5356
5357 auto PredForCallSite = [&](AbstractCallSite ACS) {
5358 const IRPosition &ACSArgPos =
5359 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5360       // Check if a corresponding argument was found or if it is not
5361       // associated (which can happen for callback calls).
5362 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5363 return false;
5364
5365 // Simplify the argument operand explicitly and check if the result is
5366       // valid in the current scope. This avoids referring to simplified values
5367       // in other functions, e.g., we don't want to say an argument in a
5368 // static function is actually an argument in a different function.
5369 bool UsedAssumedInformation = false;
5370 Optional<Constant *> SimpleArgOp =
5371 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5372 if (!SimpleArgOp.hasValue())
5373 return true;
5374 if (!SimpleArgOp.getValue())
5375 return false;
5376 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5377 return false;
5378 return unionAssumed(*SimpleArgOp);
5379 };
5380
5381     // Generate an answer specific to a call site context.
5382 bool Success;
5383 bool AllCallSitesKnown;
5384 if (hasCallBaseContext() &&
5385 getCallBaseContext()->getCalledFunction() == Arg->getParent())
5386 Success = PredForCallSite(
5387 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5388 else
5389 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5390 AllCallSitesKnown);
5391
5392 if (!Success)
5393 if (!askSimplifiedValueForOtherAAs(A))
5394 return indicatePessimisticFixpoint();
5395
5396     // If a candidate was found in this update, return CHANGED.
5397 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5398 : ChangeStatus ::CHANGED;
5399 }
5400
5401 /// See AbstractAttribute::trackStatistics()
5402 void trackStatistics() const override {
5403     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5404 }
5405};
5406
5407struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5408 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5409 : AAValueSimplifyImpl(IRP, A) {}
5410
5411 /// See AAValueSimplify::getAssumedSimplifiedValue()
5412 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5413 if (!isValidState())
5414 return nullptr;
5415 return SimplifiedAssociatedValue;
5416 }
5417
5418 /// See AbstractAttribute::updateImpl(...).
5419 ChangeStatus updateImpl(Attributor &A) override {
5420 auto Before = SimplifiedAssociatedValue;
5421
5422 auto PredForReturned = [&](Value &V) {
5423 return checkAndUpdate(A, *this,
5424 IRPosition::value(V, getCallBaseContext()));
5425 };
5426
5427 if (!A.checkForAllReturnedValues(PredForReturned, *this))
5428 if (!askSimplifiedValueForOtherAAs(A))
5429 return indicatePessimisticFixpoint();
5430
5431     // If a candidate was found in this update, return CHANGED.
5432 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5433 : ChangeStatus ::CHANGED;
5434 }
5435
5436 ChangeStatus manifest(Attributor &A) override {
5437 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5438
5439 if (auto *NewV = getReplacementValue(A)) {
5440 auto PredForReturned =
5441 [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5442 for (ReturnInst *RI : RetInsts) {
5443 Value *ReturnedVal = RI->getReturnValue();
5444 if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5445 return true;
5446             assert(RI->getFunction() == getAnchorScope() &&
5447                    "ReturnInst in wrong function!");
5448             LLVM_DEBUG(dbgs()