Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
Warning: line 6198, column 9
Value stored to 'HasChanged' is never read
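
The checker behind this warning is the analyzer's dead-store check (enabled here via -analyzer-checker=deadcode): a value is written to a local variable that is never read again. The code at line 6198 is not part of this excerpt, so the snippet below is only a minimal, hypothetical sketch of the pattern the checker flags ('example' and 'doUpdate' are made-up names, not the actual code in AttributorAttributes.cpp):

enum class ChangeStatus { UNCHANGED, CHANGED };

static ChangeStatus doUpdate() { return ChangeStatus::CHANGED; }

static void example() {
  ChangeStatus HasChanged;
  // Dead store: 'HasChanged' is assigned but never read afterwards, which the
  // analyzer reports as "Value stored to 'HasChanged' is never read".
  HasChanged = doUpdate();
  // Typical fixes: drop the unused assignment, cast the call to (void), or
  // actually consume the value (e.g. return it or fold it into a result).
}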

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AttributorAttributes.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/IPO -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
1//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// See the Attributor.h file comment and the class descriptions in that file for
10// more information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/IPO/Attributor.h"
15
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/MapVector.h"
18#include "llvm/ADT/SCCIterator.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetOperations.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Analysis/AssumeBundleQueries.h"
26#include "llvm/Analysis/AssumptionCache.h"
27#include "llvm/Analysis/CaptureTracking.h"
28#include "llvm/Analysis/InstructionSimplify.h"
29#include "llvm/Analysis/LazyValueInfo.h"
30#include "llvm/Analysis/MemoryBuiltins.h"
31#include "llvm/Analysis/OptimizationRemarkEmitter.h"
32#include "llvm/Analysis/ScalarEvolution.h"
33#include "llvm/Analysis/TargetTransformInfo.h"
34#include "llvm/Analysis/ValueTracking.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Assumptions.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/Instruction.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/NoFolder.h"
45#include "llvm/IR/Value.h"
46#include "llvm/IR/ValueHandle.h"
47#include "llvm/Support/Alignment.h"
48#include "llvm/Support/Casting.h"
49#include "llvm/Support/CommandLine.h"
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/GraphWriter.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Transforms/IPO/ArgumentPromotion.h"
55#include "llvm/Transforms/Utils/Local.h"
56#include "llvm/Transforms/Utils/ValueMapper.h"
57#include <cassert>
58
59using namespace llvm;
60
61#define DEBUG_TYPE "attributor"
62
63static cl::opt<bool> ManifestInternal(
64 "attributor-manifest-internal", cl::Hidden,
65 cl::desc("Manifest Attributor internal string attributes."),
66 cl::init(false));
67
68static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
69 cl::Hidden);
70
71template <>
72unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
73
74static cl::opt<unsigned, true> MaxPotentialValues(
75 "attributor-max-potential-values", cl::Hidden,
76 cl::desc("Maximum number of potential values to be "
77 "tracked for each position."),
78 cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
79 cl::init(7));
80
81static cl::opt<unsigned> MaxInterferingAccesses(
82 "attributor-max-interfering-accesses", cl::Hidden,
83 cl::desc("Maximum number of interfering accesses to "
84 "check before assuming all might interfere."),
85 cl::init(6));
86
87STATISTIC(NumAAs, "Number of abstract attributes created");
88
89// Some helper macros to deal with statistics tracking.
90//
91// Usage:
92// For simple IR attribute tracking overload trackStatistics in the abstract
93// attribute and choose the right STATS_DECLTRACK_********* macro,
94// e.g.,:
95// void trackStatistics() const override {
96// STATS_DECLTRACK_ARG_ATTR(returned)
97// }
98// If there is a single "increment" side one can use the macro
99// STATS_DECLTRACK with a custom message. If there are multiple increment
100// sides, STATS_DECL and STATS_TRACK can also be used separately.
101//
102#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
103 ("Number of " #TYPE " marked '" #NAME "'")
104#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
105#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
106#define STATS_DECL(NAME, TYPE, MSG) \
107 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
108#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
109#define STATS_DECLTRACK(NAME, TYPE, MSG) \
110 { \
111 STATS_DECL(NAME, TYPE, MSG) \
112 STATS_TRACK(NAME, TYPE) \
113 }
114#define STATS_DECLTRACK_ARG_ATTR(NAME) \
115 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
116#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
117 STATS_DECLTRACK(NAME, CSArguments, \
118 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
119#define STATS_DECLTRACK_FN_ATTR(NAME) \
120 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
121#define STATS_DECLTRACK_CS_ATTR(NAME) \
122 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
123#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
124 STATS_DECLTRACK(NAME, FunctionReturn, \
125 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
126#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
127 STATS_DECLTRACK(NAME, CSReturn, \
128 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
129#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
130 STATS_DECLTRACK(NAME, Floating, \
131 ("Number of floating values known to be '" #NAME "'"))
132
133// Specialization of the operator<< for abstract attributes subclasses. This
134// disambiguates situations where multiple operators are applicable.
135namespace llvm {
136#define PIPE_OPERATOR(CLASS) \
137 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
138 return OS << static_cast<const AbstractAttribute &>(AA); \
139 }
140
141PIPE_OPERATOR(AAIsDead)
142PIPE_OPERATOR(AANoUnwind)
143PIPE_OPERATOR(AANoSync)
144PIPE_OPERATOR(AANoRecurse)
145PIPE_OPERATOR(AAWillReturn)
146PIPE_OPERATOR(AANoReturn)
147PIPE_OPERATOR(AAReturnedValues)
148PIPE_OPERATOR(AANonNull)
149PIPE_OPERATOR(AANoAlias)
150PIPE_OPERATOR(AADereferenceable)
151PIPE_OPERATOR(AAAlign)
152PIPE_OPERATOR(AAInstanceInfo)
153PIPE_OPERATOR(AANoCapture)
154PIPE_OPERATOR(AAValueSimplify)
155PIPE_OPERATOR(AANoFree)
156PIPE_OPERATOR(AAHeapToStack)
157PIPE_OPERATOR(AAReachability)
158PIPE_OPERATOR(AAMemoryBehavior)
159PIPE_OPERATOR(AAMemoryLocation)
160PIPE_OPERATOR(AAValueConstantRange)
161PIPE_OPERATOR(AAPrivatizablePtr)
162PIPE_OPERATOR(AAUndefinedBehavior)
163PIPE_OPERATOR(AAPotentialConstantValues)
164PIPE_OPERATOR(AANoUndef)
165PIPE_OPERATOR(AACallEdges)
166PIPE_OPERATOR(AAFunctionReachability)
167PIPE_OPERATOR(AAPointerInfo)
168PIPE_OPERATOR(AAAssumptionInfo)
169
170#undef PIPE_OPERATOR
171
172template <>
173ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
174 const DerefState &R) {
175 ChangeStatus CS0 =
176 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
177 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
178 return CS0 | CS1;
179}
180
181} // namespace llvm
182
183/// Get pointer operand of memory accessing instruction. If \p I is
184/// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
185/// is set to false and the instruction is volatile, return nullptr.
186static const Value *getPointerOperand(const Instruction *I,
187 bool AllowVolatile) {
188 if (!AllowVolatile && I->isVolatile())
189 return nullptr;
190
191 if (auto *LI = dyn_cast<LoadInst>(I)) {
192 return LI->getPointerOperand();
193 }
194
195 if (auto *SI = dyn_cast<StoreInst>(I)) {
196 return SI->getPointerOperand();
197 }
198
199 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
200 return CXI->getPointerOperand();
201 }
202
203 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
204 return RMWI->getPointerOperand();
205 }
206
207 return nullptr;
208}
209
210/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
211/// advanced by \p Offset bytes. To aid later analysis the method tries to build
212/// getelement pointer instructions that traverse the natural type of \p Ptr if
213/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
214/// through a cast to i8*.
215///
216/// TODO: This could probably live somewhere more prominently if it doesn't
217/// already exist.
218static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
219 int64_t Offset, IRBuilder<NoFolder> &IRB,
220 const DataLayout &DL) {
221 assert(Offset >= 0 && "Negative offset not supported yet!");
222 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
223 << "-bytes as " << *ResTy << "\n");
224
225 if (Offset) {
226 Type *Ty = PtrElemTy;
227 APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
228 SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
229
230 SmallVector<Value *, 4> ValIndices;
231 std::string GEPName = Ptr->getName().str();
232 for (const APInt &Index : IntIndices) {
233 ValIndices.push_back(IRB.getInt(Index));
234 GEPName += "." + std::to_string(Index.getZExtValue());
235 }
236
237 // Create a GEP for the indices collected above.
238 Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
239
240 // If an offset is left we use byte-wise adjustment.
241 if (IntOffset != 0) {
242 Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243 Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
244 GEPName + ".b" + Twine(IntOffset.getZExtValue()));
245 }
246 }
247
248 // Ensure the result has the requested type.
249 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
250 Ptr->getName() + ".cast");
251
252 LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253 return Ptr;
254}
255
256/// Recursively visit all values that might become \p IRP at some point. This
257/// will be done by looking through cast instructions, selects, phis, and calls
258/// with the "returned" attribute. Once we cannot look through the value any
259/// further, the callback \p VisitValueCB is invoked and passed the current
260/// value, the \p State, and a flag to indicate if we stripped anything.
261/// Stripped means that we unpacked the value associated with \p IRP at least
262/// once. Note that the value used for the callback may still be the value
263/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264/// we will never visit more values than specified by \p MaxValues.
265/// If \p VS does not contain the Interprocedural bit, only values valid in the
266/// scope of \p CtxI will be visited and simplification into other scopes is
267/// prevented.
268template <typename StateTy>
269static bool genericValueTraversal(
270 Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
271 StateTy &State,
272 function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
273 VisitValueCB,
274 const Instruction *CtxI, bool &UsedAssumedInformation,
275 bool UseValueSimplify = true, int MaxValues = 16,
276 function_ref<Value *(Value *)> StripCB = nullptr,
277 AA::ValueScope VS = AA::Interprocedural) {
278
279 struct LivenessInfo {
280 const AAIsDead *LivenessAA = nullptr;
281 bool AnyDead = false;
282 };
283 SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
284 auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
285 LivenessInfo &LI = LivenessAAs[&F];
286 if (!LI.LivenessAA)
287 LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
288 DepClassTy::NONE);
289 return LI;
290 };
291
292 Value *InitialV = &IRP.getAssociatedValue();
293 using Item = std::pair<Value *, const Instruction *>;
294 SmallSet<Item, 16> Visited;
295 SmallVector<Item, 16> Worklist;
296 Worklist.push_back({InitialV, CtxI});
297
298 int Iteration = 0;
299 do {
300 Item I = Worklist.pop_back_val();
301 Value *V = I.first;
302 CtxI = I.second;
303 if (StripCB)
304 V = StripCB(V);
305
306 // Check if we should process the current value. To prevent endless
307 // recursion keep a record of the values we followed!
308 if (!Visited.insert(I).second)
309 continue;
310
311 // Make sure we limit the compile time for complex expressions.
312 if (Iteration++ >= MaxValues) {
313 LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
314 << Iteration << "!\n");
315 return false;
316 }
317
318 // Explicitly look through calls with a "returned" attribute if we do
319 // not have a pointer as stripPointerCasts only works on them.
320 Value *NewV = nullptr;
321 if (V->getType()->isPointerTy()) {
322 NewV = V->stripPointerCasts();
323 } else {
324 auto *CB = dyn_cast<CallBase>(V);
325 if (CB && CB->getCalledFunction()) {
326 for (Argument &Arg : CB->getCalledFunction()->args())
327 if (Arg.hasReturnedAttr()) {
328 NewV = CB->getArgOperand(Arg.getArgNo());
329 break;
330 }
331 }
332 }
333 if (NewV && NewV != V) {
334 Worklist.push_back({NewV, CtxI});
335 continue;
336 }
337
338 // Look through select instructions, visit assumed potential values.
339 if (auto *SI = dyn_cast<SelectInst>(V)) {
340 Optional<Constant *> C = A.getAssumedConstant(
341 *SI->getCondition(), QueryingAA, UsedAssumedInformation);
342 bool NoValueYet = !C.hasValue();
343 if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
344 continue;
345 if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
346 if (CI->isZero())
347 Worklist.push_back({SI->getFalseValue(), CtxI});
348 else
349 Worklist.push_back({SI->getTrueValue(), CtxI});
350 continue;
351 }
352 // We could not simplify the condition, assume both values.
353 Worklist.push_back({SI->getTrueValue(), CtxI});
354 Worklist.push_back({SI->getFalseValue(), CtxI});
355 continue;
356 }
357
358 // Look through phi nodes, visit all live operands.
359 if (auto *PHI = dyn_cast<PHINode>(V)) {
360 LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
361 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
362 BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
363 if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
364 LI.AnyDead = true;
365 UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
366 continue;
367 }
368 Worklist.push_back(
369 {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
370 }
371 continue;
372 }
373
374 if (auto *Arg = dyn_cast<Argument>(V)) {
375 if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
376 SmallVector<Item> CallSiteValues;
377 bool UsedAssumedInformation = false;
378 if (A.checkForAllCallSites(
379 [&](AbstractCallSite ACS) {
380 // Callbacks might not have a corresponding call site operand,
381 // stick with the argument in that case.
382 Value *CSOp = ACS.getCallArgOperand(*Arg);
383 if (!CSOp)
384 return false;
385 CallSiteValues.push_back({CSOp, ACS.getInstruction()});
386 return true;
387 },
388 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
389 Worklist.append(CallSiteValues);
390 continue;
391 }
392 }
393 }
394
395 if (UseValueSimplify && !isa<Constant>(V)) {
396 Optional<Value *> SimpleV =
397 A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
398 if (!SimpleV.hasValue())
399 continue;
400 Value *NewV = SimpleV.getValue();
401 if (NewV && NewV != V) {
402 if ((VS & AA::Interprocedural) || !CtxI ||
403 AA::isValidInScope(*NewV, CtxI->getFunction())) {
404 Worklist.push_back({NewV, CtxI});
405 continue;
406 }
407 }
408 }
409
410 if (auto *LI = dyn_cast<LoadInst>(V)) {
411 bool UsedAssumedInformation = false;
412 // If we ask for the potentially loaded values from the initial pointer we
413 // will simply end up here again. The load is as far as we can make it.
414 if (LI->getPointerOperand() != InitialV) {
415 SmallSetVector<Value *, 4> PotentialCopies;
416 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
417 if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
418 PotentialValueOrigins, QueryingAA,
419 UsedAssumedInformation,
420 /* OnlyExact */ true)) {
421 // Values have to be dynamically unique or we lose the fact that a
422 // single llvm::Value might represent two runtime values (e.g., stack
423 // locations in different recursive calls).
424 bool DynamicallyUnique =
425 llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
426 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
427 });
428 if (DynamicallyUnique &&
429 ((VS & AA::Interprocedural) || !CtxI ||
430 llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
431 return AA::isValidInScope(*PC, CtxI->getFunction());
432 }))) {
433 for (auto *PotentialCopy : PotentialCopies)
434 Worklist.push_back({PotentialCopy, CtxI});
435 continue;
436 }
437 }
438 }
439 }
440
441 // Once a leaf is reached we inform the user through the callback.
442 if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
443 LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
444 << *V << "!\n");
445 return false;
446 }
447 } while (!Worklist.empty());
448
449 // If we actually used liveness information, we have to record a dependence.
450 for (auto &It : LivenessAAs)
451 if (It.second.AnyDead)
452 A.recordDependence(*It.second.LivenessAA, QueryingAA,
453 DepClassTy::OPTIONAL);
454
455 // All values have been visited.
456 return true;
457}
458
459bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
460 SmallVectorImpl<Value *> &Objects,
461 const AbstractAttribute &QueryingAA,
462 const Instruction *CtxI,
463 bool &UsedAssumedInformation,
464 AA::ValueScope VS) {
465 auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
466 SmallPtrSet<Value *, 8> SeenObjects;
467 auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
468 SmallVectorImpl<Value *> &Objects,
469 bool) -> bool {
470 if (SeenObjects.insert(&Val).second)
471 Objects.push_back(&Val);
472 return true;
473 };
474 if (!genericValueTraversal<decltype(Objects)>(
475 A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
476 UsedAssumedInformation, true, 32, StripCB, VS))
477 return false;
478 return true;
479}
480
481static const Value *
482stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
483 const Value *Val, const DataLayout &DL, APInt &Offset,
484 bool GetMinOffset, bool AllowNonInbounds,
485 bool UseAssumed = false) {
486
487 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
488 const IRPosition &Pos = IRPosition::value(V);
489 // Only track dependence if we are going to use the assumed info.
490 const AAValueConstantRange &ValueConstantRangeAA =
491 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
492 UseAssumed ? DepClassTy::OPTIONAL
493 : DepClassTy::NONE);
494 ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
495 : ValueConstantRangeAA.getKnown();
496 if (Range.isFullSet())
497 return false;
498
499 // We can only use the lower part of the range because the upper part can
500 // be higher than what the value can really be.
501 if (GetMinOffset)
502 ROffset = Range.getSignedMin();
503 else
504 ROffset = Range.getSignedMax();
505 return true;
506 };
507
508 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
509 /* AllowInvariant */ true,
510 AttributorAnalysis);
511}
512
513static const Value *
514getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
515 const Value *Ptr, int64_t &BytesOffset,
516 const DataLayout &DL, bool AllowNonInbounds = false) {
517 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
518 const Value *Base =
519 stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
520 /* GetMinOffset */ true, AllowNonInbounds);
521
522 BytesOffset = OffsetAPInt.getSExtValue();
523 return Base;
524}
525
526/// Clamp the information known for all returned values of a function
527/// (identified by \p QueryingAA) into \p S.
528template <typename AAType, typename StateType = typename AAType::StateType>
529static void clampReturnedValueStates(
530 Attributor &A, const AAType &QueryingAA, StateType &S,
531 const IRPosition::CallBaseContext *CBContext = nullptr) {
532 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
533 << QueryingAA << " into " << S << "\n");
534
535 assert((QueryingAA.getIRPosition().getPositionKind() ==
536 IRPosition::IRP_RETURNED ||
537 QueryingAA.getIRPosition().getPositionKind() ==
538 IRPosition::IRP_CALL_SITE_RETURNED) &&
539 "Can only clamp returned value states for a function returned or call "
540 "site returned position!");
541
542 // Use an optional state as there might not be any return values and we want
543 // to join (IntegerState::operator&) the state of all there are.
544 Optional<StateType> T;
545
546 // Callback for each possibly returned value.
547 auto CheckReturnValue = [&](Value &RV) -> bool {
548 const IRPosition &RVPos = IRPosition::value(RV, CBContext);
549 const AAType &AA =
550 A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
551 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
552 << " @ " << RVPos << "\n");
553 const StateType &AAS = AA.getState();
554 if (T.hasValue())
555 *T &= AAS;
556 else
557 T = AAS;
558 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
559 << "\n");
560 return T->isValidState();
561 };
562
563 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
564 S.indicatePessimisticFixpoint();
565 else if (T.hasValue())
566 S ^= *T;
567}
568
569namespace {
570/// Helper class for generic deduction: return value -> returned position.
571template <typename AAType, typename BaseType,
572 typename StateType = typename BaseType::StateType,
573 bool PropagateCallBaseContext = false>
574struct AAReturnedFromReturnedValues : public BaseType {
575 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
576 : BaseType(IRP, A) {}
577
578 /// See AbstractAttribute::updateImpl(...).
579 ChangeStatus updateImpl(Attributor &A) override {
580 StateType S(StateType::getBestState(this->getState()));
581 clampReturnedValueStates<AAType, StateType>(
582 A, *this, S,
583 PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
584 // TODO: If we know we visited all returned values, thus none are assumed
585 // dead, we can take the known information from the state T.
586 return clampStateAndIndicateChange<StateType>(this->getState(), S);
587 }
588};
589
590/// Clamp the information known at all call sites for a given argument
591/// (identified by \p QueryingAA) into \p S.
592template <typename AAType, typename StateType = typename AAType::StateType>
593static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
594 StateType &S) {
595 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
596 << QueryingAA << " into " << S << "\n");
597
598 assert(QueryingAA.getIRPosition().getPositionKind() ==
599 IRPosition::IRP_ARGUMENT &&
600 "Can only clamp call site argument states for an argument position!");
601
602 // Use an optional state as there might not be any return values and we want
603 // to join (IntegerState::operator&) the state of all there are.
604 Optional<StateType> T;
605
606 // The argument number which is also the call site argument number.
607 unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
608
609 auto CallSiteCheck = [&](AbstractCallSite ACS) {
610 const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
611 // Check if a corresponding argument was found or if it is not associated
612 // (which can happen for callback calls).
613 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
614 return false;
615
616 const AAType &AA =
617 A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
618 LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
619 << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
620 const StateType &AAS = AA.getState();
621 if (T.hasValue())
622 *T &= AAS;
623 else
624 T = AAS;
625 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
626 << "\n");
627 return T->isValidState();
628 };
629
630 bool UsedAssumedInformation = false;
631 if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
632 UsedAssumedInformation))
633 S.indicatePessimisticFixpoint();
634 else if (T.hasValue())
635 S ^= *T;
636}
637
638/// This function is the bridge between argument position and the call base
639/// context.
640template <typename AAType, typename BaseType,
641 typename StateType = typename AAType::StateType>
642bool getArgumentStateFromCallBaseContext(Attributor &A,
643 BaseType &QueryingAttribute,
644 IRPosition &Pos, StateType &State) {
645 assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
646 "Expected an 'argument' position !");
647 const CallBase *CBContext = Pos.getCallBaseContext();
648 if (!CBContext)
649 return false;
650
651 int ArgNo = Pos.getCallSiteArgNo();
652 assert(ArgNo >= 0 && "Invalid Arg No!");
653
654 const auto &AA = A.getAAFor<AAType>(
655 QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
656 DepClassTy::REQUIRED);
657 const StateType &CBArgumentState =
658 static_cast<const StateType &>(AA.getState());
659
660 LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
661 << "Position:" << Pos << "CB Arg state:" << CBArgumentState
662 << "\n");
663
664 // NOTE: If we want to do call site grouping it should happen here.
665 State ^= CBArgumentState;
666 return true;
667}
668
669/// Helper class for generic deduction: call site argument -> argument position.
670template <typename AAType, typename BaseType,
671 typename StateType = typename AAType::StateType,
672 bool BridgeCallBaseContext = false>
673struct AAArgumentFromCallSiteArguments : public BaseType {
674 AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
675 : BaseType(IRP, A) {}
676
677 /// See AbstractAttribute::updateImpl(...).
678 ChangeStatus updateImpl(Attributor &A) override {
679 StateType S = StateType::getBestState(this->getState());
680
681 if (BridgeCallBaseContext) {
682 bool Success =
683 getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
684 A, *this, this->getIRPosition(), S);
685 if (Success)
686 return clampStateAndIndicateChange<StateType>(this->getState(), S);
687 }
688 clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
689
690 // TODO: If we know we visited all incoming values, thus none are assumed
691 // dead, we can take the known information from the state T.
692 return clampStateAndIndicateChange<StateType>(this->getState(), S);
693 }
694};
695
696/// Helper class for generic replication: function returned -> cs returned.
697template <typename AAType, typename BaseType,
698 typename StateType = typename BaseType::StateType,
699 bool IntroduceCallBaseContext = false>
700struct AACallSiteReturnedFromReturned : public BaseType {
701 AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
702 : BaseType(IRP, A) {}
703
704 /// See AbstractAttribute::updateImpl(...).
705 ChangeStatus updateImpl(Attributor &A) override {
706 assert(this->getIRPosition().getPositionKind() ==
707 IRPosition::IRP_CALL_SITE_RETURNED &&
708 "Can only wrap function returned positions for call site returned "
709 "positions!");
710 auto &S = this->getState();
711
712 const Function *AssociatedFunction =
713 this->getIRPosition().getAssociatedFunction();
714 if (!AssociatedFunction)
715 return S.indicatePessimisticFixpoint();
716
717 CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
718 if (IntroduceCallBaseContext)
719 LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
720 << CBContext << "\n");
721
722 IRPosition FnPos = IRPosition::returned(
723 *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
724 const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
725 return clampStateAndIndicateChange(S, AA.getState());
726 }
727};
728
729/// Helper function to accumulate uses.
730template <class AAType, typename StateType = typename AAType::StateType>
731static void followUsesInContext(AAType &AA, Attributor &A,
732 MustBeExecutedContextExplorer &Explorer,
733 const Instruction *CtxI,
734 SetVector<const Use *> &Uses,
735 StateType &State) {
736 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
737 for (unsigned u = 0; u < Uses.size(); ++u) {
738 const Use *U = Uses[u];
739 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
740 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
741 if (Found && AA.followUseInMBEC(A, U, UserI, State))
742 for (const Use &Us : UserI->uses())
743 Uses.insert(&Us);
744 }
745 }
746}
747
748/// Use the must-be-executed-context around \p I to add information into \p S.
749/// The AAType class is required to have `followUseInMBEC` method with the
750/// following signature and behaviour:
751///
752/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
753/// U - Underlying use.
754/// I - The user of the \p U.
755/// Returns true if the value should be tracked transitively.
756///
757template <class AAType, typename StateType = typename AAType::StateType>
758static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
759 Instruction &CtxI) {
760
761 // Container for (transitive) uses of the associated value.
762 SetVector<const Use *> Uses;
763 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
764 Uses.insert(&U);
765
766 MustBeExecutedContextExplorer &Explorer =
767 A.getInfoCache().getMustBeExecutedContextExplorer();
768
769 followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
770
771 if (S.isAtFixpoint())
772 return;
773
774 SmallVector<const BranchInst *, 4> BrInsts;
775 auto Pred = [&](const Instruction *I) {
776 if (const BranchInst *Br = dyn_cast<BranchInst>(I))
777 if (Br->isConditional())
778 BrInsts.push_back(Br);
779 return true;
780 };
781
782 // Here, accumulate conditional branch instructions in the context. We
783 // explore the child paths and collect the known states. The disjunction of
784 // those states can be merged to its own state. Let ParentState_i be a state
785 // to indicate the known information for an i-th branch instruction in the
786 // context. ChildStates are created for its successors respectively.
787 //
788 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
789 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
790 // ...
791 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
792 //
793 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
794 //
795 // FIXME: Currently, recursive branches are not handled. For example, we
796 // can't deduce that ptr must be dereferenced in below function.
797 //
798 // void f(int a, int c, int *ptr) {
799 // if(a)
800 // if (b) {
801 // *ptr = 0;
802 // } else {
803 // *ptr = 1;
804 // }
805 // else {
806 // if (b) {
807 // *ptr = 0;
808 // } else {
809 // *ptr = 1;
810 // }
811 // }
812 // }
813
814 Explorer.checkForAllContext(&CtxI, Pred);
815 for (const BranchInst *Br : BrInsts) {
816 StateType ParentState;
817
818 // The known state of the parent state is a conjunction of children's
819 // known states so it is initialized with a best state.
820 ParentState.indicateOptimisticFixpoint();
821
822 for (const BasicBlock *BB : Br->successors()) {
823 StateType ChildState;
824
825 size_t BeforeSize = Uses.size();
826 followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
827
828 // Erase uses which only appear in the child.
829 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
830 It = Uses.erase(It);
831
832 ParentState &= ChildState;
833 }
834
835 // Use only known state.
836 S += ParentState;
837 }
838}
839} // namespace
840
841/// ------------------------ PointerInfo ---------------------------------------
842
843namespace llvm {
844namespace AA {
845namespace PointerInfo {
846
847struct State;
848
849} // namespace PointerInfo
850} // namespace AA
851
852/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
853template <>
854struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
855 using Access = AAPointerInfo::Access;
856 static inline Access getEmptyKey();
857 static inline Access getTombstoneKey();
858 static unsigned getHashValue(const Access &A);
859 static bool isEqual(const Access &LHS, const Access &RHS);
860};
861
862/// Helper that allows OffsetAndSize as a key in a DenseMap.
863template <>
864struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
865 : DenseMapInfo<std::pair<int64_t, int64_t>> {};
866
867/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
868/// but the instruction
869struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
870 using Base = DenseMapInfo<Instruction *>;
871 using Access = AAPointerInfo::Access;
872 static inline Access getEmptyKey();
873 static inline Access getTombstoneKey();
874 static unsigned getHashValue(const Access &A);
875 static bool isEqual(const Access &LHS, const Access &RHS);
876};
877
878} // namespace llvm
879
880/// A type to track pointer/struct usage and accesses for AAPointerInfo.
881struct AA::PointerInfo::State : public AbstractState {
882
883 ~State() {
884 // We do not delete the Accesses objects but need to destroy them still.
885 for (auto &It : AccessBins)
886 It.second->~Accesses();
887 }
888
889 /// Return the best possible representable state.
890 static State getBestState(const State &SIS) { return State(); }
891
892 /// Return the worst possible representable state.
893 static State getWorstState(const State &SIS) {
894 State R;
895 R.indicatePessimisticFixpoint();
896 return R;
897 }
898
899 State() = default;
900 State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
901 SIS.AccessBins.clear();
902 }
903
904 const State &getAssumed() const { return *this; }
905
906 /// See AbstractState::isValidState().
907 bool isValidState() const override { return BS.isValidState(); }
908
909 /// See AbstractState::isAtFixpoint().
910 bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
911
912 /// See AbstractState::indicateOptimisticFixpoint().
913 ChangeStatus indicateOptimisticFixpoint() override {
914 BS.indicateOptimisticFixpoint();
915 return ChangeStatus::UNCHANGED;
916 }
917
918 /// See AbstractState::indicatePessimisticFixpoint().
919 ChangeStatus indicatePessimisticFixpoint() override {
920 BS.indicatePessimisticFixpoint();
921 return ChangeStatus::CHANGED;
922 }
923
924 State &operator=(const State &R) {
925 if (this == &R)
926 return *this;
927 BS = R.BS;
928 AccessBins = R.AccessBins;
929 return *this;
930 }
931
932 State &operator=(State &&R) {
933 if (this == &R)
934 return *this;
935 std::swap(BS, R.BS);
936 std::swap(AccessBins, R.AccessBins);
937 return *this;
938 }
939
940 bool operator==(const State &R) const {
941 if (BS != R.BS)
942 return false;
943 if (AccessBins.size() != R.AccessBins.size())
944 return false;
945 auto It = begin(), RIt = R.begin(), E = end();
946 while (It != E) {
947 if (It->getFirst() != RIt->getFirst())
948 return false;
949 auto &Accs = It->getSecond();
950 auto &RAccs = RIt->getSecond();
951 if (Accs->size() != RAccs->size())
952 return false;
953 for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
954 if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
955 return false;
956 ++It;
957 ++RIt;
958 }
959 return true;
960 }
961 bool operator!=(const State &R) const { return !(*this == R); }
962
963 /// We store accesses in a set with the instruction as key.
964 struct Accesses {
965 SmallVector<AAPointerInfo::Access, 4> Accesses;
966 DenseMap<const Instruction *, unsigned> Map;
967
968 unsigned size() const { return Accesses.size(); }
969
970 using vec_iterator = decltype(Accesses)::iterator;
971 vec_iterator begin() { return Accesses.begin(); }
972 vec_iterator end() { return Accesses.end(); }
973
974 using iterator = decltype(Map)::const_iterator;
975 iterator find(AAPointerInfo::Access &Acc) {
976 return Map.find(Acc.getRemoteInst());
977 }
978 iterator find_end() { return Map.end(); }
979
980 AAPointerInfo::Access &get(iterator &It) {
981 return Accesses[It->getSecond()];
982 }
983
984 void insert(AAPointerInfo::Access &Acc) {
985 Map[Acc.getRemoteInst()] = Accesses.size();
986 Accesses.push_back(Acc);
987 }
988 };
989
990 /// We store all accesses in bins denoted by their offset and size.
991 using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
992
993 AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
994 AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
995
996protected:
997 /// The bins with all the accesses for the associated pointer.
998 AccessBinsTy AccessBins;
999
1000 /// Add a new access to the state at offset \p Offset and with size \p Size.
1001 /// The access is associated with \p I, writes \p Content (if anything), and
1002 /// is of kind \p Kind.
1003 /// \returns CHANGED if the state changed, UNCHANGED otherwise.
1004 ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1005 Instruction &I, Optional<Value *> Content,
1006 AAPointerInfo::AccessKind Kind, Type *Ty,
1007 Instruction *RemoteI = nullptr,
1008 Accesses *BinPtr = nullptr) {
1009 AAPointerInfo::OffsetAndSize Key{Offset, Size};
1010 Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1011 if (!Bin)
1012 Bin = new (A.Allocator) Accesses;
1013 AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1014 // Check if we have an access for this instruction in this bin, if not,
1015 // simply add it.
1016 auto It = Bin->find(Acc);
1017 if (It == Bin->find_end()) {
1018 Bin->insert(Acc);
1019 return ChangeStatus::CHANGED;
1020 }
1021 // If the existing access is the same as the new one, nothing changed.
1022 AAPointerInfo::Access &Current = Bin->get(It);
1023 AAPointerInfo::Access Before = Current;
1024 // The new one will be combined with the existing one.
1025 Current &= Acc;
1026 return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1027 }
1028
1029 /// See AAPointerInfo::forallInterferingAccesses.
1030 bool forallInterferingAccesses(
1031 AAPointerInfo::OffsetAndSize OAS,
1032 function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1033 if (!isValidState())
1034 return false;
1035
1036 for (auto &It : AccessBins) {
1037 AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1038 if (!OAS.mayOverlap(ItOAS))
1039 continue;
1040 bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1041 for (auto &Access : *It.getSecond())
1042 if (!CB(Access, IsExact))
1043 return false;
1044 }
1045 return true;
1046 }
1047
1048 /// See AAPointerInfo::forallInterferingAccesses.
1049 bool forallInterferingAccesses(
1050 Instruction &I,
1051 function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1052 if (!isValidState())
1053 return false;
1054
1055 // First find the offset and size of I.
1056 AAPointerInfo::OffsetAndSize OAS(-1, -1);
1057 for (auto &It : AccessBins) {
1058 for (auto &Access : *It.getSecond()) {
1059 if (Access.getRemoteInst() == &I) {
1060 OAS = It.getFirst();
1061 break;
1062 }
1063 }
1064 if (OAS.getSize() != -1)
1065 break;
1066 }
1067 // No access for I was found, we are done.
1068 if (OAS.getSize() == -1)
1069 return true;
1070
1071 // Now that we have an offset and size, find all overlapping ones and use
1072 // the callback on the accesses.
1073 return forallInterferingAccesses(OAS, CB);
1074 }
1075
1076private:
1077 /// State to track fixpoint and validity.
1078 BooleanState BS;
1079};
1080
1081namespace {
1082struct AAPointerInfoImpl
1083 : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1084 using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1085 AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1086
1087 /// See AbstractAttribute::initialize(...).
1088 void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1089
1090 /// See AbstractAttribute::getAsStr().
1091 const std::string getAsStr() const override {
1092 return std::string("PointerInfo ") +
1093 (isValidState() ? (std::string("#") +
1094 std::to_string(AccessBins.size()) + " bins")
1095 : "<invalid>");
1096 }
1097
1098 /// See AbstractAttribute::manifest(...).
1099 ChangeStatus manifest(Attributor &A) override {
1100 return AAPointerInfo::manifest(A);
1101 }
1102
1103 bool forallInterferingAccesses(
1104 OffsetAndSize OAS,
1105 function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1106 const override {
1107 return State::forallInterferingAccesses(OAS, CB);
1108 }
1109 bool forallInterferingAccesses(
1110 Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1111 function_ref<bool(const Access &, bool)> UserCB) const override {
1112 SmallPtrSet<const Access *, 8> DominatingWrites;
1113 SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1114
1115 Function &Scope = *I.getFunction();
1116 const auto &NoSyncAA = A.getAAFor<AANoSync>(
1117 QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1118 const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1119 IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1120 const bool NoSync = NoSyncAA.isAssumedNoSync();
1121
1122 // Helper to determine if we need to consider threading, which we cannot
1123 // do right now. However, if the function is (assumed) nosync or the
1124 // instruction is executed only by the initial (main) thread, we can
1125 // ignore threading.
1126 auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1127 if (NoSync)
1128 return true;
1129 if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1130 return true;
1131 return false;
1132 };
1133
1134 // Helper to determine if the access is executed by the same thread as the
1135 // load; for now it is sufficient to avoid any potential threading effects
1136 // as we cannot deal with them anyway.
1137 auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1138 return CanIgnoreThreading(*Acc.getLocalInst());
1139 };
1140
1141 // TODO: Use inter-procedural reachability and dominance.
1142 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1143 QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1144
1145 const bool FindInterferingWrites = I.mayReadFromMemory();
1146 const bool FindInterferingReads = I.mayWriteToMemory();
1147 const bool UseDominanceReasoning = FindInterferingWrites;
1148 const bool CanUseCFGResoning = CanIgnoreThreading(I);
1149 InformationCache &InfoCache = A.getInfoCache();
1150 const DominatorTree *DT =
1151 NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1152 ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1153 Scope)
1154 : nullptr;
1155
1156 enum GPUAddressSpace : unsigned {
1157 Generic = 0,
1158 Global = 1,
1159 Shared = 3,
1160 Constant = 4,
1161 Local = 5,
1162 };
1163
1164 // Helper to check if a value has "kernel lifetime", that is it will not
1165 // outlive a GPU kernel. This is true for shared, constant, and local
1166 // globals on AMD and NVIDIA GPUs.
1167 auto HasKernelLifetime = [&](Value *V, Module &M) {
1168 Triple T(M.getTargetTriple());
1169 if (!(T.isAMDGPU() || T.isNVPTX()))
1170 return false;
1171 switch (V->getType()->getPointerAddressSpace()) {
1172 case GPUAddressSpace::Shared:
1173 case GPUAddressSpace::Constant:
1174 case GPUAddressSpace::Local:
1175 return true;
1176 default:
1177 return false;
1178 };
1179 };
1180
1181 // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1182 // to determine if we should look at reachability from the callee. For
1183 // certain pointers we know the lifetime and we do not have to step into the
1184 // callee to determine reachability as the pointer would be dead in the
1185 // callee. See the conditional initialization below.
1186 std::function<bool(const Function &)> IsLiveInCalleeCB;
1187
1188 if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1189 // If the alloca containing function is not recursive the alloca
1190 // must be dead in the callee.
1191 const Function *AIFn = AI->getFunction();
1192 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1193 *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1194 if (NoRecurseAA.isAssumedNoRecurse()) {
1195 IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1196 }
1197 } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1198 // If the global has kernel lifetime we can stop if we reach a kernel
1199 // as it is "dead" in the (unknown) callees.
1200 if (HasKernelLifetime(GV, *GV->getParent()))
1201 IsLiveInCalleeCB = [](const Function &Fn) {
1202 return !Fn.hasFnAttribute("kernel");
1203 };
1204 }
1205
1206 auto AccessCB = [&](const Access &Acc, bool Exact) {
1207 if ((!FindInterferingWrites || !Acc.isWrite()) &&
1208 (!FindInterferingReads || !Acc.isRead()))
1209 return true;
1210
1211 // For now we only filter accesses based on CFG reasoning which does not
1212 // work yet if we have threading effects, or the access is complicated.
1213 if (CanUseCFGResoning) {
1214 if ((!Acc.isWrite() ||
1215 !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1216 IsLiveInCalleeCB)) &&
1217 (!Acc.isRead() ||
1218 !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1219 IsLiveInCalleeCB)))
1220 return true;
1221 if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1222 IsSameThreadAsLoad(Acc)) {
1223 if (DT->dominates(Acc.getLocalInst(), &I))
1224 DominatingWrites.insert(&Acc);
1225 }
1226 }
1227
1228 InterferingAccesses.push_back({&Acc, Exact});
1229 return true;
1230 };
1231 if (!State::forallInterferingAccesses(I, AccessCB))
1232 return false;
1233
1234 // If we cannot use CFG reasoning we only filter the non-write accesses
1235 // and are done here.
1236 if (!CanUseCFGResoning) {
1237 for (auto &It : InterferingAccesses)
1238 if (!UserCB(*It.first, It.second))
1239 return false;
1240 return true;
1241 }
1242
1243 // Helper to determine if we can skip a specific write access. This is in
1244 // the worst case quadratic as we are looking for another write that will
1245 // hide the effect of this one.
1246 auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1247 if (!IsSameThreadAsLoad(Acc))
1248 return false;
1249 if (!DominatingWrites.count(&Acc))
1250 return false;
1251 for (const Access *DomAcc : DominatingWrites) {
1252 assert(Acc.getLocalInst()->getFunction() ==
1253 DomAcc->getLocalInst()->getFunction() &&
1254 "Expected dominating writes to be in the same function!");
1255
1256 if (DomAcc != &Acc &&
1257 DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1258 return true;
1259 }
1260 }
1261 return false;
1262 };
1263
1264 // Run the user callback on all accesses we cannot skip and return if that
1265 // succeeded for all or not.
1266 unsigned NumInterferingAccesses = InterferingAccesses.size();
1267 for (auto &It : InterferingAccesses) {
1268 if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1269 !CanSkipAccess(*It.first, It.second)) {
1270 if (!UserCB(*It.first, It.second))
1271 return false;
1272 }
1273 }
1274 return true;
1275 }
1276
1277 ChangeStatus translateAndAddCalleeState(Attributor &A,
1278 const AAPointerInfo &CalleeAA,
1279 int64_t CallArgOffset, CallBase &CB) {
1280 using namespace AA::PointerInfo;
1281 if (!CalleeAA.getState().isValidState() || !isValidState())
1282 return indicatePessimisticFixpoint();
1283
1284 const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1285 bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1286
1287 // Combine the accesses bin by bin.
1288 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1289 for (auto &It : CalleeImplAA.getState()) {
1290 OffsetAndSize OAS = OffsetAndSize::getUnknown();
1291 if (CallArgOffset != OffsetAndSize::Unknown)
1292 OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1293 It.first.getSize());
1294 Accesses *Bin = AccessBins[OAS];
1295 for (const AAPointerInfo::Access &RAcc : *It.second) {
1296 if (IsByval && !RAcc.isRead())
1297 continue;
1298 bool UsedAssumedInformation = false;
1299 Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1300 RAcc.getContent(), CB, *this, UsedAssumedInformation);
1301 AccessKind AK =
1302 AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1303 : AccessKind::AK_READ_WRITE));
1304 Changed =
1305 Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1306 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1307 }
1308 }
1309 return Changed;
1310 }
1311
1312 /// Statistic tracking for all AAPointerInfo implementations.
1313 /// See AbstractAttribute::trackStatistics().
1314 void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1315};
1316
1317struct AAPointerInfoFloating : public AAPointerInfoImpl {
1318 using AccessKind = AAPointerInfo::AccessKind;
1319 AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1320 : AAPointerInfoImpl(IRP, A) {}
1321
1322 /// See AbstractAttribute::initialize(...).
1323 void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1324
1325 /// Deal with an access and signal if it was handled successfully.
1326 bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1327 Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1328 ChangeStatus &Changed, Type *Ty,
1329 int64_t Size = OffsetAndSize::Unknown) {
1330 using namespace AA::PointerInfo;
1331 // No need to find a size if one is given or the offset is unknown.
1332 if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1333 Ty) {
1334 const DataLayout &DL = A.getDataLayout();
1335 TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1336 if (!AccessSize.isScalable())
1337 Size = AccessSize.getFixedSize();
1338 }
1339 Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1340 return true;
1341 };
1342
1343 /// Helper struct, will support ranges eventually.
1344 struct OffsetInfo {
1345 int64_t Offset = OffsetAndSize::Unknown;
1346
1347 bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1348 };
1349
1350 /// See AbstractAttribute::updateImpl(...).
1351 ChangeStatus updateImpl(Attributor &A) override {
1352 using namespace AA::PointerInfo;
1353 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1354 Value &AssociatedValue = getAssociatedValue();
1355
1356 const DataLayout &DL = A.getDataLayout();
1357 DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1358 OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1359
1360 auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1361 bool &Follow) {
1362 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1363 UsrOI = PtrOI;
1364 Follow = true;
1365 return true;
1366 };
1367
1368 const auto *TLI = getAnchorScope()
1369 ? A.getInfoCache().getTargetLibraryInfoForFunction(
1370 *getAnchorScope())
1371 : nullptr;
1372 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1373 Value *CurPtr = U.get();
1374 User *Usr = U.getUser();
1375 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1376 << *Usr << "\n");
1377 assert(OffsetInfoMap.count(CurPtr) &&
1378 "The current pointer offset should have been seeded!");
1379
1380 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1381 if (CE->isCast())
1382 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1383 if (CE->isCompare())
1384 return true;
1385 if (!isa<GEPOperator>(CE)) {
1386 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1387 << "\n");
1388 return false;
1389 }
1390 }
1391 if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1392 // Note the order here, the Usr access might change the map, CurPtr is
1393 // already in it though.
1394 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1395 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1396 UsrOI = PtrOI;
1397
1398 // TODO: Use range information.
1399 if (PtrOI.Offset == OffsetAndSize::Unknown ||
1400 !GEP->hasAllConstantIndices()) {
1401 UsrOI.Offset = OffsetAndSize::Unknown;
1402 Follow = true;
1403 return true;
1404 }
1405
1406 SmallVector<Value *, 8> Indices;
1407 for (Use &Idx : GEP->indices()) {
1408 if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1409 Indices.push_back(CIdx);
1410 continue;
1411 }
1412
1413 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1414 << " : " << *Idx << "\n");
1415 return false;
1416 }
1417 UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1418 GEP->getSourceElementType(), Indices);
1419 Follow = true;
1420 return true;
1421 }
1422 if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1423 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1424
1425 // For PHIs we need to take care of the recurrence explicitly as the value
1426 // might change while we iterate through a loop. For now, we give up if
1427 // the PHI is not invariant.
1428 if (isa<PHINode>(Usr)) {
1429 // Note the order here, the Usr access might change the map, CurPtr is
1430 // already in it though.
1431 OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1432 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1433 // Check if the PHI is invariant (so far).
1434 if (UsrOI == PtrOI)
1435 return true;
1436
1437 // Check if the PHI operand has already an unknown offset as we can't
1438 // improve on that anymore.
1439 if (PtrOI.Offset == OffsetAndSize::Unknown) {
1440 UsrOI = PtrOI;
1441 Follow = true;
1442 return true;
1443 }
1444
1445 // Check if the PHI operand is not dependent on the PHI itself.
1446 // TODO: This is not great as we look at the pointer type. However, it
1447 // is unclear where the Offset size comes from with typeless pointers.
1448 APInt Offset(
1449 DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1450 0);
1451 if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1452 DL, Offset, /* AllowNonInbounds */ true)) {
1453 if (Offset != PtrOI.Offset) {
1454 LLVM_DEBUG(dbgs()
1455 << "[AAPointerInfo] PHI operand pointer offset mismatch "
1456 << *CurPtr << " in " << *Usr << "\n");
1457 return false;
1458 }
1459 return HandlePassthroughUser(Usr, PtrOI, Follow);
1460 }
1461
1462 // TODO: Approximate in case we know the direction of the recurrence.
1463 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1464 << *CurPtr << " in " << *Usr << "\n");
1465 UsrOI = PtrOI;
1466 UsrOI.Offset = OffsetAndSize::Unknown;
1467 Follow = true;
1468 return true;
1469 }
1470
1471 if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1472 return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1473 AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1474 Changed, LoadI->getType());
1475 if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1476 if (StoreI->getValueOperand() == CurPtr) {
1477 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1478 << *StoreI << "\n");
1479 return false;
1480 }
1481 bool UsedAssumedInformation = false;
1482 Optional<Value *> Content = A.getAssumedSimplified(
1483 *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1484 return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1485 OffsetInfoMap[CurPtr].Offset, Changed,
1486 StoreI->getValueOperand()->getType());
1487 }
1488 if (auto *CB = dyn_cast<CallBase>(Usr)) {
1489 if (CB->isLifetimeStartOrEnd())
1490 return true;
1491 if (TLI && isFreeCall(CB, TLI))
1492 return true;
1493 if (CB->isArgOperand(&U)) {
1494 unsigned ArgNo = CB->getArgOperandNo(&U);
1495 const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1496 *this, IRPosition::callsite_argument(*CB, ArgNo),
1497 DepClassTy::REQUIRED);
1498 Changed = translateAndAddCalleeState(
1499 A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
1500 Changed;
1501 return true;
1502 }
1503 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1504 << "\n");
1505 // TODO: Allow some call uses
1506 return false;
1507 }
1508
1509 LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1510 return false;
1511 };
1512 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1513 if (OffsetInfoMap.count(NewU))
1514 return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1515 OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1516 return true;
1517 };
1518 if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1519 /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1520 /* IgnoreDroppableUses */ true, EquivalentUseCB))
1521 return indicatePessimisticFixpoint();
1522
1523 LLVM_DEBUG({
1524 dbgs() << "Accesses by bin after update:\n";
1525 for (auto &It : AccessBins) {
1526 dbgs() << "[" << It.first.getOffset() << "-"
1527 << It.first.getOffset() + It.first.getSize()
1528 << "] : " << It.getSecond()->size() << "\n";
1529 for (auto &Acc : *It.getSecond()) {
1530 dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1531 << "\n";
1532 if (Acc.getLocalInst() != Acc.getRemoteInst())
1533 dbgs() << " --> "
1534 << *Acc.getRemoteInst() << "\n";
1535 if (!Acc.isWrittenValueYetUndetermined()) {
1536 if (Acc.getWrittenValue())
1537 dbgs() << " - c: " << *Acc.getWrittenValue() << "\n";
1538 else
1539 dbgs() << " - c: <unknown>\n";
1540 }
1541 }
1542 }
1543 });
1544
1545 return Changed;
1546 }
1547
1548 /// See AbstractAttribute::trackStatistics()
1549 void trackStatistics() const override {
1550 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1551 }
1552};
1553
1554struct AAPointerInfoReturned final : AAPointerInfoImpl {
1555 AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1556 : AAPointerInfoImpl(IRP, A) {}
1557
1558 /// See AbstractAttribute::updateImpl(...).
1559 ChangeStatus updateImpl(Attributor &A) override {
1560 return indicatePessimisticFixpoint();
1561 }
1562
1563 /// See AbstractAttribute::trackStatistics()
1564 void trackStatistics() const override {
1565 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1566 }
1567};
1568
1569struct AAPointerInfoArgument final : AAPointerInfoFloating {
1570 AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1571 : AAPointerInfoFloating(IRP, A) {}
1572
1573 /// See AbstractAttribute::initialize(...).
1574 void initialize(Attributor &A) override {
1575 AAPointerInfoFloating::initialize(A);
1576 if (getAnchorScope()->isDeclaration())
1577 indicatePessimisticFixpoint();
1578 }
1579
1580 /// See AbstractAttribute::trackStatistics()
1581 void trackStatistics() const override {
1582 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1583 }
1584};
1585
1586struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1587 AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1588 : AAPointerInfoFloating(IRP, A) {}
1589
1590 /// See AbstractAttribute::updateImpl(...).
1591 ChangeStatus updateImpl(Attributor &A) override {
1592 using namespace AA::PointerInfo;
1593 // We handle memory intrinsics explicitly, at least the first (=
1594 // destination) and second (=source) arguments as we know how they are
1595 // accessed.
1596 if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1597 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1598 int64_t LengthVal = OffsetAndSize::Unknown;
1599 if (Length)
1600 LengthVal = Length->getSExtValue();
1601 Value &Ptr = getAssociatedValue();
1602 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1603 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1604 if (ArgNo == 0) {
1605 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1606 nullptr, LengthVal);
1607 } else if (ArgNo == 1) {
1608 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1609 nullptr, LengthVal);
1610 } else {
1611 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1612 << *MI << "\n");
1613 return indicatePessimisticFixpoint();
1614 }
1615 return Changed;
1616 }
1617
1618 // TODO: Once we have call site specific value information we can provide
1619 // call site specific liveness information and then it makes
1620 // sense to specialize attributes for call sites arguments instead of
1621 // redirecting requests to the callee argument.
1622 Argument *Arg = getAssociatedArgument();
1623 if (!Arg)
1624 return indicatePessimisticFixpoint();
1625 const IRPosition &ArgPos = IRPosition::argument(*Arg);
1626 auto &ArgAA =
1627 A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1628 return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1629 }
1630
1631 /// See AbstractAttribute::trackStatistics()
1632 void trackStatistics() const override {
1633 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1634 }
1635};
1636
1637struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1638 AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1639 : AAPointerInfoFloating(IRP, A) {}
1640
1641 /// See AbstractAttribute::trackStatistics()
1642 void trackStatistics() const override {
1643 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1644 }
1645};
1646} // namespace
1647
1648/// -----------------------NoUnwind Function Attribute--------------------------
1649
1650namespace {
1651struct AANoUnwindImpl : AANoUnwind {
1652 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1653
1654 const std::string getAsStr() const override {
1655 return getAssumed() ? "nounwind" : "may-unwind";
1656 }
1657
1658 /// See AbstractAttribute::updateImpl(...).
1659 ChangeStatus updateImpl(Attributor &A) override {
1660 auto Opcodes = {
1661 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
1662 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
1663 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1664
1665 auto CheckForNoUnwind = [&](Instruction &I) {
1666 if (!I.mayThrow())
1667 return true;
1668
1669 if (const auto *CB = dyn_cast<CallBase>(&I)) {
1670 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1671 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1672 return NoUnwindAA.isAssumedNoUnwind();
1673 }
1674 return false;
1675 };
1676
1677 bool UsedAssumedInformation = false;
1678 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1679 UsedAssumedInformation))
1680 return indicatePessimisticFixpoint();
1681
1682 return ChangeStatus::UNCHANGED;
1683 }
1684};
1685
1686struct AANoUnwindFunction final : public AANoUnwindImpl {
1687 AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1688 : AANoUnwindImpl(IRP, A) {}
1689
1690 /// See AbstractAttribute::trackStatistics()
1691 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1692};
1693
1694/// NoUnwind attribute deduction for a call site.
1695struct AANoUnwindCallSite final : AANoUnwindImpl {
1696 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1697 : AANoUnwindImpl(IRP, A) {}
1698
1699 /// See AbstractAttribute::initialize(...).
1700 void initialize(Attributor &A) override {
1701 AANoUnwindImpl::initialize(A);
1702 Function *F = getAssociatedFunction();
1703 if (!F || F->isDeclaration())
1704 indicatePessimisticFixpoint();
1705 }
1706
1707 /// See AbstractAttribute::updateImpl(...).
1708 ChangeStatus updateImpl(Attributor &A) override {
1709 // TODO: Once we have call site specific value information we can provide
1710 // call site specific liveness information and then it makes
1711 // sense to specialize attributes for call sites arguments instead of
1712 // redirecting requests to the callee argument.
1713 Function *F = getAssociatedFunction();
1714 const IRPosition &FnPos = IRPosition::function(*F);
1715 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1716 return clampStateAndIndicateChange(getState(), FnAA.getState());
1717 }
1718
1719 /// See AbstractAttribute::trackStatistics()
1720 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1721};
1722} // namespace
1723
1724/// --------------------- Function Return Values -------------------------------
1725
1726namespace {
1727/// "Attribute" that collects all potential returned values and the return
1728/// instructions that they arise from.
1729///
1730/// If there is a unique returned value R, the manifest method will:
1731/// - mark R with the "returned" attribute, if R is an argument.
1732class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1733
1734 /// Mapping of values potentially returned by the associated function to the
1735 /// return instructions that might return them.
1736 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1737
1738 /// State flags
1739 ///
1740 ///{
1741 bool IsFixed = false;
1742 bool IsValidState = true;
1743 ///}
1744
1745public:
1746 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1747 : AAReturnedValues(IRP, A) {}
1748
1749 /// See AbstractAttribute::initialize(...).
1750 void initialize(Attributor &A) override {
1751 // Reset the state.
1752 IsFixed = false;
1753 IsValidState = true;
1754 ReturnedValues.clear();
1755
1756 Function *F = getAssociatedFunction();
1757 if (!F || F->isDeclaration()) {
1758 indicatePessimisticFixpoint();
1759 return;
1760 }
1761 assert(!F->getReturnType()->isVoidTy() &&
1762 "Did not expect a void return type!");
1763
1764 // The map from instruction opcodes to those instructions in the function.
1765 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1766
1767 // Look through all arguments, if one is marked as returned we are done.
1768 for (Argument &Arg : F->args()) {
1769 if (Arg.hasReturnedAttr()) {
1770 auto &ReturnInstSet = ReturnedValues[&Arg];
1771 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1772 for (Instruction *RI : *Insts)
1773 ReturnInstSet.insert(cast<ReturnInst>(RI));
1774
1775 indicateOptimisticFixpoint();
1776 return;
1777 }
1778 }
1779
1780 if (!A.isFunctionIPOAmendable(*F))
1781 indicatePessimisticFixpoint();
1782 }
1783
1784 /// See AbstractAttribute::manifest(...).
1785 ChangeStatus manifest(Attributor &A) override;
1786
1787 /// See AbstractAttribute::getState(...).
1788 AbstractState &getState() override { return *this; }
1789
1790 /// See AbstractAttribute::getState(...).
1791 const AbstractState &getState() const override { return *this; }
1792
1793 /// See AbstractAttribute::updateImpl(Attributor &A).
1794 ChangeStatus updateImpl(Attributor &A) override;
1795
1796 llvm::iterator_range<iterator> returned_values() override {
1797 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1798 }
1799
1800 llvm::iterator_range<const_iterator> returned_values() const override {
1801 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1802 }
1803
1804 /// Return the number of potential return values, -1 if unknown.
1805 size_t getNumReturnValues() const override {
1806 return isValidState() ? ReturnedValues.size() : -1;
1807 }
1808
1809 /// Return an assumed unique return value if a single candidate is found. If
1810 /// there cannot be one, return a nullptr. If it is not clear yet, return the
1811 /// Optional::NoneType.
1812 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1813
1814 /// See AbstractState::checkForAllReturnedValues(...).
1815 bool checkForAllReturnedValuesAndReturnInsts(
1816 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1817 const override;
1818
1819 /// Pretty print the attribute similar to the IR representation.
1820 const std::string getAsStr() const override;
1821
1822 /// See AbstractState::isAtFixpoint().
1823 bool isAtFixpoint() const override { return IsFixed; }
1824
1825 /// See AbstractState::isValidState().
1826 bool isValidState() const override { return IsValidState; }
1827
1828 /// See AbstractState::indicateOptimisticFixpoint(...).
1829 ChangeStatus indicateOptimisticFixpoint() override {
1830 IsFixed = true;
1831 return ChangeStatus::UNCHANGED;
1832 }
1833
1834 ChangeStatus indicatePessimisticFixpoint() override {
1835 IsFixed = true;
1836 IsValidState = false;
1837 return ChangeStatus::CHANGED;
1838 }
1839};
1840
1841ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1842 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1843
1844 // Bookkeeping.
1845 assert(isValidState());
1846 STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1847 "Number of function with known return values");
1848
1849 // Check if we have an assumed unique return value that we could manifest.
1850 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1851
1852 if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1853 return Changed;
1854
1855 // Bookkeeping.
1856 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1857 "Number of function with unique return");
1858 // If the assumed unique return value is an argument, annotate it.
1859 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1860 if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1861 getAssociatedFunction()->getReturnType())) {
1862 getIRPosition() = IRPosition::argument(*UniqueRVArg);
1863 Changed = IRAttribute::manifest(A);
1864 }
1865 }
1866 return Changed;
1867}
1868
1869const std::string AAReturnedValuesImpl::getAsStr() const {
1870 return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1871 (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1872}
1873
1874Optional<Value *>
1875AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1876 // If checkForAllReturnedValues provides a unique value, ignoring potential
1877 // undef values that can also be present, it is assumed to be the actual
1878 // return value and forwarded to the caller of this method. If there are
1879 // multiple, a nullptr is returned indicating there cannot be a unique
1880 // returned value.
1881 Optional<Value *> UniqueRV;
1882 Type *Ty = getAssociatedFunction()->getReturnType();
1883
1884 auto Pred = [&](Value &RV) -> bool {
1885 UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1886 return UniqueRV != Optional<Value *>(nullptr);
1887 };
1888
1889 if (!A.checkForAllReturnedValues(Pred, *this))
1890 UniqueRV = nullptr;
1891
1892 return UniqueRV;
1893}
1894
1895bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1896 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1897 const {
1898 if (!isValidState())
1899 return false;
1900
1901 // Check all returned values but ignore call sites as long as we have not
1902 // encountered an overdefined one during an update.
1903 for (auto &It : ReturnedValues) {
1904 Value *RV = It.first;
1905 if (!Pred(*RV, It.second))
1906 return false;
1907 }
1908
1909 return true;
1910}
1911
1912ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1913 ChangeStatus Changed = ChangeStatus::UNCHANGED;
1914
1915 auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1916 bool) -> bool {
1917 assert(AA::isValidInScope(V, Ret.getFunction()) &&
1918 "Assumed returned value should be valid in function scope!");
1919 if (ReturnedValues[&V].insert(&Ret))
1920 Changed = ChangeStatus::CHANGED;
1921 return true;
1922 };
1923
1924 bool UsedAssumedInformation = false;
1925 auto ReturnInstCB = [&](Instruction &I) {
1926 ReturnInst &Ret = cast<ReturnInst>(I);
1927 return genericValueTraversal<ReturnInst>(
1928 A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1929 &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1930 /* MaxValues */ 16,
1931 /* StripCB */ nullptr, AA::Intraprocedural);
1932 };
1933
1934 // Discover returned values from all live returned instructions in the
1935 // associated function.
1936 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1937 UsedAssumedInformation))
1938 return indicatePessimisticFixpoint();
1939 return Changed;
1940}
1941
1942struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1943 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1944 : AAReturnedValuesImpl(IRP, A) {}
1945
1946 /// See AbstractAttribute::trackStatistics()
1947   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1948};
1949
1950 /// Returned values information for a call site.
1951struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1952 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1953 : AAReturnedValuesImpl(IRP, A) {}
1954
1955 /// See AbstractAttribute::initialize(...).
1956 void initialize(Attributor &A) override {
1957 // TODO: Once we have call site specific value information we can provide
1958 // call site specific liveness information and then it makes
1959 // sense to specialize attributes for call sites instead of
1960 // redirecting requests to the callee.
1961     llvm_unreachable("Abstract attributes for returned values are not "
1962                      "supported for call sites yet!");
1963 }
1964
1965 /// See AbstractAttribute::updateImpl(...).
1966 ChangeStatus updateImpl(Attributor &A) override {
1967 return indicatePessimisticFixpoint();
1968 }
1969
1970 /// See AbstractAttribute::trackStatistics()
1971 void trackStatistics() const override {}
1972};
1973} // namespace
1974
1975/// ------------------------ NoSync Function Attribute -------------------------
1976
1977bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1978 if (!I->isAtomic())
1979 return false;
1980
1981 if (auto *FI = dyn_cast<FenceInst>(I))
1982 // All legal orderings for fence are stronger than monotonic.
1983 return FI->getSyncScopeID() != SyncScope::SingleThread;
1984 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1985 // Unordered is not a legal ordering for cmpxchg.
1986 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1987 AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1988 }
1989
1990 AtomicOrdering Ordering;
1991 switch (I->getOpcode()) {
1992 case Instruction::AtomicRMW:
1993 Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1994 break;
1995 case Instruction::Store:
1996 Ordering = cast<StoreInst>(I)->getOrdering();
1997 break;
1998 case Instruction::Load:
1999 Ordering = cast<LoadInst>(I)->getOrdering();
2000 break;
2001 default:
2002     llvm_unreachable(
2003         "New atomic operations need to be known in the attributor.");
2004 }
2005
2006 return (Ordering != AtomicOrdering::Unordered &&
2007 Ordering != AtomicOrdering::Monotonic);
2008}
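For source-level intuition only (the mapping of C++ memory orders to LLVM atomic orderings is an assumption here, not something this file states): relaxed accesses lower to monotonic loads/stores and are not flagged by the predicate above, while anything stronger is.

  #include <atomic>

  int relaxed_load(std::atomic<int> &X) {
    return X.load(std::memory_order_relaxed); // lowers to a monotonic load: not a non-relaxed atomic
  }

  int acquire_load(std::atomic<int> &X) {
    return X.load(std::memory_order_acquire); // stronger than monotonic: flagged by the check above
  }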
2009
2010/// Return true if this intrinsic is nosync. This is only used for intrinsics
2011/// which would be nosync except that they have a volatile flag. All other
2012/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2013bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2014 if (auto *MI = dyn_cast<MemIntrinsic>(I))
2015 return !MI->isVolatile();
2016 return false;
2017}
2018
2019namespace {
2020struct AANoSyncImpl : AANoSync {
2021 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2022
2023 const std::string getAsStr() const override {
2024 return getAssumed() ? "nosync" : "may-sync";
2025 }
2026
2027 /// See AbstractAttribute::updateImpl(...).
2028 ChangeStatus updateImpl(Attributor &A) override;
2029};
2030
2031ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2032
2033 auto CheckRWInstForNoSync = [&](Instruction &I) {
2034 return AA::isNoSyncInst(A, I, *this);
2035 };
2036
2037 auto CheckForNoSync = [&](Instruction &I) {
2038 // At this point we handled all read/write effects and they are all
2039 // nosync, so they can be skipped.
2040 if (I.mayReadOrWriteMemory())
2041 return true;
2042
2043 // non-convergent and readnone imply nosync.
2044 return !cast<CallBase>(I).isConvergent();
2045 };
2046
2047 bool UsedAssumedInformation = false;
2048 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2049 UsedAssumedInformation) ||
2050 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2051 UsedAssumedInformation))
2052 return indicatePessimisticFixpoint();
2053
2054 return ChangeStatus::UNCHANGED;
2055}
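A source-level illustration (assumption: the code lowers without synchronizing or convergent calls): a function made only of plain loads and arithmetic passes both checks above and stays assumed nosync, whereas a fence or a non-relaxed atomic in the body would trigger the pessimistic fixpoint.

  int sum(const int *A, int N) {
    int S = 0;
    for (int I = 0; I < N; ++I)
      S += A[I]; // ordinary, non-atomic, non-volatile loads only
    return S;    // nothing here has synchronization semantics
  }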
2056
2057struct AANoSyncFunction final : public AANoSyncImpl {
2058 AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2059 : AANoSyncImpl(IRP, A) {}
2060
2061 /// See AbstractAttribute::trackStatistics()
2062   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2063};
2064
2065 /// NoSync attribute deduction for a call site.
2066struct AANoSyncCallSite final : AANoSyncImpl {
2067 AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2068 : AANoSyncImpl(IRP, A) {}
2069
2070 /// See AbstractAttribute::initialize(...).
2071 void initialize(Attributor &A) override {
2072 AANoSyncImpl::initialize(A);
2073 Function *F = getAssociatedFunction();
2074 if (!F || F->isDeclaration())
2075 indicatePessimisticFixpoint();
2076 }
2077
2078 /// See AbstractAttribute::updateImpl(...).
2079 ChangeStatus updateImpl(Attributor &A) override {
2080 // TODO: Once we have call site specific value information we can provide
2081 // call site specific liveness information and then it makes
2082 // sense to specialize attributes for call sites arguments instead of
2083 // redirecting requests to the callee argument.
2084 Function *F = getAssociatedFunction();
2085 const IRPosition &FnPos = IRPosition::function(*F);
2086 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2087 return clampStateAndIndicateChange(getState(), FnAA.getState());
2088 }
2089
2090 /// See AbstractAttribute::trackStatistics()
2091   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2092};
2093} // namespace
2094
2095/// ------------------------ No-Free Attributes ----------------------------
2096
2097namespace {
2098struct AANoFreeImpl : public AANoFree {
2099 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2100
2101 /// See AbstractAttribute::updateImpl(...).
2102 ChangeStatus updateImpl(Attributor &A) override {
2103 auto CheckForNoFree = [&](Instruction &I) {
2104 const auto &CB = cast<CallBase>(I);
2105 if (CB.hasFnAttr(Attribute::NoFree))
2106 return true;
2107
2108 const auto &NoFreeAA = A.getAAFor<AANoFree>(
2109 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2110 return NoFreeAA.isAssumedNoFree();
2111 };
2112
2113 bool UsedAssumedInformation = false;
2114 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2115 UsedAssumedInformation))
2116 return indicatePessimisticFixpoint();
2117 return ChangeStatus::UNCHANGED;
2118 }
2119
2120 /// See AbstractAttribute::getAsStr().
2121 const std::string getAsStr() const override {
2122 return getAssumed() ? "nofree" : "may-free";
2123 }
2124};
2125
2126struct AANoFreeFunction final : public AANoFreeImpl {
2127 AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2128 : AANoFreeImpl(IRP, A) {}
2129
2130 /// See AbstractAttribute::trackStatistics()
2131   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2132};
2133
2134 /// NoFree attribute deduction for a call site.
2135struct AANoFreeCallSite final : AANoFreeImpl {
2136 AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2137 : AANoFreeImpl(IRP, A) {}
2138
2139 /// See AbstractAttribute::initialize(...).
2140 void initialize(Attributor &A) override {
2141 AANoFreeImpl::initialize(A);
2142 Function *F = getAssociatedFunction();
2143 if (!F || F->isDeclaration())
2144 indicatePessimisticFixpoint();
2145 }
2146
2147 /// See AbstractAttribute::updateImpl(...).
2148 ChangeStatus updateImpl(Attributor &A) override {
2149 // TODO: Once we have call site specific value information we can provide
2150 // call site specific liveness information and then it makes
2151 // sense to specialize attributes for call sites arguments instead of
2152 // redirecting requests to the callee argument.
2153 Function *F = getAssociatedFunction();
2154 const IRPosition &FnPos = IRPosition::function(*F);
2155 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2156 return clampStateAndIndicateChange(getState(), FnAA.getState());
2157 }
2158
2159 /// See AbstractAttribute::trackStatistics()
2160   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2161};
2162
2163/// NoFree attribute for floating values.
2164struct AANoFreeFloating : AANoFreeImpl {
2165 AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2166 : AANoFreeImpl(IRP, A) {}
2167
2168 /// See AbstractAttribute::trackStatistics()
2169   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
2170
2171 /// See AbstractAttribute::updateImpl(...).
2172 ChangeStatus updateImpl(Attributor &A) override {
2173 const IRPosition &IRP = getIRPosition();
2174
2175 const auto &NoFreeAA = A.getAAFor<AANoFree>(
2176 *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2177 if (NoFreeAA.isAssumedNoFree())
2178 return ChangeStatus::UNCHANGED;
2179
2180 Value &AssociatedValue = getIRPosition().getAssociatedValue();
2181 auto Pred = [&](const Use &U, bool &Follow) -> bool {
2182 Instruction *UserI = cast<Instruction>(U.getUser());
2183 if (auto *CB = dyn_cast<CallBase>(UserI)) {
2184 if (CB->isBundleOperand(&U))
2185 return false;
2186 if (!CB->isArgOperand(&U))
2187 return true;
2188 unsigned ArgNo = CB->getArgOperandNo(&U);
2189
2190 const auto &NoFreeArg = A.getAAFor<AANoFree>(
2191 *this, IRPosition::callsite_argument(*CB, ArgNo),
2192 DepClassTy::REQUIRED);
2193 return NoFreeArg.isAssumedNoFree();
2194 }
2195
2196 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2197 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2198 Follow = true;
2199 return true;
2200 }
2201 if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2202 isa<ReturnInst>(UserI))
2203 return true;
2204
2205 // Unknown user.
2206 return false;
2207 };
2208 if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2209 return indicatePessimisticFixpoint();
2210
2211 return ChangeStatus::UNCHANGED;
2212 }
2213};
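Source-level intuition for the use walk above (illustration only; whether a given callee keeps the value nofree is decided by the recursive AANoFree query on the call-site argument, not by this sketch):

  #include <cstdlib>

  void keeps_nofree(int *P) { // precondition: P points to at least two ints
    int *Q = P + 1;           // GEP-like use: followed transitively (Follow = true)
    *Q = 0;                   // store user: accepted
  }

  void may_free(int *P) {
    std::free(P);             // call-site argument not known nofree: pessimistic fixpoint
  }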
2214
2215 /// NoFree attribute for a function argument.
2216struct AANoFreeArgument final : AANoFreeFloating {
2217 AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2218 : AANoFreeFloating(IRP, A) {}
2219
2220 /// See AbstractAttribute::trackStatistics()
2221   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2222};
2223
2224/// NoFree attribute for call site arguments.
2225struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2226 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2227 : AANoFreeFloating(IRP, A) {}
2228
2229 /// See AbstractAttribute::updateImpl(...).
2230 ChangeStatus updateImpl(Attributor &A) override {
2231 // TODO: Once we have call site specific value information we can provide
2232 // call site specific liveness information and then it makes
2233 // sense to specialize attributes for call sites arguments instead of
2234 // redirecting requests to the callee argument.
2235 Argument *Arg = getAssociatedArgument();
2236 if (!Arg)
2237 return indicatePessimisticFixpoint();
2238 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2239 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2240 return clampStateAndIndicateChange(getState(), ArgAA.getState());
2241 }
2242
2243 /// See AbstractAttribute::trackStatistics()
2244   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) };
2245};
2246
2247/// NoFree attribute for function return value.
2248struct AANoFreeReturned final : AANoFreeFloating {
2249 AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2250 : AANoFreeFloating(IRP, A) {
2251     llvm_unreachable("NoFree is not applicable to function returns!");
2252 }
2253
2254 /// See AbstractAttribute::initialize(...).
2255 void initialize(Attributor &A) override {
2256     llvm_unreachable("NoFree is not applicable to function returns!");
2257 }
2258
2259 /// See AbstractAttribute::updateImpl(...).
2260 ChangeStatus updateImpl(Attributor &A) override {
2261     llvm_unreachable("NoFree is not applicable to function returns!");
2262 }
2263
2264 /// See AbstractAttribute::trackStatistics()
2265 void trackStatistics() const override {}
2266};
2267
2268/// NoFree attribute deduction for a call site return value.
2269struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2270 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2271 : AANoFreeFloating(IRP, A) {}
2272
2273 ChangeStatus manifest(Attributor &A) override {
2274 return ChangeStatus::UNCHANGED;
2275 }
2276 /// See AbstractAttribute::trackStatistics()
2277   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2278};
2279} // namespace
2280
2281/// ------------------------ NonNull Argument Attribute ------------------------
2282namespace {
2283static int64_t getKnownNonNullAndDerefBytesForUse(
2284 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2285 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2286 TrackUse = false;
2287
2288 const Value *UseV = U->get();
2289 if (!UseV->getType()->isPointerTy())
2290 return 0;
2291
2292 // We need to follow common pointer manipulation uses to the accesses they
2293 // feed into. We can try to be smart to avoid looking through things we do not
2294 // like for now, e.g., non-inbounds GEPs.
2295 if (isa<CastInst>(I)) {
2296 TrackUse = true;
2297 return 0;
2298 }
2299
2300 if (isa<GetElementPtrInst>(I)) {
2301 TrackUse = true;
2302 return 0;
2303 }
2304
2305 Type *PtrTy = UseV->getType();
2306 const Function *F = I->getFunction();
2307 bool NullPointerIsDefined =
2308 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2309 const DataLayout &DL = A.getInfoCache().getDL();
2310 if (const auto *CB = dyn_cast<CallBase>(I)) {
2311 if (CB->isBundleOperand(U)) {
2312 if (RetainedKnowledge RK = getKnowledgeFromUse(
2313 U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2314 IsNonNull |=
2315 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2316 return RK.ArgValue;
2317 }
2318 return 0;
2319 }
2320
2321 if (CB->isCallee(U)) {
2322 IsNonNull |= !NullPointerIsDefined;
2323 return 0;
2324 }
2325
2326 unsigned ArgNo = CB->getArgOperandNo(U);
2327 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2328 // As long as we only use known information there is no need to track
2329 // dependences here.
2330 auto &DerefAA =
2331 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2332 IsNonNull |= DerefAA.isKnownNonNull();
2333 return DerefAA.getKnownDereferenceableBytes();
2334 }
2335
2336 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2337 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2338 return 0;
2339
2340 int64_t Offset;
2341 const Value *Base =
2342 getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2343 if (Base && Base == &AssociatedValue) {
2344 int64_t DerefBytes = Loc->Size.getValue() + Offset;
2345 IsNonNull |= !NullPointerIsDefined;
2346 return std::max(int64_t(0), DerefBytes);
2347 }
2348
2349 /// Corner case when an offset is 0.
2350 Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2351 /*AllowNonInbounds*/ true);
2352 if (Base && Base == &AssociatedValue && Offset == 0) {
2353 int64_t DerefBytes = Loc->Size.getValue();
2354 IsNonNull |= !NullPointerIsDefined;
2355 return std::max(int64_t(0), DerefBytes);
2356 }
2357
2358 return 0;
2359}
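For intuition (illustration only; it assumes an address space in which null is not a defined address): a plain, non-volatile load through the argument lets the walk above record the argument as nonnull and dereferenceable for the accessed size.

  int reads_arg(int *P) {
    return *P; // precise load at offset 0: implies nonnull plus sizeof(int) dereferenceable bytes
  }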
2360
2361struct AANonNullImpl : AANonNull {
2362 AANonNullImpl(const IRPosition &IRP, Attributor &A)
2363 : AANonNull(IRP, A),
2364 NullIsDefined(NullPointerIsDefined(
2365 getAnchorScope(),
2366 getAssociatedValue().getType()->getPointerAddressSpace())) {}
2367
2368 /// See AbstractAttribute::initialize(...).
2369 void initialize(Attributor &A) override {
2370 Value &V = getAssociatedValue();
2371 if (!NullIsDefined &&
2372 hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2373 /* IgnoreSubsumingPositions */ false, &A)) {
2374 indicateOptimisticFixpoint();
2375 return;
2376 }
2377
2378 if (isa<ConstantPointerNull>(V)) {
2379 indicatePessimisticFixpoint();
2380 return;
2381 }
2382
2383 AANonNull::initialize(A);
2384
2385 bool CanBeNull, CanBeFreed;
2386 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2387 CanBeFreed)) {
2388 if (!CanBeNull) {
2389 indicateOptimisticFixpoint();
2390 return;
2391 }
2392 }
2393
2394 if (isa<GlobalValue>(&getAssociatedValue())) {
2395 indicatePessimisticFixpoint();
2396 return;
2397 }
2398
2399 if (Instruction *CtxI = getCtxI())
2400 followUsesInMBEC(*this, A, getState(), *CtxI);
2401 }
2402
2403 /// See followUsesInMBEC
2404 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2405 AANonNull::StateType &State) {
2406 bool IsNonNull = false;
2407 bool TrackUse = false;
2408 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2409 IsNonNull, TrackUse);
2410 State.setKnown(IsNonNull);
2411 return TrackUse;
2412 }
2413
2414 /// See AbstractAttribute::getAsStr().
2415 const std::string getAsStr() const override {
2416 return getAssumed() ? "nonnull" : "may-null";
2417 }
2418
2419 /// Flag to determine if the underlying value can be null and still allow
2420 /// valid accesses.
2421 const bool NullIsDefined;
2422};
2423
2424/// NonNull attribute for a floating value.
2425struct AANonNullFloating : public AANonNullImpl {
2426 AANonNullFloating(const IRPosition &IRP, Attributor &A)
2427 : AANonNullImpl(IRP, A) {}
2428
2429 /// See AbstractAttribute::updateImpl(...).
2430 ChangeStatus updateImpl(Attributor &A) override {
2431 const DataLayout &DL = A.getDataLayout();
2432
2433 DominatorTree *DT = nullptr;
2434 AssumptionCache *AC = nullptr;
2435 InformationCache &InfoCache = A.getInfoCache();
2436 if (const Function *Fn = getAnchorScope()) {
2437 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2438 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2439 }
2440
2441 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2442 AANonNull::StateType &T, bool Stripped) -> bool {
2443 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2444 DepClassTy::REQUIRED);
2445 if (!Stripped && this == &AA) {
2446 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2447 T.indicatePessimisticFixpoint();
2448 } else {
2449 // Use abstract attribute information.
2450 const AANonNull::StateType &NS = AA.getState();
2451 T ^= NS;
2452 }
2453 return T.isValidState();
2454 };
2455
2456 StateType T;
2457 bool UsedAssumedInformation = false;
2458 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2459 VisitValueCB, getCtxI(),
2460 UsedAssumedInformation))
2461 return indicatePessimisticFixpoint();
2462
2463 return clampStateAndIndicateChange(getState(), T);
2464 }
2465
2466 /// See AbstractAttribute::trackStatistics()
2467   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2468};
2469
2470/// NonNull attribute for function return value.
2471struct AANonNullReturned final
2472 : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2473 AANonNullReturned(const IRPosition &IRP, Attributor &A)
2474 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2475
2476 /// See AbstractAttribute::getAsStr().
2477 const std::string getAsStr() const override {
2478 return getAssumed() ? "nonnull" : "may-null";
2479 }
2480
2481 /// See AbstractAttribute::trackStatistics()
2482   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2483};
2484
2485/// NonNull attribute for function argument.
2486struct AANonNullArgument final
2487 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2488 AANonNullArgument(const IRPosition &IRP, Attributor &A)
2489 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2490
2491 /// See AbstractAttribute::trackStatistics()
2492   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2493};
2494
2495struct AANonNullCallSiteArgument final : AANonNullFloating {
2496 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2497 : AANonNullFloating(IRP, A) {}
2498
2499 /// See AbstractAttribute::trackStatistics()
2500   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2501};
2502
2503/// NonNull attribute for a call site return position.
2504struct AANonNullCallSiteReturned final
2505 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2506 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2507 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2508
2509 /// See AbstractAttribute::trackStatistics()
2510   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2511};
2512} // namespace
2513
2514/// ------------------------ No-Recurse Attributes ----------------------------
2515
2516namespace {
2517struct AANoRecurseImpl : public AANoRecurse {
2518 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2519
2520 /// See AbstractAttribute::getAsStr()
2521 const std::string getAsStr() const override {
2522 return getAssumed() ? "norecurse" : "may-recurse";
2523 }
2524};
2525
2526struct AANoRecurseFunction final : AANoRecurseImpl {
2527 AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2528 : AANoRecurseImpl(IRP, A) {}
2529
2530 /// See AbstractAttribute::updateImpl(...).
2531 ChangeStatus updateImpl(Attributor &A) override {
2532
2533 // If all live call sites are known to be no-recurse, we are as well.
2534 auto CallSitePred = [&](AbstractCallSite ACS) {
2535 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2536 *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2537 DepClassTy::NONE);
2538 return NoRecurseAA.isKnownNoRecurse();
2539 };
2540 bool UsedAssumedInformation = false;
2541 if (A.checkForAllCallSites(CallSitePred, *this, true,
2542 UsedAssumedInformation)) {
2543 // If we know all call sites and all are known no-recurse, we are done.
2544 // If all known call sites, which might not be all that exist, are known
2545 // to be no-recurse, we are not done but we can continue to assume
2546 // no-recurse. If one of the call sites we have not visited will become
2547 // live, another update is triggered.
2548 if (!UsedAssumedInformation)
2549 indicateOptimisticFixpoint();
2550 return ChangeStatus::UNCHANGED;
2551 }
2552
2553 const AAFunctionReachability &EdgeReachability =
2554 A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2555 DepClassTy::REQUIRED);
2556 if (EdgeReachability.canReach(A, *getAnchorScope()))
2557 return indicatePessimisticFixpoint();
2558 return ChangeStatus::UNCHANGED;
2559 }
2560
2561   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2562};
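Source-level illustration only: a self-recursive function can reach itself through its own call edges, so the reachability query above blocks norecurse, while a call-free function is trivially norecurse.

  int fact(int N) { return N <= 1 ? 1 : N * fact(N - 1); } // reaches itself: may-recurse
  int leaf(int N) { return N + 1; }                        // no calls at all: norecurse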
2563
2564 /// NoRecurse attribute deduction for a call site.
2565struct AANoRecurseCallSite final : AANoRecurseImpl {
2566 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2567 : AANoRecurseImpl(IRP, A) {}
2568
2569 /// See AbstractAttribute::initialize(...).
2570 void initialize(Attributor &A) override {
2571 AANoRecurseImpl::initialize(A);
2572 Function *F = getAssociatedFunction();
2573 if (!F || F->isDeclaration())
2574 indicatePessimisticFixpoint();
2575 }
2576
2577 /// See AbstractAttribute::updateImpl(...).
2578 ChangeStatus updateImpl(Attributor &A) override {
2579 // TODO: Once we have call site specific value information we can provide
2580 // call site specific liveness information and then it makes
2581 // sense to specialize attributes for call sites arguments instead of
2582 // redirecting requests to the callee argument.
2583 Function *F = getAssociatedFunction();
2584 const IRPosition &FnPos = IRPosition::function(*F);
2585 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2586 return clampStateAndIndicateChange(getState(), FnAA.getState());
2587 }
2588
2589 /// See AbstractAttribute::trackStatistics()
2590   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2591};
2592} // namespace
2593
2594/// -------------------- Undefined-Behavior Attributes ------------------------
2595
2596namespace {
2597struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2598 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2599 : AAUndefinedBehavior(IRP, A) {}
2600
2601 /// See AbstractAttribute::updateImpl(...).
2602 // through a pointer (i.e. also branches etc.)
2603 ChangeStatus updateImpl(Attributor &A) override {
2604 const size_t UBPrevSize = KnownUBInsts.size();
2605 const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2606
2607 auto InspectMemAccessInstForUB = [&](Instruction &I) {
2608 // The language reference now states that volatile stores are not UB, so skip them.
2609 if (I.isVolatile() && I.mayWriteToMemory())
2610 return true;
2611
2612 // Skip instructions that are already saved.
2613 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2614 return true;
2615
2616 // If we reach here, we know we have an instruction
2617 // that accesses memory through a pointer operand,
2618 // for which getPointerOperand() should give it to us.
2619 Value *PtrOp =
2620 const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2621       assert(PtrOp &&
2622              "Expected pointer operand of memory accessing instruction");
2623
2624 // Either we stopped and the appropriate action was taken,
2625 // or we got back a simplified value to continue.
2626 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2627 if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2628 return true;
2629 const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2630
2631 // A memory access through a pointer is considered UB
2632 // only if the pointer has constant null value.
2633 // TODO: Expand it to not only check constant values.
2634 if (!isa<ConstantPointerNull>(PtrOpVal)) {
2635 AssumedNoUBInsts.insert(&I);
2636 return true;
2637 }
2638 const Type *PtrTy = PtrOpVal->getType();
2639
2640 // Because we only consider instructions inside functions,
2641 // assume that a parent function exists.
2642 const Function *F = I.getFunction();
2643
2644 // A memory access using constant null pointer is only considered UB
2645 // if null pointer is _not_ defined for the target platform.
2646 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2647 AssumedNoUBInsts.insert(&I);
2648 else
2649 KnownUBInsts.insert(&I);
2650 return true;
2651 };
2652
2653 auto InspectBrInstForUB = [&](Instruction &I) {
2654 // A conditional branch instruction is considered UB if it has `undef`
2655 // condition.
2656
2657 // Skip instructions that are already saved.
2658 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2659 return true;
2660
2661 // We know we have a branch instruction.
2662 auto *BrInst = cast<BranchInst>(&I);
2663
2664 // Unconditional branches are never considered UB.
2665 if (BrInst->isUnconditional())
2666 return true;
2667
2668 // Either we stopped and the appropriate action was taken,
2669 // or we got back a simplified value to continue.
2670 Optional<Value *> SimplifiedCond =
2671 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2672 if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2673 return true;
2674 AssumedNoUBInsts.insert(&I);
2675 return true;
2676 };
2677
2678 auto InspectCallSiteForUB = [&](Instruction &I) {
2679 // Check whether a callsite always causes UB or not.
2680
2681 // Skip instructions that are already saved.
2682 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2683 return true;
2684
2685 // Check nonnull and noundef argument attribute violation for each
2686 // callsite.
2687 CallBase &CB = cast<CallBase>(I);
2688 Function *Callee = CB.getCalledFunction();
2689 if (!Callee)
2690 return true;
2691 for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2692 // If current argument is known to be simplified to null pointer and the
2693 // corresponding argument position is known to have nonnull attribute,
2694 // the argument is poison. Furthermore, if the argument is poison and
2695 // the position is known to have the noundef attribute, this callsite is
2696 // considered UB.
2697 if (idx >= Callee->arg_size())
2698 break;
2699 Value *ArgVal = CB.getArgOperand(idx);
2700 if (!ArgVal)
2701 continue;
2702 // Here, we handle three cases.
2703 // (1) Not having a value means it is dead. (we can replace the value
2704 // with undef)
2705 // (2) Simplified to undef. The argument violates the noundef attribute.
2706 // (3) Simplified to null pointer where known to be nonnull.
2707 //     The argument is a poison value and violates the noundef attribute.
2708 IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2709 auto &NoUndefAA =
2710 A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2711 if (!NoUndefAA.isKnownNoUndef())
2712 continue;
2713 bool UsedAssumedInformation = false;
2714 Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2715 IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2716 if (UsedAssumedInformation)
2717 continue;
2718 if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2719 return true;
2720 if (!SimplifiedVal.hasValue() ||
2721 isa<UndefValue>(*SimplifiedVal.getValue())) {
2722 KnownUBInsts.insert(&I);
2723 continue;
2724 }
2725 if (!ArgVal->getType()->isPointerTy() ||
2726 !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2727 continue;
2728 auto &NonNullAA =
2729 A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2730 if (NonNullAA.isKnownNonNull())
2731 KnownUBInsts.insert(&I);
2732 }
2733 return true;
2734 };
2735
2736 auto InspectReturnInstForUB = [&](Instruction &I) {
2737 auto &RI = cast<ReturnInst>(I);
2738 // Either we stopped and the appropriate action was taken,
2739 // or we got back a simplified return value to continue.
2740 Optional<Value *> SimplifiedRetValue =
2741 stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2742 if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2743 return true;
2744
2745 // Check if a return instruction always causes UB or not.
2746 // Note: It is guaranteed that the returned position of the anchor
2747 // scope has noundef attribute when this is called.
2748 // We also ensure the return position is not "assumed dead"
2749 // because the returned value was then potentially simplified to
2750 // `undef` in AAReturnedValues without removing the `noundef`
2751 // attribute yet.
2752
2753 // When the returned position has the noundef attribute, UB occurs in the
2754 // following cases.
2755 // (1) Returned value is known to be undef.
2756 // (2) The value is known to be a null pointer and the returned
2757 // position has nonnull attribute (because the returned value is
2758 // poison).
2759 if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2760 auto &NonNullAA = A.getAAFor<AANonNull>(
2761 *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2762 if (NonNullAA.isKnownNonNull())
2763 KnownUBInsts.insert(&I);
2764 }
2765
2766 return true;
2767 };
2768
2769 bool UsedAssumedInformation = false;
2770 A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2771 {Instruction::Load, Instruction::Store,
2772 Instruction::AtomicCmpXchg,
2773 Instruction::AtomicRMW},
2774 UsedAssumedInformation,
2775 /* CheckBBLivenessOnly */ true);
2776 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2777 UsedAssumedInformation,
2778 /* CheckBBLivenessOnly */ true);
2779 A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2780 UsedAssumedInformation);
2781
2782 // If the returned position of the anchor scope has the noundef attribute, check
2783 // all returned instructions.
2784 if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2785 const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2786 if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2787 auto &RetPosNoUndefAA =
2788 A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2789 if (RetPosNoUndefAA.isKnownNoUndef())
2790 A.checkForAllInstructions(InspectReturnInstForUB, *this,
2791 {Instruction::Ret}, UsedAssumedInformation,
2792 /* CheckBBLivenessOnly */ true);
2793 }
2794 }
2795
2796 if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2797 UBPrevSize != KnownUBInsts.size())
2798 return ChangeStatus::CHANGED;
2799 return ChangeStatus::UNCHANGED;
2800 }
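A source-level example of the kind of instruction the memory-access inspector above classifies as known UB (illustration only; it relies on the pointer operand simplifying to a constant null in an address space where null is not a defined address):

  int deref_null() {
    int *P = nullptr;
    return *P; // load through a constant null pointer: inserted into KnownUBInsts
  }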
2801
2802 bool isKnownToCauseUB(Instruction *I) const override {
2803 return KnownUBInsts.count(I);
2804 }
2805
2806 bool isAssumedToCauseUB(Instruction *I) const override {
2807 // In simple words, if an instruction is not in the set assumed to _not_
2808 // cause UB, then it is assumed to cause UB (that includes those
2809 // in the KnownUBInsts set). The rest of the boilerplate
2810 // ensures that it is one of the instructions we test
2811 // for UB.
2812
2813 switch (I->getOpcode()) {
2814 case Instruction::Load:
2815 case Instruction::Store:
2816 case Instruction::AtomicCmpXchg:
2817 case Instruction::AtomicRMW:
2818 return !AssumedNoUBInsts.count(I);
2819 case Instruction::Br: {
2820 auto *BrInst = cast<BranchInst>(I);
2821 if (BrInst->isUnconditional())
2822 return false;
2823 return !AssumedNoUBInsts.count(I);
2824 } break;
2825 default:
2826 return false;
2827 }
2828 return false;
2829 }
2830
2831 ChangeStatus manifest(Attributor &A) override {
2832 if (KnownUBInsts.empty())
2833 return ChangeStatus::UNCHANGED;
2834 for (Instruction *I : KnownUBInsts)
2835 A.changeToUnreachableAfterManifest(I);
2836 return ChangeStatus::CHANGED;
2837 }
2838
2839 /// See AbstractAttribute::getAsStr()
2840 const std::string getAsStr() const override {
2841 return getAssumed() ? "undefined-behavior" : "no-ub";
2842 }
2843
2844 /// Note: The correctness of this analysis depends on the fact that the
2845 /// following 2 sets will stop changing after some point.
2846 /// "Change" here means that their size changes.
2847 /// The size of each set is monotonically increasing
2848 /// (we only add items to them) and it is upper bounded by the number of
2849 /// instructions in the processed function (we can never save more
2850 /// elements in either set than this number). Hence, at some point,
2851 /// they will stop increasing.
2852 /// Consequently, at some point, both sets will have stopped
2853 /// changing, effectively making the analysis reach a fixpoint.
2854
2855 /// Note: These 2 sets are disjoint and an instruction can be considered
2856 /// one of 3 things:
2857 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2858 /// the KnownUBInsts set.
2859 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2860 /// has a reason to assume it).
2861 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2862 /// could not find a reason to assume or prove that it can cause UB,
2863 /// hence it assumes it doesn't. We have a set for these instructions
2864 /// so that we don't reprocess them in every update.
2865 /// Note however that instructions in this set may cause UB.
2866
2867protected:
2868 /// A set of all live instructions _known_ to cause UB.
2869 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2870
2871private:
2872 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2873 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2874
2875 // Should be called on updates in which if we're processing an instruction
2876 // \p I that depends on a value \p V, one of the following has to happen:
2877 // - If the value is assumed, then stop.
2878 // - If the value is known but undef, then consider it UB.
2879 // - Otherwise, do specific processing with the simplified value.
2880 // We return None in the first 2 cases to signify that an appropriate
2881 // action was taken and the caller should stop.
2882 // Otherwise, we return the simplified value that the caller should
2883 // use for specific processing.
2884 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2885 Instruction *I) {
2886 bool UsedAssumedInformation = false;
2887 Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2888 IRPosition::value(*V), *this, UsedAssumedInformation);
2889 if (!UsedAssumedInformation) {
2890 // Don't depend on assumed values.
2891 if (!SimplifiedV.hasValue()) {
2892 // If it is known (which we tested above) but it doesn't have a value,
2893 // then we can assume `undef` and hence the instruction is UB.
2894 KnownUBInsts.insert(I);
2895 return llvm::None;
2896 }
2897 if (!SimplifiedV.getValue())
2898 return nullptr;
2899 V = *SimplifiedV;
2900 }
2901 if (isa<UndefValue>(V)) {
2902 KnownUBInsts.insert(I);
2903 return llvm::None;
2904 }
2905 return V;
2906 }
2907};
2908
2909struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2910 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2911 : AAUndefinedBehaviorImpl(IRP, A) {}
2912
2913 /// See AbstractAttribute::trackStatistics()
2914 void trackStatistics() const override {
2915     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2916                "Number of instructions known to have UB");
2917     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2918 KnownUBInsts.size();
2919 }
2920};
2921} // namespace
2922
2923/// ------------------------ Will-Return Attributes ----------------------------
2924
2925namespace {
2926 // Helper function that checks whether a function has any cycle that we do
2927 // not know to be bounded.
2928 // Loops with a known maximum trip count are considered bounded; any other cycle is not.
2929static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2930 ScalarEvolution *SE =
2931 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2932 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2933 // If either SCEV or LoopInfo is not available for the function then we assume
2934 // any cycle to be an unbounded cycle.
2935 // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2936 // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2937 if (!SE || !LI) {
2938 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2939 if (SCCI.hasCycle())
2940 return true;
2941 return false;
2942 }
2943
2944 // If there's irreducible control, the function may contain non-loop cycles.
2945 if (mayContainIrreducibleControl(F, LI))
2946 return true;
2947
2948 // Any loop that does not have a known max trip count is considered an unbounded cycle.
2949 for (auto *L : LI->getLoopsInPreorder()) {
2950 if (!SE->getSmallConstantMaxTripCount(L))
2951 return true;
2952 }
2953 return false;
2954}
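Source-level intuition (illustration only; whether SCEV can bound a loop depends on the actual IR): the first loop has a constant maximum trip count and does not block willreturn, while the second has no computable bound and is treated as an unbounded cycle.

  void bounded(int *A) {
    for (int I = 0; I < 128; ++I) // constant max trip count: bounded
      A[I] = 0;
  }

  void maybe_unbounded(volatile int *Flag) {
    while (*Flag) // no maximum trip count known to SCEV: unbounded cycle
      ;
  }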
2955
2956struct AAWillReturnImpl : public AAWillReturn {
2957 AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2958 : AAWillReturn(IRP, A) {}
2959
2960 /// See AbstractAttribute::initialize(...).
2961 void initialize(Attributor &A) override {
2962 AAWillReturn::initialize(A);
2963
2964 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2965 indicateOptimisticFixpoint();
2966 return;
2967 }
2968 }
2969
2970 /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2971 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2972 // Check for `mustprogress` in the scope and the associated function which
2973 // might be different if this is a call site.
2974 if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2975 (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2976 return false;
2977
2978 bool IsKnown;
2979 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2980 return IsKnown || !KnownOnly;
2981 return false;
2982 }
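For intuition (illustration only; it assumes the frontend marks the function mustprogress, as Clang does for C++, and that the Attributor can deduce that it only reads memory): such a function satisfies the implication checked above and is willreturn without inspecting any call sites.

  int count_nonzero(const int *A, int N) { // reads memory only; mustprogress assumed
    int C = 0;
    for (int I = 0; I < N; ++I)
      C += (A[I] != 0);
    return C;
  }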
2983
2984 /// See AbstractAttribute::updateImpl(...).
2985 ChangeStatus updateImpl(Attributor &A) override {
2986 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2987 return ChangeStatus::UNCHANGED;
2988
2989 auto CheckForWillReturn = [&](Instruction &I) {
2990 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2991 const auto &WillReturnAA =
2992 A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2993 if (WillReturnAA.isKnownWillReturn())
2994 return true;
2995 if (!WillReturnAA.isAssumedWillReturn())
2996 return false;
2997 const auto &NoRecurseAA =
2998 A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2999 return NoRecurseAA.isAssumedNoRecurse();
3000 };
3001
3002 bool UsedAssumedInformation = false;
3003 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3004 UsedAssumedInformation))
3005 return indicatePessimisticFixpoint();
3006
3007 return ChangeStatus::UNCHANGED;
3008 }
3009
3010 /// See AbstractAttribute::getAsStr()
3011 const std::string getAsStr() const override {
3012 return getAssumed() ? "willreturn" : "may-noreturn";
3013 }
3014};
3015
3016struct AAWillReturnFunction final : AAWillReturnImpl {
3017 AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3018 : AAWillReturnImpl(IRP, A) {}
3019
3020 /// See AbstractAttribute::initialize(...).
3021 void initialize(Attributor &A) override {
3022 AAWillReturnImpl::initialize(A);
3023
3024 Function *F = getAnchorScope();
3025 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3026 indicatePessimisticFixpoint();
3027 }
3028
3029 /// See AbstractAttribute::trackStatistics()
3030   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3031};
3032
3033 /// WillReturn attribute deduction for a call site.
3034struct AAWillReturnCallSite final : AAWillReturnImpl {
3035 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3036 : AAWillReturnImpl(IRP, A) {}
3037
3038 /// See AbstractAttribute::initialize(...).
3039 void initialize(Attributor &A) override {
3040 AAWillReturnImpl::initialize(A);
3041 Function *F = getAssociatedFunction();
3042 if (!F || !A.isFunctionIPOAmendable(*F))
3043 indicatePessimisticFixpoint();
3044 }
3045
3046 /// See AbstractAttribute::updateImpl(...).
3047 ChangeStatus updateImpl(Attributor &A) override {
3048 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3049 return ChangeStatus::UNCHANGED;
3050
3051 // TODO: Once we have call site specific value information we can provide
3052 // call site specific liveness information and then it makes
3053 // sense to specialize attributes for call sites arguments instead of
3054 // redirecting requests to the callee argument.
3055 Function *F = getAssociatedFunction();
3056 const IRPosition &FnPos = IRPosition::function(*F);
3057 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3058 return clampStateAndIndicateChange(getState(), FnAA.getState());
3059 }
3060
3061 /// See AbstractAttribute::trackStatistics()
3062   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3063};
3064} // namespace
3065
3066/// -------------------AAReachability Attribute--------------------------
3067
3068namespace {
3069struct AAReachabilityImpl : AAReachability {
3070 AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3071 : AAReachability(IRP, A) {}
3072
3073 const std::string getAsStr() const override {
3074 // TODO: Return the number of reachable queries.
3075 return "reachable";
3076 }
3077
3078 /// See AbstractAttribute::updateImpl(...).
3079 ChangeStatus updateImpl(Attributor &A) override {
3080 return ChangeStatus::UNCHANGED;
3081 }
3082};
3083
3084struct AAReachabilityFunction final : public AAReachabilityImpl {
3085 AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3086 : AAReachabilityImpl(IRP, A) {}
3087
3088 /// See AbstractAttribute::trackStatistics()
3089   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3090};
3091} // namespace
3092
3093/// ------------------------ NoAlias Argument Attribute ------------------------
3094
3095namespace {
3096struct AANoAliasImpl : AANoAlias {
3097 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3098     assert(getAssociatedType()->isPointerTy() &&
3099            "Noalias is a pointer attribute");
3100 }
3101
3102 const std::string getAsStr() const override {
3103 return getAssumed() ? "noalias" : "may-alias";
3104 }
3105};
3106
3107/// NoAlias attribute for a floating value.
3108struct AANoAliasFloating final : AANoAliasImpl {
3109 AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3110 : AANoAliasImpl(IRP, A) {}
3111
3112 /// See AbstractAttribute::initialize(...).
3113 void initialize(Attributor &A) override {
3114 AANoAliasImpl::initialize(A);
3115 Value *Val = &getAssociatedValue();
3116 do {
3117 CastInst *CI = dyn_cast<CastInst>(Val);
3118 if (!CI)
3119 break;
3120 Value *Base = CI->getOperand(0);
3121 if (!Base->hasOneUse())
3122 break;
3123 Val = Base;
3124 } while (true);
3125
3126 if (!Val->getType()->isPointerTy()) {
3127 indicatePessimisticFixpoint();
3128 return;
3129 }
3130
3131 if (isa<AllocaInst>(Val))
3132 indicateOptimisticFixpoint();
3133 else if (isa<ConstantPointerNull>(Val) &&
3134 !NullPointerIsDefined(getAnchorScope(),
3135 Val->getType()->getPointerAddressSpace()))
3136 indicateOptimisticFixpoint();
3137 else if (Val != &getAssociatedValue()) {
3138 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3139 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3140 if (ValNoAliasAA.isKnownNoAlias())
3141 indicateOptimisticFixpoint();
3142 }
3143 }
3144
3145 /// See AbstractAttribute::updateImpl(...).
3146 ChangeStatus updateImpl(Attributor &A) override {
3147 // TODO: Implement this.
3148 return indicatePessimisticFixpoint();
3149 }
3150
3151 /// See AbstractAttribute::trackStatistics()
3152 void trackStatistics() const override {
3153     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3154 }
3155};
3156
3157/// NoAlias attribute for an argument.
3158struct AANoAliasArgument final
3159 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3160 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3161 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3162
3163 /// See AbstractAttribute::initialize(...).
3164 void initialize(Attributor &A) override {
3165 Base::initialize(A);
3166 // See callsite argument attribute and callee argument attribute.
3167 if (hasAttr({Attribute::ByVal}))
3168 indicateOptimisticFixpoint();
3169 }
3170
3171 /// See AbstractAttribute::update(...).
3172 ChangeStatus updateImpl(Attributor &A) override {
3173 // We have to make sure no-alias on the argument does not break
3174 // synchronization when this is a callback argument, see also [1] below.
3175 // If synchronization cannot be affected, we delegate to the base updateImpl
3176 // function, otherwise we give up for now.
3177
3178 // If the function is no-sync, no-alias cannot break synchronization.
3179 const auto &NoSyncAA =
3180 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3181 DepClassTy::OPTIONAL);
3182 if (NoSyncAA.isAssumedNoSync())
3183 return Base::updateImpl(A);
3184
3185 // If the argument is read-only, no-alias cannot break synchronization.
3186 bool IsKnown;
3187 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3188 return Base::updateImpl(A);
3189
3190 // If the argument is never passed through callbacks, no-alias cannot break
3191 // synchronization.
3192 bool UsedAssumedInformation = false;
3193 if (A.checkForAllCallSites(
3194 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3195 true, UsedAssumedInformation))
3196 return Base::updateImpl(A);
3197
3198 // TODO: add no-alias but make sure it doesn't break synchronization by
3199 // introducing fake uses. See:
3200 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3201 // International Workshop on OpenMP 2018,
3202 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3203
3204 return indicatePessimisticFixpoint();
3205 }
3206
3207 /// See AbstractAttribute::trackStatistics()
3208   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3209};
3210
3211struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3212 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3213 : AANoAliasImpl(IRP, A) {}
3214
3215 /// See AbstractAttribute::initialize(...).
3216 void initialize(Attributor &A) override {
3217 // See callsite argument attribute and callee argument attribute.
3218 const auto &CB = cast<CallBase>(getAnchorValue());
3219 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3220 indicateOptimisticFixpoint();
3221 Value &Val = getAssociatedValue();
3222 if (isa<ConstantPointerNull>(Val) &&
3223 !NullPointerIsDefined(getAnchorScope(),
3224 Val.getType()->getPointerAddressSpace()))
3225 indicateOptimisticFixpoint();
3226 }
3227
3228 /// Determine if the underlying value may alias with the call site argument
3229 /// \p OtherArgNo of \p ICS (= the underlying call site).
3230 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3231 const AAMemoryBehavior &MemBehaviorAA,
3232 const CallBase &CB, unsigned OtherArgNo) {
3233 // We do not need to worry about aliasing with the underlying IRP.
3234 if (this->getCalleeArgNo() == (int)OtherArgNo)
3235 return false;
3236
3237 // If it is not a pointer or pointer vector we do not alias.
3238 const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3239 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3240 return false;
3241
3242 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3243 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3244
3245 // If the argument is readnone, there is no read-write aliasing.
3246 if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3247 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3248 return false;
3249 }
3250
3251 // If the argument is readonly and the underlying value is readonly, there
3252 // is no read-write aliasing.
3253 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3254 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3255 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3256 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3257 return false;
3258 }
3259
3260 // We have to utilize actual alias analysis queries so we need the object.
3261 if (!AAR)
3262 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3263
3264 // Try to rule it out at the call site.
3265 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3266    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3267                         "callsite arguments: "
3268                      << getAssociatedValue() << " " << *ArgOp << " => "
3269                      << (IsAliasing ? "" : "no-") << "alias \n");
3270
3271 return IsAliasing;
3272 }
3273
3274 bool
3275 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3276 const AAMemoryBehavior &MemBehaviorAA,
3277 const AANoAlias &NoAliasAA) {
3278 // We can deduce "noalias" if the following conditions hold.
3279 // (i) Associated value is assumed to be noalias in the definition.
3280 // (ii) Associated value is assumed to be no-capture in all the uses
3281 // possibly executed before this callsite.
3282 // (iii) There is no other pointer argument which could alias with the
3283 // value.
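    // As a sketch (hypothetical IR, not taken from this file): for
    //   %p = call noalias i8* @malloc(i64 4)
    //   call void @use(i8* %p)
    // condition (i) holds for %p at the @use call site through the callee's
    // noalias return; conditions (ii) and (iii) are what the checks below
    // try to establish.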
3284
3285 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3286 if (!AssociatedValueIsNoAliasAtDef) {
3287      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3288                        << " is not no-alias at the definition\n");
3289 return false;
3290 }
3291
3292 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3293 const auto &DerefAA = A.getAAFor<AADereferenceable>(
3294 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3295 return DerefAA.getAssumedDereferenceableBytes();
3296 };
3297
3298 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3299
3300 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3301 const Function *ScopeFn = VIRP.getAnchorScope();
3302 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3303 // Check whether the value is captured in the scope using AANoCapture.
3304 // Look at CFG and check only uses possibly executed before this
3305 // callsite.
3306 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3307 Instruction *UserI = cast<Instruction>(U.getUser());
3308
3309      // If UserI is the current instruction and there is a single potential use
3310      // of the value in UserI, we allow the use.
3311 // TODO: We should inspect the operands and allow those that cannot alias
3312 // with the value.
3313 if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3314 return true;
3315
3316 if (ScopeFn) {
3317 if (auto *CB = dyn_cast<CallBase>(UserI)) {
3318 if (CB->isArgOperand(&U)) {
3319
3320 unsigned ArgNo = CB->getArgOperandNo(&U);
3321
3322 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3323 *this, IRPosition::callsite_argument(*CB, ArgNo),
3324 DepClassTy::OPTIONAL);
3325
3326 if (NoCaptureAA.isAssumedNoCapture())
3327 return true;
3328 }
3329 }
3330
3331 if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3332 return true;
3333 }
3334
3335 // TODO: We should track the capturing uses in AANoCapture but the problem
3336 // is CGSCC runs. For those we would need to "allow" AANoCapture for
3337 // a value in the module slice.
3338 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3339 case UseCaptureKind::NO_CAPTURE:
3340 return true;
3341 case UseCaptureKind::MAY_CAPTURE:
3342        LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3343                          << "\n");
3344 return false;
3345 case UseCaptureKind::PASSTHROUGH:
3346 Follow = true;
3347 return true;
3348 }
3349      llvm_unreachable("unknown UseCaptureKind");
3350 };
3351
3352 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3353 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3354        LLVM_DEBUG(
3355            dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3356                   << " cannot be noalias as it is potentially captured\n");
3357 return false;
3358 }
3359 }
3360 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3361
3362 // Check there is no other pointer argument which could alias with the
3363 // value passed at this call site.
3364 // TODO: AbstractCallSite
3365 const auto &CB = cast<CallBase>(getAnchorValue());
3366 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3367 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3368 return false;
3369
3370 return true;
3371 }
3372
3373 /// See AbstractAttribute::updateImpl(...).
3374 ChangeStatus updateImpl(Attributor &A) override {
3375 // If the argument is readnone we are done as there are no accesses via the
3376 // argument.
3377 auto &MemBehaviorAA =
3378 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3379 if (MemBehaviorAA.isAssumedReadNone()) {
3380 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3381 return ChangeStatus::UNCHANGED;
3382 }
3383
3384 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3385 const auto &NoAliasAA =
3386 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3387
3388 AAResults *AAR = nullptr;
3389 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3390 NoAliasAA)) {
3391      LLVM_DEBUG(
3392          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3393 return ChangeStatus::UNCHANGED;
3394 }
3395
3396 return indicatePessimisticFixpoint();
3397 }
3398
3399 /// See AbstractAttribute::trackStatistics()
3400  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3401};
3402
3403/// NoAlias attribute for function return value.
3404struct AANoAliasReturned final : AANoAliasImpl {
3405 AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3406 : AANoAliasImpl(IRP, A) {}
3407
3408 /// See AbstractAttribute::initialize(...).
3409 void initialize(Attributor &A) override {
3410 AANoAliasImpl::initialize(A);
3411 Function *F = getAssociatedFunction();
3412 if (!F || F->isDeclaration())
3413 indicatePessimisticFixpoint();
3414 }
3415
3416 /// See AbstractAttribute::updateImpl(...).
3417 virtual ChangeStatus updateImpl(Attributor &A) override {
3418
3419 auto CheckReturnValue = [&](Value &RV) -> bool {
3420 if (Constant *C = dyn_cast<Constant>(&RV))
3421 if (C->isNullValue() || isa<UndefValue>(C))
3422 return true;
3423
3424 /// For now, we can only deduce noalias if we have call sites.
3425 /// FIXME: add more support.
3426 if (!isa<CallBase>(&RV))
3427 return false;
3428
3429 const IRPosition &RVPos = IRPosition::value(RV);
3430 const auto &NoAliasAA =
3431 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3432 if (!NoAliasAA.isAssumedNoAlias())
3433 return false;
3434
3435 const auto &NoCaptureAA =
3436 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3437 return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3438 };
3439
3440 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3441 return indicatePessimisticFixpoint();
3442
3443 return ChangeStatus::UNCHANGED;
3444 }
3445
3446 /// See AbstractAttribute::trackStatistics()
3447  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3448};
3449
3450/// NoAlias attribute deduction for a call site return value.
3451struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3452 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3453 : AANoAliasImpl(IRP, A) {}
3454
3455 /// See AbstractAttribute::initialize(...).
3456 void initialize(Attributor &A) override {
3457 AANoAliasImpl::initialize(A);
3458 Function *F = getAssociatedFunction();
3459 if (!F || F->isDeclaration())
3460 indicatePessimisticFixpoint();
3461 }
3462
3463 /// See AbstractAttribute::updateImpl(...).
3464 ChangeStatus updateImpl(Attributor &A) override {
3465 // TODO: Once we have call site specific value information we can provide
3466 // call site specific liveness information and then it makes
3467    //       sense to specialize attributes for call site arguments instead of
3468 // redirecting requests to the callee argument.
3469 Function *F = getAssociatedFunction();
3470 const IRPosition &FnPos = IRPosition::returned(*F);
3471 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3472 return clampStateAndIndicateChange(getState(), FnAA.getState());
3473 }
3474
3475 /// See AbstractAttribute::trackStatistics()
3476  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3477};
3478} // namespace
3479
3480/// -------------------AAIsDead Function Attribute-----------------------
3481
3482namespace {
3483struct AAIsDeadValueImpl : public AAIsDead {
3484 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3485
3486 /// See AbstractAttribute::initialize(...).
3487 void initialize(Attributor &A) override {
3488 if (auto *Scope = getAnchorScope())
3489 if (!A.isRunOn(*Scope))
3490 indicatePessimisticFixpoint();
3491 }
3492
3493 /// See AAIsDead::isAssumedDead().
3494 bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3495
3496 /// See AAIsDead::isKnownDead().
3497 bool isKnownDead() const override { return isKnown(IS_DEAD); }
3498
3499 /// See AAIsDead::isAssumedDead(BasicBlock *).
3500 bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3501
3502 /// See AAIsDead::isKnownDead(BasicBlock *).
3503 bool isKnownDead(const BasicBlock *BB) const override { return false; }
3504
3505 /// See AAIsDead::isAssumedDead(Instruction *I).
3506 bool isAssumedDead(const Instruction *I) const override {
3507 return I == getCtxI() && isAssumedDead();
3508 }
3509
3510 /// See AAIsDead::isKnownDead(Instruction *I).
3511 bool isKnownDead(const Instruction *I) const override {
3512 return isAssumedDead(I) && isKnownDead();
3513 }
3514
3515 /// See AbstractAttribute::getAsStr().
3516 virtual const std::string getAsStr() const override {
3517 return isAssumedDead() ? "assumed-dead" : "assumed-live";
3518 }
3519
3520 /// Check if all uses are assumed dead.
3521 bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3522    // Callers might not check the type; void has no uses.
3523 if (V.getType()->isVoidTy() || V.use_empty())
3524 return true;
3525
3526 // If we replace a value with a constant there are no uses left afterwards.
3527 if (!isa<Constant>(V)) {
3528 if (auto *I = dyn_cast<Instruction>(&V))
3529 if (!A.isRunOn(*I->getFunction()))
3530 return false;
3531 bool UsedAssumedInformation = false;
3532 Optional<Constant *> C =
3533 A.getAssumedConstant(V, *this, UsedAssumedInformation);
3534 if (!C.hasValue() || *C)
3535 return true;
3536 }
3537
3538 auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3539 // Explicitly set the dependence class to required because we want a long
3540 // chain of N dependent instructions to be considered live as soon as one is
3541 // without going through N update cycles. This is not required for
3542 // correctness.
3543 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3544 DepClassTy::REQUIRED,
3545 /* IgnoreDroppableUses */ false);
3546 }
3547
3548 /// Determine if \p I is assumed to be side-effect free.
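  /// A null instruction and anything that would be trivially dead qualify
  /// immediately; otherwise only non-intrinsic call sites that are assumed
  /// nounwind and read-only are treated as side-effect free.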
3549 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3550 if (!I || wouldInstructionBeTriviallyDead(I))
3551 return true;
3552
3553 auto *CB = dyn_cast<CallBase>(I);
3554 if (!CB || isa<IntrinsicInst>(CB))
3555 return false;
3556
3557 const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3558 const auto &NoUnwindAA =
3559 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3560 if (!NoUnwindAA.isAssumedNoUnwind())
3561 return false;
3562 if (!NoUnwindAA.isKnownNoUnwind())
3563 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3564
3565 bool IsKnown;
3566 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3567 }
3568};
3569
3570struct AAIsDeadFloating : public AAIsDeadValueImpl {
3571 AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3572 : AAIsDeadValueImpl(IRP, A) {}
3573
3574 /// See AbstractAttribute::initialize(...).
3575 void initialize(Attributor &A) override {
3576 AAIsDeadValueImpl::initialize(A);
3577
3578 if (isa<UndefValue>(getAssociatedValue())) {
3579 indicatePessimisticFixpoint();
3580 return;
3581 }
3582
3583 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3584 if (!isAssumedSideEffectFree(A, I)) {
3585 if (!isa_and_nonnull<StoreInst>(I))
3586 indicatePessimisticFixpoint();
3587 else
3588 removeAssumedBits(HAS_NO_EFFECT);
3589 }
3590 }
3591
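  /// A store is treated as dead if it is not volatile and every potential
  /// copy of the stored value is itself assumed dead.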
3592 bool isDeadStore(Attributor &A, StoreInst &SI) {
3593    // The LangRef now states that a volatile store is not UB/dead, so let's skip them.
3594 if (SI.isVolatile())
3595 return false;
3596
3597 bool UsedAssumedInformation = false;
3598 SmallSetVector<Value *, 4> PotentialCopies;
3599 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3600 UsedAssumedInformation))
3601 return false;
3602 return llvm::all_of(PotentialCopies, [&](Value *V) {
3603 return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3604 UsedAssumedInformation);
3605 });
3606 }
3607
3608 /// See AbstractAttribute::getAsStr().
3609 const std::string getAsStr() const override {
3610 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3611 if (isa_and_nonnull<StoreInst>(I))
3612 if (isValidState())
3613 return "assumed-dead-store";
3614 return AAIsDeadValueImpl::getAsStr();
3615 }
3616
3617 /// See AbstractAttribute::updateImpl(...).
3618 ChangeStatus updateImpl(Attributor &A) override {
3619 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3620 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3621 if (!isDeadStore(A, *SI))
3622 return indicatePessimisticFixpoint();
3623 } else {
3624 if (!isAssumedSideEffectFree(A, I))
3625 return indicatePessimisticFixpoint();
3626 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3627 return indicatePessimisticFixpoint();
3628 }
3629 return ChangeStatus::UNCHANGED;
3630 }
3631
3632 bool isRemovableStore() const override {
3633 return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3634 }
3635
3636 /// See AbstractAttribute::manifest(...).
3637 ChangeStatus manifest(Attributor &A) override {
3638 Value &V = getAssociatedValue();
3639 if (auto *I = dyn_cast<Instruction>(&V)) {
3640      // If we get here we basically know the users are all dead. We check
3641      // isAssumedSideEffectFree again here because it might not hold anymore,
3642      // in which case only the users are dead but the instruction (= call) is
3643      // still needed.
3644 if (isa<StoreInst>(I) ||
3645 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3646 A.deleteAfterManifest(*I);
3647 return ChangeStatus::CHANGED;
3648 }
3649 }
3650 return ChangeStatus::UNCHANGED;
3651 }
3652
3653 /// See AbstractAttribute::trackStatistics()
3654 void trackStatistics() const override {
3655    STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3656 }
3657};
3658
3659struct AAIsDeadArgument : public AAIsDeadFloating {
3660 AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3661 : AAIsDeadFloating(IRP, A) {}
3662
3663 /// See AbstractAttribute::initialize(...).
3664 void initialize(Attributor &A) override {
3665 AAIsDeadFloating::initialize(A);
3666 if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3667 indicatePessimisticFixpoint();
3668 }
3669
3670 /// See AbstractAttribute::manifest(...).
3671 ChangeStatus manifest(Attributor &A) override {
3672 Argument &Arg = *getAssociatedArgument();
3673 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3674 if (A.registerFunctionSignatureRewrite(
3675 Arg, /* ReplacementTypes */ {},
3676 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3677 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3678 return ChangeStatus::CHANGED;
3679 }
3680 return ChangeStatus::UNCHANGED;
3681 }
3682
3683 /// See AbstractAttribute::trackStatistics()
3684  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3685};
3686
3687struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3688 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3689 : AAIsDeadValueImpl(IRP, A) {}
3690
3691 /// See AbstractAttribute::initialize(...).
3692 void initialize(Attributor &A) override {
3693 AAIsDeadValueImpl::initialize(A);
3694 if (isa<UndefValue>(getAssociatedValue()))
3695 indicatePessimisticFixpoint();
3696 }
3697
3698 /// See AbstractAttribute::updateImpl(...).
3699 ChangeStatus updateImpl(Attributor &A) override {
3700 // TODO: Once we have call site specific value information we can provide
3701 // call site specific liveness information and then it makes
3702    //       sense to specialize attributes for call site arguments instead of
3703 // redirecting requests to the callee argument.
3704 Argument *Arg = getAssociatedArgument();
3705 if (!Arg)
3706 return indicatePessimisticFixpoint();
3707 const IRPosition &ArgPos = IRPosition::argument(*Arg);
3708 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3709 return clampStateAndIndicateChange(getState(), ArgAA.getState());
3710 }
3711
3712 /// See AbstractAttribute::manifest(...).
3713 ChangeStatus manifest(Attributor &A) override {
3714 CallBase &CB = cast<CallBase>(getAnchorValue());
3715 Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3716    assert(!isa<UndefValue>(U.get()) &&
3717           "Expected undef values to be filtered out!");
3718 UndefValue &UV = *UndefValue::get(U->getType());
3719 if (A.changeUseAfterManifest(U, UV))
3720 return ChangeStatus::CHANGED;
3721 return ChangeStatus::UNCHANGED;
3722 }
3723
3724 /// See AbstractAttribute::trackStatistics()
3725  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3726};
3727
3728struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3729 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3730 : AAIsDeadFloating(IRP, A) {}
3731
3732 /// See AAIsDead::isAssumedDead().
3733 bool isAssumedDead() const override {
3734 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3735 }
3736
3737 /// See AbstractAttribute::initialize(...).
3738 void initialize(Attributor &A) override {
3739 AAIsDeadFloating::initialize(A);
3740 if (isa<UndefValue>(getAssociatedValue())) {
3741 indicatePessimisticFixpoint();
3742 return;
3743 }
3744
3745 // We track this separately as a secondary state.
3746 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3747 }
3748
3749 /// See AbstractAttribute::updateImpl(...).
3750 ChangeStatus updateImpl(Attributor &A) override {
3751 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3752 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3753 IsAssumedSideEffectFree = false;
3754 Changed = ChangeStatus::CHANGED;
3755 }
3756 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3757 return indicatePessimisticFixpoint();
3758 return Changed;
3759 }
3760
3761 /// See AbstractAttribute::trackStatistics()
3762 void trackStatistics() const override {
3763 if (IsAssumedSideEffectFree)
3764      STATS_DECLTRACK_CSRET_ATTR(IsDead)
3765 else
3766      STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3767 }
3768
3769 /// See AbstractAttribute::getAsStr().
3770 const std::string getAsStr() const override {
3771 return isAssumedDead()
3772 ? "assumed-dead"
3773 : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3774 }
3775
3776private:
3777 bool IsAssumedSideEffectFree = true;
3778};
3779
3780struct AAIsDeadReturned : public AAIsDeadValueImpl {
3781 AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3782 : AAIsDeadValueImpl(IRP, A) {}
3783
3784 /// See AbstractAttribute::updateImpl(...).
3785 ChangeStatus updateImpl(Attributor &A) override {
3786
3787 bool UsedAssumedInformation = false;
3788 A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3789 {Instruction::Ret}, UsedAssumedInformation);
3790
3791 auto PredForCallSite = [&](AbstractCallSite ACS) {
3792 if (ACS.isCallbackCall() || !ACS.getInstruction())
3793 return false;
3794 return areAllUsesAssumedDead(A, *ACS.getInstruction());
3795 };
3796
3797 if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3798 UsedAssumedInformation))
3799 return indicatePessimisticFixpoint();
3800
3801 return ChangeStatus::UNCHANGED;
3802 }
3803
3804 /// See AbstractAttribute::manifest(...).
3805 ChangeStatus manifest(Attributor &A) override {
3806 // TODO: Rewrite the signature to return void?
3807 bool AnyChange = false;
3808 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3809 auto RetInstPred = [&](Instruction &I) {
3810 ReturnInst &RI = cast<ReturnInst>(I);
3811 if (!isa<UndefValue>(RI.getReturnValue()))
3812 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3813 return true;
3814 };
3815 bool UsedAssumedInformation = false;
3816 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3817 UsedAssumedInformation);
3818 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3819 }
3820
3821 /// See AbstractAttribute::trackStatistics()
3822  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3823};
3824
3825struct AAIsDeadFunction : public AAIsDead {
3826 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3827
3828 /// See AbstractAttribute::initialize(...).
3829 void initialize(Attributor &A) override {
3830 Function *F = getAnchorScope();
3831 if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3832 indicatePessimisticFixpoint();
3833 return;
3834 }
3835 ToBeExploredFrom.insert(&F->getEntryBlock().front());
3836 assumeLive(A, F->getEntryBlock());
3837 }
3838
3839 /// See AbstractAttribute::getAsStr().
3840 const std::string getAsStr() const override {
3841 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3842 std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3843 std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3844 std::to_string(KnownDeadEnds.size()) + "]";
3845 }
3846
3847 /// See AbstractAttribute::manifest(...).
3848 ChangeStatus manifest(Attributor &A) override {
3849    assert(getState().isValidState() &&
3850           "Attempted to manifest an invalid state!");
3851
3852 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3853 Function &F = *getAnchorScope();
3854
3855 if (AssumedLiveBlocks.empty()) {
3856 A.deleteAfterManifest(F);
3857 return ChangeStatus::CHANGED;
3858 }
3859
3860 // Flag to determine if we can change an invoke to a call assuming the
3861 // callee is nounwind. This is not possible if the personality of the
3862    // function allows catching asynchronous exceptions.
3863 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3864
3865 KnownDeadEnds.set_union(ToBeExploredFrom);
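    // For every known dead end that is a call site: if the callee is assumed
    // noreturn, or it is an invoke and invoke-to-call rewriting is allowed,
    // either register the invoke as having dead successors or turn the code
    // following the call into 'unreachable'.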
3866 for (const Instruction *DeadEndI : KnownDeadEnds) {
3867 auto *CB = dyn_cast<CallBase>(DeadEndI);
3868 if (!CB)
3869 continue;
3870 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3871 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3872 bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3873 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3874 continue;
3875
3876 if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3877 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3878 else
3879 A.changeToUnreachableAfterManifest(
3880 const_cast<Instruction *>(DeadEndI->getNextNode()));
3881 HasChanged = ChangeStatus::CHANGED;
3882 }
3883
3884    STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3885 for (BasicBlock &BB : F)
3886 if (!AssumedLiveBlocks.count(&BB)) {
3887 A.deleteAfterManifest(BB);
3888        ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3889 HasChanged = ChangeStatus::CHANGED;
3890 }
3891
3892 return HasChanged;
3893 }
3894
3895 /// See AbstractAttribute::updateImpl(...).
3896 ChangeStatus updateImpl(Attributor &A) override;
3897
3898 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3899    assert(From->getParent() == getAnchorScope() &&
3900           To->getParent() == getAnchorScope() &&
3901           "Used AAIsDead of the wrong function");
3902 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3903 }
3904
3905 /// See AbstractAttribute::trackStatistics()
3906 void trackStatistics() const override {}
3907
3908 /// Returns true if the function is assumed dead.
3909 bool isAssumedDead() const override { return false; }
3910
3911 /// See AAIsDead::isKnownDead().
3912 bool isKnownDead() const override { return false; }
3913
3914 /// See AAIsDead::isAssumedDead(BasicBlock *).
3915 bool isAssumedDead(const BasicBlock *BB) const override {
3916    assert(BB->getParent() == getAnchorScope() &&
3917           "BB must be in the same anchor scope function.");
3918
3919 if (!getAssumed())
3920 return false;
3921 return !AssumedLiveBlocks.count(BB);
3922 }
3923
3924 /// See AAIsDead::isKnownDead(BasicBlock *).
3925 bool isKnownDead(const BasicBlock *BB) const override {
3926 return getKnown() && isAssumedDead(BB);
3927 }
3928
3929 /// See AAIsDead::isAssumed(Instruction *I).
3930 bool isAssumedDead(const Instruction *I) const override {
3931    assert(I->getParent()->getParent() == getAnchorScope() &&
3932           "Instruction must be in the same anchor scope function.");
3933
3934 if (!getAssumed())
3935 return false;
3936
3937    // If it is not in AssumedLiveBlocks then it is for sure dead.
3938    // Otherwise, it can still be after a noreturn call in a live block.
3939 if (!AssumedLiveBlocks.count(I->getParent()))
3940 return true;
3941
3942 // If it is not after a liveness barrier it is live.
3943 const Instruction *PrevI = I->getPrevNode();
3944 while (PrevI) {
3945 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3946 return true;
3947 PrevI = PrevI->getPrevNode();
3948 }
3949 return false;
3950 }
3951
3952 /// See AAIsDead::isKnownDead(Instruction *I).
3953 bool isKnownDead(const Instruction *I) const override {
3954 return getKnown() && isAssumedDead(I);
3955 }
3956
3957 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3958 /// that internal function called from \p BB should now be looked at.
3959 bool assumeLive(Attributor &A, const BasicBlock &BB) {
3960 if (!AssumedLiveBlocks.insert(&BB).second)
3961 return false;
3962
3963 // We assume that all of BB is (probably) live now and if there are calls to
3964 // internal functions we will assume that those are now live as well. This
3965 // is a performance optimization for blocks with calls to a lot of internal
3966 // functions. It can however cause dead functions to be treated as live.
3967 for (const Instruction &I : BB)
3968 if (const auto *CB = dyn_cast<CallBase>(&I))
3969 if (const Function *F = CB->getCalledFunction())
3970 if (F->hasLocalLinkage())
3971 A.markLiveInternalFunction(*F);
3972 return true;
3973 }
3974
3975 /// Collection of instructions that need to be explored again, e.g., we
3976 /// did assume they do not transfer control to (one of their) successors.
3977 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3978
3979 /// Collection of instructions that are known to not transfer control.
3980 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3981
3982 /// Collection of all assumed live edges
3983 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3984
3985 /// Collection of all assumed live BasicBlocks.
3986 DenseSet<const BasicBlock *> AssumedLiveBlocks;
3987};
3988
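/// Identify the instructions that stay alive after \p CB. Returns true if
/// assumed (not yet known) information was used: when the callee is only
/// assumed noreturn no successor is added, otherwise the next instruction
/// (or the normal successor of a terminator) is considered alive.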
3989static bool
3990identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3991 AbstractAttribute &AA,
3992 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3993 const IRPosition &IPos = IRPosition::callsite_function(CB);
3994
3995 const auto &NoReturnAA =
3996 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3997 if (NoReturnAA.isAssumedNoReturn())
3998 return !NoReturnAA.isKnownNoReturn();
3999 if (CB.isTerminator())
4000 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4001 else
4002 AliveSuccessors.push_back(CB.getNextNode());
4003 return false;
4004}
4005
4006static bool
4007identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4008 AbstractAttribute &AA,
4009 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4010 bool UsedAssumedInformation =
4011 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4012
4013 // First, determine if we can change an invoke to a call assuming the
4014 // callee is nounwind. This is not possible if the personality of the
4015  // function allows catching asynchronous exceptions.
4016 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4017 AliveSuccessors.push_back(&II.getUnwindDest()->front());
4018 } else {
4019 const IRPosition &IPos = IRPosition::callsite_function(II);
4020 const auto &AANoUnw =
4021 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4022 if (AANoUnw.isAssumedNoUnwind()) {
4023 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4024 } else {
4025 AliveSuccessors.push_back(&II.getUnwindDest()->front());
4026 }
4027 }
4028 return UsedAssumedInformation;
4029}
4030
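/// For a branch: an unconditional branch keeps its single successor alive; a
/// conditional branch with an assumed constant condition keeps only the taken
/// successor, an undef or still-unknown condition keeps neither for now, and
/// any other condition keeps both.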
4031static bool
4032identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4033 AbstractAttribute &AA,
4034 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4035 bool UsedAssumedInformation = false;
4036 if (BI.getNumSuccessors() == 1) {
4037 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4038 } else {
4039 Optional<Constant *> C =
4040 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4041 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4042 // No value yet, assume both edges are dead.
4043 } else if (isa_and_nonnull<ConstantInt>(*C)) {
4044 const BasicBlock *SuccBB =
4045 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4046 AliveSuccessors.push_back(&SuccBB->front());
4047 } else {
4048 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4049 AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4050 UsedAssumedInformation = false;
4051 }
4052 }
4053 return UsedAssumedInformation;
4054}
4055
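/// For a switch: an assumed constant condition keeps only the matching case
/// successor (or the default destination), otherwise every successor is kept
/// alive; an undef or still-unknown condition keeps none for now.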
4056static bool
4057identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4058 AbstractAttribute &AA,
4059 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4060 bool UsedAssumedInformation = false;
4061 Optional<Constant *> C =
4062 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4063 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4064 // No value yet, assume all edges are dead.
4065 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4066 for (auto &CaseIt : SI.cases()) {
4067 if (CaseIt.getCaseValue() == C.getValue()) {
4068 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4069 return UsedAssumedInformation;
4070 }
4071 }
4072 AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4073 return UsedAssumedInformation;
4074 } else {
4075 for (const BasicBlock *SuccBB : successors(SI.getParent()))
4076 AliveSuccessors.push_back(&SuccBB->front());
4077 }
4078 return UsedAssumedInformation;
4079}
4080
4081ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4082 ChangeStatus Change = ChangeStatus::UNCHANGED;
4083
4084  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4085                    << getAnchorScope()->size() << "] BBs and "
4086                    << ToBeExploredFrom.size() << " exploration points and "
4087                    << KnownDeadEnds.size() << " known dead ends\n");
4088
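  // The exploration below propagates liveness forward: each worklist entry is
  // fast-forwarded to the next call or terminator, its alive successors are
  // identified (possibly based on assumed information), and newly live blocks
  // feed back into the worklist.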
4089 // Copy and clear the list of instructions we need to explore from. It is
4090 // refilled with instructions the next update has to look at.
4091 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4092 ToBeExploredFrom.end());
4093 decltype(ToBeExploredFrom) NewToBeExploredFrom;
4094
4095 SmallVector<const Instruction *, 8> AliveSuccessors;
4096 while (!Worklist.empty()) {
4097 const Instruction *I = Worklist.pop_back_val();
4098    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4099
4100 // Fast forward for uninteresting instructions. We could look for UB here
4101 // though.
4102 while (!I->isTerminator() && !isa<CallBase>(I))
4103 I = I->getNextNode();
4104
4105 AliveSuccessors.clear();
4106
4107 bool UsedAssumedInformation = false;
4108 switch (I->getOpcode()) {
4109 // TODO: look for (assumed) UB to backwards propagate "deadness".
4110 default:
4111      assert(I->isTerminator() &&
4112             "Expected non-terminators to be handled already!");
4113 for (const BasicBlock *SuccBB : successors(I->getParent()))
4114 AliveSuccessors.push_back(&SuccBB->front());
4115 break;
4116 case Instruction::Call:
4117 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4118 *this, AliveSuccessors);
4119 break;
4120 case Instruction::Invoke:
4121 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4122 *this, AliveSuccessors);
4123 break;
4124 case Instruction::Br:
4125 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4126 *this, AliveSuccessors);
4127 break;
4128 case Instruction::Switch:
4129 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4130 *this, AliveSuccessors);
4131 break;
4132 }
4133
4134 if (UsedAssumedInformation) {
4135 NewToBeExploredFrom.insert(I);
4136 } else if (AliveSuccessors.empty() ||
4137 (I->isTerminator() &&
4138 AliveSuccessors.size() < I->getNumSuccessors())) {
4139 if (KnownDeadEnds.insert(I))
4140 Change = ChangeStatus::CHANGED;
4141 }
4142
4143    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4144                      << AliveSuccessors.size() << " UsedAssumedInformation: "
4145                      << UsedAssumedInformation << "\n");
4146
4147 for (const Instruction *AliveSuccessor : AliveSuccessors) {
4148 if (!I->isTerminator()) {
4149        assert(AliveSuccessors.size() == 1 &&
4150               "Non-terminator expected to have a single successor!");
4151 Worklist.push_back(AliveSuccessor);
4152 } else {
4153        // Record the assumed live edge.
4154 auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4155 if (AssumedLiveEdges.insert(Edge).second)
4156 Change = ChangeStatus::CHANGED;
4157 if (assumeLive(A, *AliveSuccessor->getParent()))
4158 Worklist.push_back(AliveSuccessor);
4159 }
4160 }
4161 }
4162
4163 // Check if the content of ToBeExploredFrom changed, ignore the order.
4164 if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4165 llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4166 return !ToBeExploredFrom.count(I);
4167 })) {
4168 Change = ChangeStatus::CHANGED;
4169 ToBeExploredFrom = std::move(NewToBeExploredFrom);
4170 }
4171
4172 // If we know everything is live there is no need to query for liveness.
4173 // Instead, indicating a pessimistic fixpoint will cause the state to be
4174 // "invalid" and all queries to be answered conservatively without lookups.
4175  // To be in this state we have to (1) have finished the exploration, (2) not
4176  // have ruled unreachable code dead, and (3) not have discovered any
4177  // non-trivial dead end.
4178 if (ToBeExploredFrom.empty() &&
4179 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4180 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4181 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4182 }))
4183 return indicatePessimisticFixpoint();
4184 return Change;
4185}
4186
4187/// Liveness information for a call site.
4188struct AAIsDeadCallSite final : AAIsDeadFunction {
4189 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4190 : AAIsDeadFunction(IRP, A) {}
4191
4192 /// See AbstractAttribute::initialize(...).
4193 void initialize(Attributor &A) override {
4194 // TODO: Once we have call site specific value information we can provide
4195 // call site specific liveness information and then it makes
4196 // sense to specialize attributes for call sites instead of
4197 // redirecting requests to the callee.
4198    llvm_unreachable("Abstract attributes for liveness are not "
4199                     "supported for call sites yet!");
4200 }
4201
4202 /// See AbstractAttribute::updateImpl(...).
4203 ChangeStatus updateImpl(Attributor &A) override {
4204 return indicatePessimisticFixpoint();
4205 }
4206
4207 /// See AbstractAttribute::trackStatistics()
4208 void trackStatistics() const override {}
4209};
4210} // namespace
4211
4212/// -------------------- Dereferenceable Argument Attribute --------------------
4213
4214namespace {
4215struct AADereferenceableImpl : AADereferenceable {
4216 AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4217 : AADereferenceable(IRP, A) {}
4218 using StateType = DerefState;
4219
4220 /// See AbstractAttribute::initialize(...).
4221 void initialize(Attributor &A) override {
4222 SmallVector<Attribute, 4> Attrs;
4223 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4224 Attrs, /* IgnoreSubsumingPositions */ false, &A);
4225 for (const Attribute &Attr : Attrs)
4226 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4227
4228 const IRPosition &IRP = this->getIRPosition();
4229 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4230
4231 bool CanBeNull, CanBeFreed;
4232 takeKnownDerefBytesMaximum(
4233 IRP.getAssociatedValue().getPointerDereferenceableBytes(
4234 A.getDataLayout(), CanBeNull, CanBeFreed));
4235
4236 bool IsFnInterface = IRP.isFnInterfaceKind();
4237 Function *FnScope = IRP.getAnchorScope();
4238 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4239 indicatePessimisticFixpoint();
4240 return;
4241 }
4242
4243 if (Instruction *CtxI = getCtxI())
4244 followUsesInMBEC(*this, A, getState(), *CtxI);
4245 }
4246
4247 /// See AbstractAttribute::getState()
4248 /// {
4249 StateType &getState() override { return *this; }
4250 const StateType &getState() const override { return *this; }
4251 /// }
4252
4253 /// Helper function for collecting accessed bytes in must-be-executed-context
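  /// Only precise, non-volatile accesses whose pointer operand is \p UseV and
  /// whose base (after stripping a constant offset) is the associated value
  /// contribute accessed bytes to \p State.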
4254 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4255 DerefState &State) {
4256 const Value *UseV = U->get();
4257 if (!UseV->getType()->isPointerTy())
4258 return;
4259
4260 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4261 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4262 return;
4263
4264 int64_t Offset;
4265 const Value *Base = GetPointerBaseWithConstantOffset(
4266 Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4267 if (Base && Base == &getAssociatedValue())
4268 State.addAccessedBytes(Offset, Loc->Size.getValue());
4269 }
4270
4271 /// See followUsesInMBEC
4272 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4273 AADereferenceable::StateType &State) {
4274 bool IsNonNull = false;
4275 bool TrackUse = false;
4276 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4277 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4278    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4279                      << " for instruction " << *I << "\n");
4280
4281 addAccessedBytesForUse(A, U, I, State);
4282 State.takeKnownDerefBytesMaximum(DerefBytes);
4283 return TrackUse;
4284 }
4285
4286 /// See AbstractAttribute::manifest(...).
4287 ChangeStatus manifest(Attributor &A) override {
4288 ChangeStatus Change = AADereferenceable::manifest(A);
4289 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4290 removeAttrs({Attribute::DereferenceableOrNull});
4291 return ChangeStatus::CHANGED;
4292 }
4293 return Change;
4294 }
4295
4296 void getDeducedAttributes(LLVMContext &Ctx,
4297 SmallVectorImpl<Attribute> &Attrs) const override {
4298 // TODO: Add *_globally support
4299 if (isAssumedNonNull())
4300 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4301 Ctx, getAssumedDereferenceableBytes()));
4302 else
4303 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4304 Ctx, getAssumedDereferenceableBytes()));
4305 }
4306
4307 /// See AbstractAttribute::getAsStr().
4308 const std::string getAsStr() const override {
4309 if (!getAssumedDereferenceableBytes())
4310 return "unknown-dereferenceable";
4311 return std::string("dereferenceable") +
4312 (isAssumedNonNull() ? "" : "_or_null") +
4313 (isAssumedGlobal() ? "_globally" : "") + "<" +
4314 std::to_string(getKnownDereferenceableBytes()) + "-" +
4315 std::to_string(getAssumedDereferenceableBytes()) + ">";
4316 }
4317};
4318
4319/// Dereferenceable attribute for a floating value.
4320struct AADereferenceableFloating : AADereferenceableImpl {
4321 AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4322 : AADereferenceableImpl(IRP, A) {}
4323
4324 /// See AbstractAttribute::updateImpl(...).
4325 ChangeStatus updateImpl(Attributor &A) override {
4326 const DataLayout &DL = A.getDataLayout();
4327
4328 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4329 bool Stripped) -> bool {
4330 unsigned IdxWidth =
4331 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4332 APInt Offset(IdxWidth, 0);
4333 const Value *Base = stripAndAccumulateOffsets(
4334 A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4335 /* AllowNonInbounds */ true);
4336
4337 const auto &AA = A.getAAFor<AADereferenceable>(
4338 *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4339 int64_t DerefBytes = 0;
4340 if (!Stripped && this == &AA) {
4341 // Use IR information if we did not strip anything.
4342 // TODO: track globally.
4343 bool CanBeNull, CanBeFreed;
4344 DerefBytes =
4345 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4346 T.GlobalState.indicatePessimisticFixpoint();
4347 } else {
4348 const DerefState &DS = AA.getState();
4349 DerefBytes = DS.DerefBytesState.getAssumed();
4350 T.GlobalState &= DS.GlobalState;
4351 }
4352
4353      // For now we do not try to "increase" dereferenceability due to negative
4354      // indices as we first have to come up with code to deal with loops and
4355      // with overflows of the dereferenceable bytes.
4356 int64_t OffsetSExt = Offset.getSExtValue();
4357 if (OffsetSExt < 0)
4358 OffsetSExt = 0;
4359
4360 T.takeAssumedDerefBytesMinimum(
4361 std::max(int64_t(0), DerefBytes - OffsetSExt));
4362
4363 if (this == &AA) {
4364 if (!Stripped) {
4365 // If nothing was stripped IR information is all we got.
4366 T.takeKnownDerefBytesMaximum(
4367 std::max(int64_t(0), DerefBytes - OffsetSExt));
4368 T.indicatePessimisticFixpoint();
4369 } else if (OffsetSExt > 0) {
4370          // If something was stripped but there is circular reasoning, we look
4371          // at the offset. If it is positive, we would be decreasing the
4372          // dereferenceable bytes in a circular loop, which would only slowly
4373          // drive them down to the known value; indicating a fixpoint here
4374          // accelerates that.
4375 T.indicatePessimisticFixpoint();
4376 }
4377 }
4378
4379 return T.isValidState();
4380 };
4381
4382 DerefState T;
4383 bool UsedAssumedInformation = false;
4384 if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4385 VisitValueCB, getCtxI(),
4386 UsedAssumedInformation))
4387 return indicatePessimisticFixpoint();
4388
4389 return clampStateAndIndicateChange(getState(), T);
4390 }
4391
4392 /// See AbstractAttribute::trackStatistics()
4393 void trackStatistics() const override {
4394    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4395 }
4396};
4397
4398/// Dereferenceable attribute for a return value.
4399struct AADereferenceableReturned final
4400 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4401 AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4402 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4403 IRP, A) {}
4404
4405 /// See AbstractAttribute::trackStatistics()
4406 void trackStatistics() const override {
4407    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4408 }
4409};
4410
4411/// Dereferenceable attribute for an argument
4412struct AADereferenceableArgument final
4413 : AAArgumentFromCallSiteArguments<AADereferenceable,
4414 AADereferenceableImpl> {
4415 using Base =
4416 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4417 AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4418 : Base(IRP, A) {}
4419
4420 /// See AbstractAttribute::trackStatistics()
4421 void trackStatistics() const override {
4422    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4423 }
4424};
4425
4426/// Dereferenceable attribute for a call site argument.
4427struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4428 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4429 : AADereferenceableFloating(IRP, A) {}
4430
4431 /// See AbstractAttribute::trackStatistics()
4432 void trackStatistics() const override {
4433 STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4434 }
4435};
4436
4437/// Dereferenceable attribute deduction for a call site return value.
4438struct AADereferenceableCallSiteReturned final
4439 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4440 using Base =
4441 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4442 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4443 : Base(IRP, A) {}
4444
4445 /// See AbstractAttribute::trackStatistics()
4446 void trackStatistics() const override {
4447 STATS_DECLTRACK_CS_ATTR(dereferenceable);
4448 }
4449};
4450} // namespace
4451
4452// ------------------------ Align Argument Attribute ------------------------
4453
4454namespace {
4455static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4456 Value &AssociatedValue, const Use *U,
4457 const Instruction *I, bool &TrackUse) {
4458 // We need to follow common pointer manipulation uses to the accesses they
4459 // feed into.
4460 if (isa<CastInst>(I)) {
4461 // Follow all but ptr2int casts.
4462 TrackUse = !isa<PtrToIntInst>(I);
4463 return 0;
4464 }
4465 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4466 if (GEP->hasAllConstantIndices())
4467 TrackUse = true;
4468 return 0;
4469 }
4470
4471 MaybeAlign MA;
4472 if (const auto *CB = dyn_cast<CallBase>(I)) {
4473 if (CB->isBundleOperand(U) || CB->isCallee(U))
4474 return 0;
4475
4476 unsigned ArgNo = CB->getArgOperandNo(U);
4477 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4478 // As long as we only use known information there is no need to track
4479 // dependences here.
4480 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4481 MA = MaybeAlign(AlignAA.getKnownAlign());
4482 }
4483
4484 const DataLayout &DL = A.getDataLayout();
4485 const Value *UseV = U->get();
4486 if (auto *SI = dyn_cast<StoreInst>(I)) {
4487 if (SI->getPointerOperand() == UseV)
4488 MA = SI->getAlign();
4489 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4490 if (LI->getPointerOperand() == UseV)
4491 MA = LI->getAlign();
4492 }
4493
4494 if (!MA || *MA <= QueryingAA.getKnownAlign())
4495 return 0;
4496
4497 unsigned Alignment = MA->value();
4498 int64_t Offset;
4499
4500 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4501 if (Base == &AssociatedValue) {
4502 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4503 // So we can say that the maximum power of two which is a divisor of
4504 // gcd(Offset, Alignment) is an alignment.
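// Editorial note (worked example, not in the original source): with
// Offset = 20 and Alignment = 16, gcd(20, 16) = 4 and the largest power of
// two dividing 4 is 4, so only align(4) can be claimed for the derived
// pointer even though the base itself is align(16).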
4505
4506 uint32_t gcd =
4507 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4508 Alignment = llvm::PowerOf2Floor(gcd);
4509 }
4510 }
4511
4512 return Alignment;
4513}
4514
4515struct AAAlignImpl : AAAlign {
4516 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4517
4518 /// See AbstractAttribute::initialize(...).
4519 void initialize(Attributor &A) override {
4520 SmallVector<Attribute, 4> Attrs;
4521 getAttrs({Attribute::Alignment}, Attrs);
4522 for (const Attribute &Attr : Attrs)
4523 takeKnownMaximum(Attr.getValueAsInt());
4524
4525 Value &V = getAssociatedValue();
4526 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4527
4528 if (getIRPosition().isFnInterfaceKind() &&
4529 (!getAnchorScope() ||
4530 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4531 indicatePessimisticFixpoint();
4532 return;
4533 }
4534
4535 if (Instruction *CtxI = getCtxI())
4536 followUsesInMBEC(*this, A, getState(), *CtxI);
4537 }
4538
4539 /// See AbstractAttribute::manifest(...).
4540 ChangeStatus manifest(Attributor &A) override {
4541 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4542
4543 // Check for users that allow alignment annotations.
4544 Value &AssociatedValue = getAssociatedValue();
4545 for (const Use &U : AssociatedValue.uses()) {
4546 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4547 if (SI->getPointerOperand() == &AssociatedValue)
4548 if (SI->getAlignment() < getAssumedAlign()) {
4549 STATS_DECLTRACK(AAAlign, Store,
4550 "Number of times alignment added to a store");
4551 SI->setAlignment(Align(getAssumedAlign()));
4552 LoadStoreChanged = ChangeStatus::CHANGED;
4553 }
4554 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4555 if (LI->getPointerOperand() == &AssociatedValue)
4556 if (LI->getAlignment() < getAssumedAlign()) {
4557 LI->setAlignment(Align(getAssumedAlign()));
4558 STATS_DECLTRACK(AAAlign, Load,
4559 "Number of times alignment added to a load");
4560 LoadStoreChanged = ChangeStatus::CHANGED;
4561 }
4562 }
4563 }
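// Editorial note (worked example, not in the original source): if this
// position is deduced to be align(16) but a load or store that uses it as
// pointer operand was written with align 4, the loop above rewrites that
// access to align 16 and records the IR change in LoadStoreChanged.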
4564
4565 ChangeStatus Changed = AAAlign::manifest(A);
4566
4567 Align InheritAlign =
4568 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4569 if (InheritAlign >= getAssumedAlign())
4570 return LoadStoreChanged;
4571 return Changed | LoadStoreChanged;
4572 }
4573
4574 // TODO: Provide a helper to determine the implied ABI alignment and check in
4575 // the existing manifest method and a new one for AAAlignImpl that value
4576 // to avoid making the alignment explicit if it did not improve.
4577
4578 /// See AbstractAttribute::getDeducedAttributes
4579 virtual void
4580 getDeducedAttributes(LLVMContext &Ctx,
4581 SmallVectorImpl<Attribute> &Attrs) const override {
4582 if (getAssumedAlign() > 1)
4583 Attrs.emplace_back(
4584 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4585 }
4586
4587 /// See followUsesInMBEC
4588 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4589 AAAlign::StateType &State) {
4590 bool TrackUse = false;
4591
4592 unsigned int KnownAlign =
4593 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4594 State.takeKnownMaximum(KnownAlign);
4595
4596 return TrackUse;
4597 }
4598
4599 /// See AbstractAttribute::getAsStr().
4600 const std::string getAsStr() const override {
4601 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4602 "-" + std::to_string(getAssumedAlign()) + ">")
4603 : "unknown-align";
4604 }
4605};
4606
4607/// Align attribute for a floating value.
4608struct AAAlignFloating : AAAlignImpl {
4609 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4610
4611 /// See AbstractAttribute::updateImpl(...).
4612 ChangeStatus updateImpl(Attributor &A) override {
4613 const DataLayout &DL = A.getDataLayout();
4614
4615 auto VisitValueCB = [&](Value &V, const Instruction *,
4616 AAAlign::StateType &T, bool Stripped) -> bool {
4617 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4618 return true;
4619 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4620 DepClassTy::REQUIRED);
4621 if (!Stripped && this == &AA) {
4622 int64_t Offset;
4623 unsigned Alignment = 1;
4624 if (const Value *Base =
4625 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4626 // TODO: Use AAAlign for the base too.
4627 Align PA = Base->getPointerAlignment(DL);
4628 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4629 // So we can say that the maximum power of two which is a divisor of
4630 // gcd(Offset, Alignment) is an alignment.
4631
4632 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4633 uint32_t(PA.value()));
4634 Alignment = llvm::PowerOf2Floor(gcd);
4635 } else {
4636 Alignment = V.getPointerAlignment(DL).value();
4637 }
4638 // Use only IR information if we did not strip anything.
4639 T.takeKnownMaximum(Alignment);
4640 T.indicatePessimisticFixpoint();
4641 } else {
4642 // Use abstract attribute information.
4643 const AAAlign::StateType &DS = AA.getState();
4644 T ^= DS;
4645 }
4646 return T.isValidState();
4647 };
4648
4649 StateType T;
4650 bool UsedAssumedInformation = false;
4651 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4652 VisitValueCB, getCtxI(),
4653 UsedAssumedInformation))
4654 return indicatePessimisticFixpoint();
4655
4656 // TODO: If we know we visited all incoming values, thus none are assumed
4657 // dead, we can take the known information from the state T.
4658 return clampStateAndIndicateChange(getState(), T);
4659 }
4660
4661 /// See AbstractAttribute::trackStatistics()
4662 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4663};
4664
4665/// Align attribute for function return value.
4666struct AAAlignReturned final
4667 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4668 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4669 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4670
4671 /// See AbstractAttribute::initialize(...).
4672 void initialize(Attributor &A) override {
4673 Base::initialize(A);
4674 Function *F = getAssociatedFunction();
4675 if (!F || F->isDeclaration())
4676 indicatePessimisticFixpoint();
4677 }
4678
4679 /// See AbstractAttribute::trackStatistics()
4680 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4681};
4682
4683/// Align attribute for function argument.
4684struct AAAlignArgument final
4685 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4686 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4687 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4688
4689 /// See AbstractAttribute::manifest(...).
4690 ChangeStatus manifest(Attributor &A) override {
4691 // If the associated argument is involved in a must-tail call we give up
4692 // because we would need to keep the argument alignments of caller and
4693 // callee in-sync. Just does not seem worth the trouble right now.
4694 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4695 return ChangeStatus::UNCHANGED;
4696 return Base::manifest(A);
4697 }
4698
4699 /// See AbstractAttribute::trackStatistics()
4700 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4701};
4702
4703struct AAAlignCallSiteArgument final : AAAlignFloating {
4704 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4705 : AAAlignFloating(IRP, A) {}
4706
4707 /// See AbstractAttribute::manifest(...).
4708 ChangeStatus manifest(Attributor &A) override {
4709 // If the associated argument is involved in a must-tail call we give up
4710 // because we would need to keep the argument alignments of caller and
4711 // callee in-sync. Just does not seem worth the trouble right now.
4712 if (Argument *Arg = getAssociatedArgument())
4713 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4714 return ChangeStatus::UNCHANGED;
4715 ChangeStatus Changed = AAAlignImpl::manifest(A);
4716 Align InheritAlign =
4717 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4718 if (InheritAlign >= getAssumedAlign())
4719 Changed = ChangeStatus::UNCHANGED;
4720 return Changed;
4721 }
4722
4723 /// See AbstractAttribute::updateImpl(Attributor &A).
4724 ChangeStatus updateImpl(Attributor &A) override {
4725 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4726 if (Argument *Arg = getAssociatedArgument()) {
4727 // We only take known information from the argument
4728 // so we do not need to track a dependence.
4729 const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4730 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4731 takeKnownMaximum(ArgAlignAA.getKnownAlign());
4732 }
4733 return Changed;
4734 }
4735
4736 /// See AbstractAttribute::trackStatistics()
4737 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4738};
4739
4740/// Align attribute deduction for a call site return value.
4741struct AAAlignCallSiteReturned final
4742 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4743 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4744 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4745 : Base(IRP, A) {}
4746
4747 /// See AbstractAttribute::initialize(...).
4748 void initialize(Attributor &A) override {
4749 Base::initialize(A);
4750 Function *F = getAssociatedFunction();
4751 if (!F || F->isDeclaration())
4752 indicatePessimisticFixpoint();
4753 }
4754
4755 /// See AbstractAttribute::trackStatistics()
4756 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4757};
4758} // namespace
4759
4760/// ------------------ Function No-Return Attribute ----------------------------
4761namespace {
4762struct AANoReturnImpl : public AANoReturn {
4763 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4764
4765 /// See AbstractAttribute::initialize(...).
4766 void initialize(Attributor &A) override {
4767 AANoReturn::initialize(A);
4768 Function *F = getAssociatedFunction();
4769 if (!F || F->isDeclaration())
4770 indicatePessimisticFixpoint();
4771 }
4772
4773 /// See AbstractAttribute::getAsStr().
4774 const std::string getAsStr() const override {
4775 return getAssumed() ? "noreturn" : "may-return";
4776 }
4777
4778 /// See AbstractAttribute::updateImpl(Attributor &A).
4779 virtual ChangeStatus updateImpl(Attributor &A) override {
4780 auto CheckForNoReturn = [](Instruction &) { return false; };
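// Editorial note (not in the original source): the callback rejects every
// return instruction, so checkForAllInstructions below only succeeds when
// no return is assumed reachable, i.e. the function behaves as noreturn.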
4781 bool UsedAssumedInformation = false;
4782 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4783 {(unsigned)Instruction::Ret},
4784 UsedAssumedInformation))
4785 return indicatePessimisticFixpoint();
4786 return ChangeStatus::UNCHANGED;
4787 }
4788};
4789
4790struct AANoReturnFunction final : AANoReturnImpl {
4791 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4792 : AANoReturnImpl(IRP, A) {}
4793
4794 /// See AbstractAttribute::trackStatistics()
4795 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4796};
4797
4798/// NoReturn attribute deduction for a call sites.
4799struct AANoReturnCallSite final : AANoReturnImpl {
4800 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4801 : AANoReturnImpl(IRP, A) {}
4802
4803 /// See AbstractAttribute::initialize(...).
4804 void initialize(Attributor &A) override {
4805 AANoReturnImpl::initialize(A);
4806 if (Function *F = getAssociatedFunction()) {
4807 const IRPosition &FnPos = IRPosition::function(*F);
4808 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4809 if (!FnAA.isAssumedNoReturn())
4810 indicatePessimisticFixpoint();
4811 }
4812 }
4813
4814 /// See AbstractAttribute::updateImpl(...).
4815 ChangeStatus updateImpl(Attributor &A) override {
4816 // TODO: Once we have call site specific value information we can provide
4817 // call site specific liveness information and then it makes
4818 // sense to specialize attributes for call site arguments instead of
4819 // redirecting requests to the callee argument.
4820 Function *F = getAssociatedFunction();
4821 const IRPosition &FnPos = IRPosition::function(*F);
4822 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4823 return clampStateAndIndicateChange(getState(), FnAA.getState());
4824 }
4825
4826 /// See AbstractAttribute::trackStatistics()
4827 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4828};
4829} // namespace
4830
4831/// ----------------------- Instance Info ---------------------------------
4832
4833namespace {
4834/// A class to hold the state for the instance info attribute.
4835struct AAInstanceInfoImpl : public AAInstanceInfo {
4836 AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4837 : AAInstanceInfo(IRP, A) {}
4838
4839 /// See AbstractAttribute::initialize(...).
4840 void initialize(Attributor &A) override {
4841 Value &V = getAssociatedValue();
4842 if (auto *C = dyn_cast<Constant>(&V)) {
4843 if (C->isThreadDependent())
4844 indicatePessimisticFixpoint();
4845 else
4846 indicateOptimisticFixpoint();
4847 return;
4848 }
4849 if (auto *CB = dyn_cast<CallBase>(&V))
4850 if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4851 !CB->mayReadFromMemory()) {
4852 indicateOptimisticFixpoint();
4853 return;
4854 }
4855 }
4856
4857 /// See AbstractAttribute::updateImpl(...).
4858 ChangeStatus updateImpl(Attributor &A) override {
4859 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4860
4861 Value &V = getAssociatedValue();
4862 const Function *Scope = nullptr;
4863 if (auto *I = dyn_cast<Instruction>(&V))
4864 Scope = I->getFunction();
4865 if (auto *A = dyn_cast<Argument>(&V)) {
4866 Scope = A->getParent();
4867 if (!Scope->hasLocalLinkage())
4868 return Changed;
4869 }
4870 if (!Scope)
4871 return indicateOptimisticFixpoint();
4872
4873 auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4874 *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4875 if (NoRecurseAA.isAssumedNoRecurse())
4876 return Changed;
4877
4878 auto UsePred = [&](const Use &U, bool &Follow) {
4879 const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4880 if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4881 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4882 Follow = true;
4883 return true;
4884 }
4885 if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4886 (isa<StoreInst>(UserI) &&
4887 cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4888 return true;
4889 if (auto *CB = dyn_cast<CallBase>(UserI)) {
4890 // This check does not guarantee uniqueness, but it ensures for now that
4891 // we cannot end up with two versions of \p U each thinking it was unique.
4892 if (!CB->getCalledFunction() ||
4893 !CB->getCalledFunction()->hasLocalLinkage())
4894 return true;
4895 if (!CB->isArgOperand(&U))
4896 return false;
4897 const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4898 *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4899 DepClassTy::OPTIONAL);
4900 if (ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4901 return true;
4902 }
4903 return false;
4904 };
4905
4906 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4907 if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4908 auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4909 if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4910 return true;
4911 auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4912 *SI->getFunction());
4913 if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4914 return true;
4915 }
4916 return false;
4917 };
4918
4919 if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4920 DepClassTy::OPTIONAL,
4921 /* IgnoreDroppableUses */ true, EquivalentUseCB))
4922 return indicatePessimisticFixpoint();
4923
4924 return Changed;
4925 }
4926
4927 /// See AbstractState::getAsStr().
4928 const std::string getAsStr() const override {
4929 return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4930 }
4931
4932 /// See AbstractAttribute::trackStatistics()
4933 void trackStatistics() const override {}
4934};
4935
4936/// InstanceInfo attribute for floating values.
4937struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4938 AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4939 : AAInstanceInfoImpl(IRP, A) {}
4940};
4941
4942/// InstanceInfo attribute for function arguments.
4943struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4944 AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4945 : AAInstanceInfoFloating(IRP, A) {}
4946};
4947
4948/// InstanceInfo attribute for call site arguments.
4949struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4950 AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4951 : AAInstanceInfoImpl(IRP, A) {}
4952
4953 /// See AbstractAttribute::updateImpl(...).
4954 ChangeStatus updateImpl(Attributor &A) override {
4955 // TODO: Once we have call site specific value information we can provide
4956 // call site specific liveness information and then it makes
4957 // sense to specialize attributes for call site arguments instead of
4958 // redirecting requests to the callee argument.
4959 Argument *Arg = getAssociatedArgument();
4960 if (!Arg)
4961 return indicatePessimisticFixpoint();
4962 const IRPosition &ArgPos = IRPosition::argument(*Arg);
4963 auto &ArgAA =
4964 A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4965 return clampStateAndIndicateChange(getState(), ArgAA.getState());
4966 }
4967};
4968
4969/// InstanceInfo attribute for function return value.
4970struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4971 AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4972 : AAInstanceInfoImpl(IRP, A) {
4973 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4974 }
4975
4976 /// See AbstractAttribute::initialize(...).
4977 void initialize(Attributor &A) override {
4978 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4979 }
4980
4981 /// See AbstractAttribute::updateImpl(...).
4982 ChangeStatus updateImpl(Attributor &A) override {
4983 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4984 }
4985};
4986
4987/// InstanceInfo attribute deduction for a call site return value.
4988struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4989 AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4990 : AAInstanceInfoFloating(IRP, A) {}
4991};
4992} // namespace
4993
4994/// ----------------------- Variable Capturing ---------------------------------
4995
4996namespace {
4997/// A class to hold the state for the no-capture attributes.
4998struct AANoCaptureImpl : public AANoCapture {
4999 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5000
5001 /// See AbstractAttribute::initialize(...).
5002 void initialize(Attributor &A) override {
5003 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5004 indicateOptimisticFixpoint();
5005 return;
5006 }
5007 Function *AnchorScope = getAnchorScope();
5008 if (isFnInterfaceKind() &&
5009 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5010 indicatePessimisticFixpoint();
5011 return;
5012 }
5013
5014 // You cannot "capture" null in the default address space.
5015 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5016 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5017 indicateOptimisticFixpoint();
5018 return;
5019 }
5020
5021 const Function *F =
5022 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5023
5024 // Check what state the associated function can actually capture.
5025 if (F)
5026 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5027 else
5028 indicatePessimisticFixpoint();
5029 }
5030
5031 /// See AbstractAttribute::updateImpl(...).
5032 ChangeStatus updateImpl(Attributor &A) override;
5033
5034 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5035 virtual void
5036 getDeducedAttributes(LLVMContext &Ctx,
5037 SmallVectorImpl<Attribute> &Attrs) const override {
5038 if (!isAssumedNoCaptureMaybeReturned())
5039 return;
5040
5041 if (isArgumentPosition()) {
5042 if (isAssumedNoCapture())
5043 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5044 else if (ManifestInternal)
5045 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5046 }
5047 }
5048
5049 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5050 /// depending on the ability of the function associated with \p IRP to capture
5051 /// state in memory and through "returning/throwing", respectively.
5052 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5053 const Function &F,
5054 BitIntegerState &State) {
5055 // TODO: Once we have memory behavior attributes we should use them here.
5056
5057 // If we know we cannot communicate or write to memory, we do not care about
5058 // ptr2int anymore.
5059 if (F.onlyReadsMemory() && F.doesNotThrow() &&
5060 F.getReturnType()->isVoidTy()) {
5061 State.addKnownBits(NO_CAPTURE);
5062 return;
5063 }
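// Editorial example (not in the original source): for a callee such as
//   declare void @use(i8* %p) readonly nounwind
// a pointer argument can neither be stored, returned, nor thrown, so
// NO_CAPTURE is known for its arguments up front.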
5064
5065 // A function cannot capture state in memory if it only reads memory; it can
5066 // however return/throw state and the state might be influenced by the
5067 // pointer value, e.g., loading from a returned pointer might reveal a bit.
5068 if (F.onlyReadsMemory())
5069 State.addKnownBits(NOT_CAPTURED_IN_MEM);
5070
5071 // A function cannot communicate state back if it does not throw
5072 // exceptions and does not return values.
5073 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5074 State.addKnownBits(NOT_CAPTURED_IN_RET);
5075
5076 // Check existing "returned" attributes.
5077 int ArgNo = IRP.getCalleeArgNo();
5078 if (F.doesNotThrow() && ArgNo >= 0) {
5079 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5080 if (F.hasParamAttribute(u, Attribute::Returned)) {
5081 if (u == unsigned(ArgNo))
5082 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5083 else if (F.onlyReadsMemory())
5084 State.addKnownBits(NO_CAPTURE);
5085 else
5086 State.addKnownBits(NOT_CAPTURED_IN_RET);
5087 break;
5088 }
5089 }
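// Editorial example (not in the original source): for
//   declare i8* @passthrough(i8* returned %p) nounwind
// the argument %p may escape through the return value, so its
// NOT_CAPTURED_IN_RET bit is removed from the assumed state, while any
// other argument gains that bit as known because only %p is returned.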
5090 }
5091
5092 /// See AbstractState::getAsStr().
5093 const std::string getAsStr() const override {
5094 if (isKnownNoCapture())
5095 return "known not-captured";
5096 if (isAssumedNoCapture())
5097 return "assumed not-captured";
5098 if (isKnownNoCaptureMaybeReturned())
5099 return "known not-captured-maybe-returned";
5100 if (isAssumedNoCaptureMaybeReturned())
5101 return "assumed not-captured-maybe-returned";
5102 return "assumed-captured";
5103 }
5104
5105 /// Check the use \p U and update \p State accordingly. Return true if we
5106 /// should continue to update the state.
5107 bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5108 bool &Follow) {
5109 Instruction *UInst = cast<Instruction>(U.getUser());
5110 LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5111 << *UInst << "\n");
5112
5113 // Deal with ptr2int by following uses.
5114 if (isa<PtrToIntInst>(UInst)) {
5115 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5116 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5117 /* Return */ true);
5118 }
5119
5120 // For stores we already checked if we can follow them; if they make it
5121 // here we give up.
5122 if (isa<StoreInst>(UInst))
5123 return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5124 /* Return */ false);
5125
5126 // Explicitly catch return instructions.
5127 if (isa<ReturnInst>(UInst)) {
5128 if (UInst->getFunction() == getAnchorScope())
5129 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5130 /* Return */ true);
5131 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5132 /* Return */ true);
5133 }
5134
5135 // For now we only use special logic for call sites. However, the tracker
5136 // itself knows about a lot of other non-capturing cases already.
5137 auto *CB = dyn_cast<CallBase>(UInst);
5138 if (!CB || !CB->isArgOperand(&U))
5139 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5140 /* Return */ true);
5141
5142 unsigned ArgNo = CB->getArgOperandNo(&U);
5143 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5144 // If we have an abstract no-capture attribute for the argument we can use
5145 // it to justify a non-capture attribute here. This allows recursion!
5146 auto &ArgNoCaptureAA =
5147 A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5148 if (ArgNoCaptureAA.isAssumedNoCapture())
5149 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5150 /* Return */ false);
5151 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5152 Follow = true;
5153 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5154 /* Return */ false);
5155 }
5156
5157 // Lastly, we could not find a reason no-capture can be assumed so we don't.
5158 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5159 /* Return */ true);
5160 }
5161
5162 /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5163 /// \p CapturedInRet, then return true if we should continue updating the
5164 /// state.
5165 static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5166 bool CapturedInInt, bool CapturedInRet) {
5167 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5168 << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5169 if (CapturedInMem)
5170 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5171 if (CapturedInInt)
5172 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5173 if (CapturedInRet)
5174 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5175 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5176 }
5177};
5178
5179ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5180 const IRPosition &IRP = getIRPosition();
5181 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5182 : &IRP.getAssociatedValue();
5183 if (!V)
5184 return indicatePessimisticFixpoint();
5185
5186 const Function *F =
5187 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5188 assert(F && "Expected a function!");
5189 const IRPosition &FnPos = IRPosition::function(*F);
5190
5191 AANoCapture::StateType T;
5192
5193 // Readonly means we cannot capture through memory.
5194 bool IsKnown;
5195 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5196 T.addKnownBits(NOT_CAPTURED_IN_MEM);
5197 if (IsKnown)
5198 addKnownBits(NOT_CAPTURED_IN_MEM);
5199 }
5200
5201 // Make sure all returned values are different from the underlying value.
5202 // TODO: we could do this in a more sophisticated way inside
5203 // AAReturnedValues, e.g., track all values that escape through returns
5204 // directly somehow.
5205 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5206 bool SeenConstant = false;
5207 for (auto &It : RVAA.returned_values()) {
5208 if (isa<Constant>(It.first)) {
5209 if (SeenConstant)
5210 return false;
5211 SeenConstant = true;
5212 } else if (!isa<Argument>(It.first) ||
5213 It.first == getAssociatedArgument())
5214 return false;
5215 }
5216 return true;
5217 };
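// Editorial example (a sketch, not in the original source): for
//   define internal i8* @f(i8* %a, i8* %b) { ret i8* %a }
// the lambda above fails for %a (it is itself returned) but succeeds for
// %b, so only %b can take the NOT_CAPTURED_IN_RET bits via this path.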
5218
5219 const auto &NoUnwindAA =
5220 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5221 if (NoUnwindAA.isAssumedNoUnwind()) {
5222 bool IsVoidTy = F->getReturnType()->isVoidTy();
5223 const AAReturnedValues *RVAA =
5224 IsVoidTy ? nullptr
5225 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5226
5227 DepClassTy::OPTIONAL);
5228 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5229 T.addKnownBits(NOT_CAPTURED_IN_RET);
5230 if (T.isKnown(NOT_CAPTURED_IN_MEM))
5231 return ChangeStatus::UNCHANGED;
5232 if (NoUnwindAA.isKnownNoUnwind() &&
5233 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5234 addKnownBits(NOT_CAPTURED_IN_RET);
5235 if (isKnown(NOT_CAPTURED_IN_MEM))
5236 return indicateOptimisticFixpoint();
5237 }
5238 }
5239 }
5240
5241 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5242 const auto &DerefAA = A.getAAFor<AADereferenceable>(
5243 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5244 return DerefAA.getAssumedDereferenceableBytes();
5245 };
5246
5247 auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5248 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5249 case UseCaptureKind::NO_CAPTURE:
5250 return true;
5251 case UseCaptureKind::MAY_CAPTURE:
5252 return checkUse(A, T, U, Follow);
5253 case UseCaptureKind::PASSTHROUGH:
5254 Follow = true;
5255 return true;
5256 }
5257 llvm_unreachable("Unexpected use capture kind!");
5258 };
5259
5260 if (!A.checkForAllUses(UseCheck, *this, *V))
5261 return indicatePessimisticFixpoint();
5262
5263 AANoCapture::StateType &S = getState();
5264 auto Assumed = S.getAssumed();
5265 S.intersectAssumedBits(T.getAssumed());
5266 if (!isAssumedNoCaptureMaybeReturned())
5267 return indicatePessimisticFixpoint();
5268 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5269 : ChangeStatus::CHANGED;
5270}
5271
5272/// NoCapture attribute for function arguments.
5273struct AANoCaptureArgument final : AANoCaptureImpl {
5274 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5275 : AANoCaptureImpl(IRP, A) {}
5276
5277 /// See AbstractAttribute::trackStatistics()
5278 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5279};
5280
5281/// NoCapture attribute for call site arguments.
5282struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5283 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5284 : AANoCaptureImpl(IRP, A) {}
5285
5286 /// See AbstractAttribute::initialize(...).
5287 void initialize(Attributor &A) override {
5288 if (Argument *Arg = getAssociatedArgument())
5289 if (Arg->hasByValAttr())
5290 indicateOptimisticFixpoint();
5291 AANoCaptureImpl::initialize(A);
5292 }
5293
5294 /// See AbstractAttribute::updateImpl(...).
5295 ChangeStatus updateImpl(Attributor &A) override {
5296 // TODO: Once we have call site specific value information we can provide
5297 // call site specific liveness information and then it makes
5298 // sense to specialize attributes for call site arguments instead of
5299 // redirecting requests to the callee argument.
5300 Argument *Arg = getAssociatedArgument();
5301 if (!Arg)
5302 return indicatePessimisticFixpoint();
5303 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5304 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5305 return clampStateAndIndicateChange(getState(), ArgAA.getState());
5306 }
5307
5308 /// See AbstractAttribute::trackStatistics()
5309 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5310};
5311
5312/// NoCapture attribute for floating values.
5313struct AANoCaptureFloating final : AANoCaptureImpl {
5314 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5315 : AANoCaptureImpl(IRP, A) {}
5316
5317 /// See AbstractAttribute::trackStatistics()
5318 void trackStatistics() const override {
5319 STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5320 }
5321};
5322
5323/// NoCapture attribute for function return value.
5324struct AANoCaptureReturned final : AANoCaptureImpl {
5325 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5326 : AANoCaptureImpl(IRP, A) {
5327 llvm_unreachable("NoCapture is not applicable to function returns!");
5328 }
5329
5330 /// See AbstractAttribute::initialize(...).
5331 void initialize(Attributor &A) override {
5332 llvm_unreachable("NoCapture is not applicable to function returns!");
5333 }
5334
5335 /// See AbstractAttribute::updateImpl(...).
5336 ChangeStatus updateImpl(Attributor &A) override {
5337 llvm_unreachable("NoCapture is not applicable to function returns!");
5338 }
5339
5340 /// See AbstractAttribute::trackStatistics()
5341 void trackStatistics() const override {}
5342};
5343
5344/// NoCapture attribute deduction for a call site return value.
5345struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5346 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5347 : AANoCaptureImpl(IRP, A) {}
5348
5349 /// See AbstractAttribute::initialize(...).
5350 void initialize(Attributor &A) override {
5351 const Function *F = getAnchorScope();
5352 // Check what state the associated function can actually capture.
5353 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5354 }
5355
5356 /// See AbstractAttribute::trackStatistics()
5357 void trackStatistics() const override {
5358 STATS_DECLTRACK_CSRET_ATTR(nocapture)
5359 }
5360};
5361} // namespace
5362
5363/// ------------------ Value Simplify Attribute ----------------------------
5364
5365bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5366 // FIXME: Add a typecast support.
5367 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5368 SimplifiedAssociatedValue, Other, Ty);
5369 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5370 return false;
5371
5372 LLVM_DEBUG({
5373 if (SimplifiedAssociatedValue.hasValue())
5374 dbgs() << "[ValueSimplify] is assumed to be "
5375 << **SimplifiedAssociatedValue << "\n";
5376 else
5377 dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5378 });
5379 return true;
5380}
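// Editorial note (not in the original source, sketching the lattice used
// above): an unset Optional means no value has been seen yet, a concrete
// Value* means every contribution simplified to that one value, and nullptr
// marks a conflict, which is why unionAssumed reports failure when the
// combination collapses to Optional<Value *>(nullptr).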
5381
5382namespace {
5383struct AAValueSimplifyImpl : AAValueSimplify {
5384 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5385 : AAValueSimplify(IRP, A) {}
5386
5387 /// See AbstractAttribute::initialize(...).
5388 void initialize(Attributor &A) override {
5389 if (getAssociatedValue().getType()->isVoidTy())
5390 indicatePessimisticFixpoint();
5391 if (A.hasSimplificationCallback(getIRPosition()))
5392 indicatePessimisticFixpoint();
5393 }
5394
5395 /// See AbstractAttribute::getAsStr().
5396 const std::string getAsStr() const override {
5397 LLVM_DEBUG({
5398 errs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5399 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5400 errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5401 });
5402 return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5403 : "not-simple";
5404 }
5405
5406 /// See AbstractAttribute::trackStatistics()
5407 void trackStatistics() const override {}
5408
5409 /// See AAValueSimplify::getAssumedSimplifiedValue()
5410 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5411 return SimplifiedAssociatedValue;
5412 }
5413
5414 /// Ensure the return value is \p V with type \p Ty; if not possible, return
5415 /// nullptr. If \p Check is true we will only verify such an operation would
5416 /// succeed and return a non-nullptr value if that is the case. No IR is
5417 /// generated or modified.
5418 static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5419 bool Check) {
5420 if (auto *TypedV = AA::getWithType(V, Ty))
5421 return TypedV;
5422 if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5423 return Check ? &V
5424 : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5425 "", CtxI);
5426 return nullptr;
5427 }
5428
5429 /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
5430 /// If \p Check is true we will only verify such an operation would succeed and
5431 /// return a non-nullptr value if that is the case. No IR is generated or
5432 /// modified.
5433 static Value *reproduceInst(Attributor &A,
5434 const AbstractAttribute &QueryingAA,
5435 Instruction &I, Type &Ty, Instruction *CtxI,
5436 bool Check, ValueToValueMapTy &VMap) {
5437 assert(CtxI && "Cannot reproduce an instruction without context!");
5438 if (Check && (I.mayReadFromMemory() ||
5439 !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5440 /* TLI */ nullptr)))
5441 return nullptr;
5442 for (Value *Op : I.operands()) {
5443 Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5444 if (!NewOp) {
5445 assert(Check && "Manifest of new value unexpectedly failed!");
5446 return nullptr;
5447 }
5448 if (!Check)
5449 VMap[Op] = NewOp;
5450 }
5451 if (Check)
5452 return &I;
5453
5454 Instruction *CloneI = I.clone();
5455 VMap[&I] = CloneI;
5456 CloneI->insertBefore(CtxI);
5457 RemapInstruction(CloneI, VMap);
5458 return CloneI;
5459 }
5460
5461 /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
5462 /// If \p Check is true we will only verify such an operation would succeed and
5463 /// return a non-nullptr value if that is the case. No IR is generated or
5464 /// modified.
5465 static Value *reproduceValue(Attributor &A,
5466 const AbstractAttribute &QueryingAA, Value &V,
5467 Type &Ty, Instruction *CtxI, bool Check,
5468 ValueToValueMapTy &VMap) {
5469 if (const auto &NewV = VMap.lookup(&V))
5470 return NewV;
5471 bool UsedAssumedInformation = false;
5472 Optional<Value *> SimpleV =
5473 A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5474 if (!SimpleV.hasValue())
5475 return PoisonValue::get(&Ty);
5476 Value *EffectiveV = &V;
5477 if (SimpleV.getValue())
5478 EffectiveV = SimpleV.getValue();
5479 if (auto *C = dyn_cast<Constant>(EffectiveV))
5480 if (!C->canTrap())
5481 return C;
5482 if (CtxI && AA::isValidAtPosition(*EffectiveV, *CtxI, A.getInfoCache()))
5483 return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5484 if (auto *I = dyn_cast<Instruction>(EffectiveV))
5485 if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5486 return ensureType(A, *NewV, Ty, CtxI, Check);
5487 return nullptr;
5488 }
5489
5490 /// Return a value we can use as replacement for the associated one, or
5491 /// nullptr if we don't have one that makes sense.
5492 Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5493 Value *NewV = SimplifiedAssociatedValue.hasValue()
5494 ? SimplifiedAssociatedValue.getValue()
5495 : UndefValue::get(getAssociatedType());
5496 if (NewV && NewV != &getAssociatedValue()) {
5497 ValueToValueMapTy VMap;
5498 // First verify we can reproduce the value with the required type at the
5499 // context location before we actually start modifying the IR.
5500 if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5501 /* CheckOnly */ true, VMap))
5502 return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5503 /* CheckOnly */ false, VMap);
5504 }
5505 return nullptr;
5506 }
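// Editorial note (not in the original source): reproduceValue is called
// twice on purpose; the CheckOnly pass proves that every operand can be
// rematerialized at \p CtxI before the second pass actually clones any
// instructions, so a failed replacement never leaves partial IR behind.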
5507
5508 /// Helper function for querying AAValueSimplify and updating the candidate.
5509 /// \param IRP The value position we are trying to unify with SimplifiedValue
5510 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5511 const IRPosition &IRP, bool Simplify = true) {
5512 bool UsedAssumedInformation = false;
5513 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5514 if (Simplify)
5515 QueryingValueSimplified =
5516 A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5517 return unionAssumed(QueryingValueSimplified);
5518 }
5519
5520 /// Returns true if a candidate was found, false otherwise.
5521 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5522 if (!getAssociatedValue().getType()->isIntegerTy())
5523 return false;
5524
5525 // This will also pass the call base context.
5526 const auto &AA =
5527 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5528
5529 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5530
5531 if (!COpt.hasValue()) {
5532 SimplifiedAssociatedValue = llvm::None;
5533 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5534 return true;
5535 }
5536 if (auto *C = COpt.getValue()) {
5537 SimplifiedAssociatedValue = C;
5538 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5539 return true;
5540 }
5541 return false;
5542 }
5543
5544 bool askSimplifiedValueForOtherAAs(Attributor &A) {
5545 if (askSimplifiedValueFor<AAValueConstantRange>(A))
5546 return true;
5547 if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5548 return true;
5549 return false;
5550 }
5551
5552 /// See AbstractAttribute::manifest(...).
5553 ChangeStatus manifest(Attributor &A) override {
5554 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5555 for (auto &U : getAssociatedValue().uses()) {
5556 // Check if we need to adjust the insertion point to make sure the IR is
5557 // valid.
5558 Instruction *IP = dyn_cast<Instruction>(U.getUser());
5559 if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5560 IP = PHI->getIncomingBlock(U)->getTerminator();
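// Editorial note (not in the original source): a value feeding a PHI must
// be available at the end of the corresponding incoming block, so the
// replacement is materialized before that block's terminator rather than
// in front of the PHI itself.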
5561 if (auto *NewV = manifestReplacementValue(A, IP)) {
5562 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5563 << " -> " << *NewV << " :: " << *this << "\n");
5564 if (A.changeUseAfterManifest(U, *NewV))
5565 Changed = ChangeStatus::CHANGED;
5566 }
5567 }
5568
5569 return Changed | AAValueSimplify::manifest(A);
5570 }
5571
5572 /// See AbstractState::indicatePessimisticFixpoint(...).
5573 ChangeStatus indicatePessimisticFixpoint() override {
5574 SimplifiedAssociatedValue = &getAssociatedValue();
5575 return AAValueSimplify::indicatePessimisticFixpoint();
5576 }
5577};
5578
5579struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5580 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5581 : AAValueSimplifyImpl(IRP, A) {}
5582
5583 void initialize(Attributor &A) override {
5584 AAValueSimplifyImpl::initialize(A);
5585 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5586 indicatePessimisticFixpoint();
5587 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5588 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5589 /* IgnoreSubsumingPositions */ true))
5590 indicatePessimisticFixpoint();
5591 }
5592
5593 /// See AbstractAttribute::updateImpl(...).
5594 ChangeStatus updateImpl(Attributor &A) override {
5595 // Byval is only replaceable if it is readonly; otherwise we would write into
5596 // the replaced value and not the copy that byval creates implicitly.
5597 Argument *Arg = getAssociatedArgument();
5598 if (Arg->hasByValAttr()) {
5599 // TODO: We probably need to verify synchronization is not an issue, e.g.,
5600 // there is no race by not copying a constant byval.
5601 bool IsKnown;
5602 if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5603 return indicatePessimisticFixpoint();
5604 }
5605
5606 auto Before = SimplifiedAssociatedValue;
5607
5608 auto PredForCallSite = [&](AbstractCallSite ACS) {
5609 const IRPosition &ACSArgPos =
5610 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5611 // Check if a corresponding argument was found or if it is not associated
5612 // (which can happen for callback calls).
5613 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5614 return false;
5615
5616 // Simplify the argument operand explicitly and check if the result is
5617 // valid in the current scope. This avoids referring to simplified values
5618 // in other functions, e.g., we don't want to say an argument in a
5619 // static function is actually an argument in a different function.
5620 bool UsedAssumedInformation = false;
5621 Optional<Constant *> SimpleArgOp =
5622 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5623 if (!SimpleArgOp.hasValue())
5624 return true;
5625 if (!SimpleArgOp.getValue())
5626 return false;
5627 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5628 return false;
5629 return unionAssumed(*SimpleArgOp);
5630 };
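// Editorial example (not in the original source): if an internal function
// is called as foo(42) at every call site, SimpleArgOp is the constant 42
// each time and unionAssumed keeps it, so the argument simplifies to 42;
// two different constants at two call sites make the union fail and the
// attribute falls back to other AAs or a pessimistic fixpoint.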
5631
5632 // Generate an answer specific to the call site context.
5633 bool Success;
5634 bool UsedAssumedInformation = false;
5635 if (hasCallBaseContext() &&
5636 getCallBaseContext()->getCalledFunction() == Arg->getParent())
5637 Success = PredForCallSite(
5638 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5639 else
5640 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5641 UsedAssumedInformation);
5642
5643 if (!Success)
5644 if (!askSimplifiedValueForOtherAAs(A))
5645 return indicatePessimisticFixpoint();
5646
5647 // If a candidate was found in this update, return CHANGED.
5648 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5649 : ChangeStatus ::CHANGED;
5650 }
5651
5652 /// See AbstractAttribute::trackStatistics()
5653 void trackStatistics() const override {
5654 STATS_DECLTRACK_ARG_ATTR(value_simplify)
5655 }
5656};
5657
5658struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5659 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5660 : AAValueSimplifyImpl(IRP, A) {}
5661
5662 /// See AAValueSimplify::getAssumedSimplifiedValue()
5663 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5664 if (!isValidState())
5665 return nullptr;
5666 return SimplifiedAssociatedValue;
5667 }
5668
5669 /// See AbstractAttribute::updateImpl(...).
5670 ChangeStatus updateImpl(Attributor &A) override {
5671 auto Before = SimplifiedAssociatedValue;
5672
5673 auto ReturnInstCB = [&](Instruction &I) {
5674 auto &RI = cast<ReturnInst>(I);
5675 return checkAndUpdate(
5676 A, *this,
5677 IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5678 };
5679
5680 bool UsedAssumedInformation = false;
5681 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5682 UsedAssumedInformation))
5683 if (!askSimplifiedValueForOtherAAs(A))
5684 return indicatePessimisticFixpoint();
5685
5686 // If a candidate was found in this update, return CHANGED.
5687 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5688 : ChangeStatus::CHANGED;
5689 }
5690
5691 ChangeStatus manifest(Attributor &A) override {
5692 // We queried AAValueSimplify for the returned values so they will be
5693 // replaced if a simplified form was found. Nothing to do here.
5694 return ChangeStatus::UNCHANGED;
5695 }
5696
5697 /// See AbstractAttribute::trackStatistics()
5698 void trackStatistics() const override {
5699 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5700 }
5701};
5702
5703struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5704 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5705 : AAValueSimplifyImpl(IRP, A) {}
5706
5707 /// See AbstractAttribute::initialize(...).
5708 void initialize(Attributor &A) override {
5709 AAValueSimplifyImpl::initialize(A);
5710 Value &V = getAnchorValue();
5711
5712 // TODO: add other cases.
5713 if (isa<Constant>(V))
5714 indicatePessimisticFixpoint();
5715 }
5716
5717 /// Check if \p Cmp is a comparison we can simplify.
5718 ///
5719 /// We handle multiple cases, one in which at least one operand is an
5720 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5721 /// operand. Return true if successful; in that case SimplifiedAssociatedValue
5722 /// will be updated.
5723 bool handleCmp(Attributor &A, CmpInst &Cmp) {
5724 auto Union = [&](Value &V) {
5725 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5726 SimplifiedAssociatedValue, &V, V.getType());
5727 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5728 };
5729
5730 Value *LHS = Cmp.getOperand(0);
5731 Value *RHS = Cmp.getOperand(1);
5732
5733 // Simplify the operands first.
5734 bool UsedAssumedInformation = false;
5735 const auto &SimplifiedLHS =
5736 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5737 *this, UsedAssumedInformation);
5738 if (!SimplifiedLHS.hasValue())
5739 return true;
5740 if (!SimplifiedLHS.getValue())
5741 return false;
5742 LHS = *SimplifiedLHS;
5743
5744 const auto &SimplifiedRHS =
5745 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5746 *this, UsedAssumedInformation);
5747 if (!SimplifiedRHS.hasValue())
5748 return true;
5749 if (!SimplifiedRHS.getValue())
5750 return false;
5751 RHS = *SimplifiedRHS;
5752
5753 LLVMContext &Ctx = Cmp.getContext();
5754 // Handle the trivial case first in which we don't even need to think about
5755 // null or non-null.
5756 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5757 Constant *NewVal =
5758 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5759 if (!Union(*NewVal))
5760 return false;
5761 if (!UsedAssumedInformation)
5762 indicateOptimisticFixpoint();
5763 return true;
5764 }
5765
5766 // From now on we only handle equalities (==, !=).
5767 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5768 if (!ICmp || !ICmp->isEquality())
5769 return false;
5770
5771 bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5772 bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5773 if (!LHSIsNull && !RHSIsNull)
5774 return false;
5775
5776 // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5777 // non-nullptr operand and if we assume it's non-null we can conclude the
5778 // result of the comparison.
5779 assert((LHSIsNull || RHSIsNull) &&
5780 "Expected nullptr versus non-nullptr comparison at this point");
5781
5782 // The index is the operand that we assume is not null.
5783 unsigned PtrIdx = LHSIsNull;
5784 auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5785 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5786 DepClassTy::REQUIRED);
5787 if (!PtrNonNullAA.isAssumedNonNull())
5788 return false;
5789 UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5790
5791 // The new value depends on the predicate, true for != and false for ==.
5792 Constant *NewVal = ConstantInt::get(
5793 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5794 if (!Union(*NewVal))
5795 return false;
5796
5797 if (!UsedAssumedInformation)
5798 indicateOptimisticFixpoint();
5799
5800 return true;
5801 }
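// For illustration: given `%c = icmp eq i8* %p, null` where AANonNull assumes
// `%p` to be non-null, handleCmp records `i1 false` as the simplified value
// (`i1 true` for the `ne` predicate). The optimistic fixpoint is only declared
// once the non-null property is known rather than merely assumed.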
5802
5803 /// Use the generic, non-optimistic InstSimplify functionality if we managed to
5804 /// simplify any operand of the instruction \p I. Return true if successful;
5805 /// in that case SimplifiedAssociatedValue will be updated.
5806 bool handleGenericInst(Attributor &A, Instruction &I) {
5807 bool SomeSimplified = false;
5808 bool UsedAssumedInformation = false;
5809
5810 SmallVector<Value *, 8> NewOps(I.getNumOperands());
5811 int Idx = 0;
5812 for (Value *Op : I.operands()) {
5813 const auto &SimplifiedOp =
5814 A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5815 *this, UsedAssumedInformation);
5816 // If we are not sure about any operand, we are not sure about the entire
5817 // instruction, so we'll wait.
5818 if (!SimplifiedOp.hasValue())
5819 return true;
5820
5821 if (SimplifiedOp.getValue())
5822 NewOps[Idx] = SimplifiedOp.getValue();
5823 else
5824 NewOps[Idx] = Op;
5825
5826 SomeSimplified |= (NewOps[Idx] != Op);
5827 ++Idx;
5828 }
5829
5830 // We won't bother with the InstSimplify interface if we didn't simplify any
5831 // operand ourselves.
5832 if (!SomeSimplified)
5833 return false;
5834
5835 InformationCache &InfoCache = A.getInfoCache();
5836 Function *F = I.getFunction();
5837 const auto *DT =
5838 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5839 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5840 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5841 OptimizationRemarkEmitter *ORE = nullptr;
5842
5843 const DataLayout &DL = I.getModule()->getDataLayout();
5844 SimplifyQuery Q(DL, TLI, DT, AC, &I);
5845 if (Value *SimplifiedI =
5846 SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5847 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5848 SimplifiedAssociatedValue, SimplifiedI, I.getType());
5849 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5850 }
5851 return false;
5852 }
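// For illustration: if another AA simplifies the second operand of
// `%a = sub i32 %x, %y` to the constant 0, SimplifyInstructionWithOperands can
// fold the whole instruction to `%x`. The result is only recorded in
// SimplifiedAssociatedValue; no IR is rewritten here.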
5853
5854 /// See AbstractAttribute::updateImpl(...).
5855 ChangeStatus updateImpl(Attributor &A) override {
5856 auto Before = SimplifiedAssociatedValue;
5857
5858 // Do not simplify loads that are only used in llvm.assume if we cannot also
5859 // remove all stores that may feed into the load. The reason is that the
5860 // assume is probably worth something as long as the stores are around.
5861 if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5862 InformationCache &InfoCache = A.getInfoCache();
5863 if (InfoCache.isOnlyUsedByAssume(*LI)) {
5864 SmallSetVector<Value *, 4> PotentialCopies;
5865 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5866 bool UsedAssumedInformation = false;
5867 if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5868 PotentialValueOrigins, *this,
5869 UsedAssumedInformation,
5870 /* OnlyExact */ true)) {
5871 if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5872 if (!I)
5873 return true;
5874 if (auto *SI = dyn_cast<StoreInst>(I))
5875 return A.isAssumedDead(SI->getOperandUse(0), this,
5876 /* LivenessAA */ nullptr,
5877 UsedAssumedInformation,
5878 /* CheckBBLivenessOnly */ false);
5879 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5880 UsedAssumedInformation,
5881 /* CheckBBLivenessOnly */ false);
5882 }))
5883 return indicatePessimisticFixpoint();
5884 }
5885 }
5886 }
5887
5888 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5889 bool Stripped) -> bool {
5890 auto &AA = A.getAAFor<AAValueSimplify>(
5891 *this, IRPosition::value(V, getCallBaseContext()),
5892 DepClassTy::REQUIRED);
5893 if (!Stripped && this == &AA) {
5894
5895 if (auto *I = dyn_cast<Instruction>(&V)) {
5896 if (auto *Cmp = dyn_cast<CmpInst>(&V))
5897 if (handleCmp(A, *Cmp))
5898 return true;
5899 if (handleGenericInst(A, *I))
5900 return true;
5901 }
5902 // TODO: Look at the instruction and check recursively.
5903
5904 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5905 << "\n");
5906 return false;
5907 }
5908 return checkAndUpdate(A, *this,
5909 IRPosition::value(V, getCallBaseContext()));
5910 };
5911
5912 bool Dummy = false;
5913 bool UsedAssumedInformation = false;
5914 if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5915 VisitValueCB, getCtxI(),
5916 UsedAssumedInformation,
5917 /* UseValueSimplify */ false))
5918 if (!askSimplifiedValueForOtherAAs(A))
5919 return indicatePessimisticFixpoint();
5920
5921 // If a candidate was found in this update, return CHANGED.
5922 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5923 : ChangeStatus::CHANGED;
5924 }
5925
5926 /// See AbstractAttribute::trackStatistics()
5927 void trackStatistics() const override {
5928 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5929 }
5930};
5931
5932struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5933 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5934 : AAValueSimplifyImpl(IRP, A) {}
5935
5936 /// See AbstractAttribute::initialize(...).
5937 void initialize(Attributor &A) override {
5938 SimplifiedAssociatedValue = nullptr;
5939 indicateOptimisticFixpoint();
5940 }
5941 /// See AbstractAttribute::updateImpl(...).
5942 ChangeStatus updateImpl(Attributor &A) override {
5943 llvm_unreachable(
5944 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5945 }
5946 /// See AbstractAttribute::trackStatistics()
5947 void trackStatistics() const override {
5948 STATS_DECLTRACK_FN_ATTR(value_simplify)
5949 }
5950};
5951
5952struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5953 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5954 : AAValueSimplifyFunction(IRP, A) {}
5955 /// See AbstractAttribute::trackStatistics()
5956 void trackStatistics() const override {
5957 STATS_DECLTRACK_CS_ATTR(value_simplify)
5958 }
5959};
5960
5961struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5962 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5963 : AAValueSimplifyImpl(IRP, A) {}
5964
5965 void initialize(Attributor &A) override {
5966 AAValueSimplifyImpl::initialize(A);
5967 Function *Fn = getAssociatedFunction();
5968 if (!Fn) {
5969 indicatePessimisticFixpoint();
5970 return;
5971 }
5972 for (Argument &Arg : Fn->args()) {
5973 if (Arg.hasReturnedAttr()) {
5974 auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5975 Arg.getArgNo());
5976 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5977 checkAndUpdate(A, *this, IRP))
5978 indicateOptimisticFixpoint();
5979 else
5980 indicatePessimisticFixpoint();
5981 return;
5982 }
5983 }
5984 }
5985
5986 /// See AbstractAttribute::updateImpl(...).
5987 ChangeStatus updateImpl(Attributor &A) override {
5988 auto Before = SimplifiedAssociatedValue;
5989 auto &RetAA = A.getAAFor<AAReturnedValues>(
5990 *this, IRPosition::function(*getAssociatedFunction()),
5991 DepClassTy::REQUIRED);
5992 auto PredForReturned =
5993 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5994 bool UsedAssumedInformation = false;
5995 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5996 &RetVal, *cast<CallBase>(getCtxI()), *this,
5997 UsedAssumedInformation);
5998 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5999 SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6000 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6001 };
6002 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6003 if (!askSimplifiedValueForOtherAAs(A))
6004 return indicatePessimisticFixpoint();
6005 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6006 : ChangeStatus::CHANGED;
6007 }
6008
6009 void trackStatistics() const override {
6010 STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6011 }
6012};
6013
6014struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6015 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6016 : AAValueSimplifyFloating(IRP, A) {}
6017
6018 /// See AbstractAttribute::manifest(...).
6019 ChangeStatus manifest(Attributor &A) override {
6020 ChangeStatus Changed = ChangeStatus::UNCHANGED;
6021
6022 if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6023 Use &U = cast<CallBase>(&getAnchorValue())
6024 ->getArgOperandUse(getCallSiteArgNo());
6025 if (A.changeUseAfterManifest(U, *NewV))
6026 Changed = ChangeStatus::CHANGED;
6027 }
6028
6029 return Changed | AAValueSimplify::manifest(A);
6030 }
6031
6032 void trackStatistics() const override {
6033 STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6034 }
6035};
6036} // namespace
6037
6038/// ----------------------- Heap-To-Stack Conversion ---------------------------
6039namespace {
6040struct AAHeapToStackFunction final : public AAHeapToStack {
6041
6042 struct AllocationInfo {
6043 /// The call that allocates the memory.
6044 CallBase *const CB;
6045
6046 /// The library function id for the allocation.
6047 LibFunc LibraryFunctionId = NotLibFunc;
6048
6049 /// The status wrt. a rewrite.
6050 enum {
6051 STACK_DUE_TO_USE,
6052 STACK_DUE_TO_FREE,
6053 INVALID,
6054 } Status = STACK_DUE_TO_USE;
6055
6056 /// Flag to indicate if we encountered a use that might free this allocation
6057 /// but which is not in the deallocation infos.
6058 bool HasPotentiallyFreeingUnknownUses = false;
6059
6060 /// The set of free calls that use this allocation.
6061 SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6062 };
6063
6064 struct DeallocationInfo {
6065 /// The call that deallocates the memory.
6066 CallBase *const CB;
6067
6068 /// Flag to indicate if we don't know all objects this deallocation might
6069 /// free.
6070 bool MightFreeUnknownObjects = false;
6071
6072 /// The set of allocation calls that are potentially freed.
6073 SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6074 };
6075
6076 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6077 : AAHeapToStack(IRP, A) {}
6078
6079 ~AAHeapToStackFunction() {
6080 // Ensure we call the destructor so we release any memory allocated in the
6081 // sets.
6082 for (auto &It : AllocationInfos)
6083 It.second->~AllocationInfo();
6084 for (auto &It : DeallocationInfos)
6085 It.second->~DeallocationInfo();
6086 }
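// The explicit destructor calls above are needed because the info objects are
// placement-new'ed into the Attributor's bump allocator (see initialize
// below), which never runs destructors itself; without them the heap storage
// owned by the SetVector members would leak.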
6087
6088 void initialize(Attributor &A) override {
6089 AAHeapToStack::initialize(A);
6090
6091 const Function *F = getAnchorScope();
6092 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6093
6094 auto AllocationIdentifierCB = [&](Instruction &I) {
6095 CallBase *CB = dyn_cast<CallBase>(&I);
6096 if (!CB)
6097 return true;
6098 if (isFreeCall(CB, TLI)) {
6099 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6100 return true;
6101 }
6102 // To do heap to stack, we need to know that the allocation itself is
6103 // removable once uses are rewritten, and that we can initialize the
6104 // alloca to the same pattern as the original allocation result.
6105 if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6106 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6107 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6108 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6109 AllocationInfos[CB] = AI;
6110 if (TLI)
6111 TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6112 }
6113 }
6114 return true;
6115 };
6116
6117 bool UsedAssumedInformation = false;
6118 bool Success = A.checkForAllCallLikeInstructions(
6119 AllocationIdentifierCB, *this, UsedAssumedInformation,
6120 /* CheckBBLivenessOnly */ false,
6121 /* CheckPotentiallyDead */ true);
6122 (void)Success;
6123 assert(Success && "Did not expect the call base visit callback to fail!");
6124
6125 Attributor::SimplifictionCallbackTy SCB =
6126 [](const IRPosition &, const AbstractAttribute *,
6127 bool &) -> Optional<Value *> { return nullptr; };
6128 for (const auto &It : AllocationInfos)
6129 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6130 SCB);
6131 for (const auto &It : DeallocationInfos)
6132 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6133 SCB);
6134 }
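// Presumably the simplification callbacks registered above pin the allocation
// and deallocation call sites as "not simplifiable", so no other AA replaces
// these call results while heap-to-stack is still deciding whether to turn
// them into an alloca in manifest().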
6135
6136 const std::string getAsStr() const override {
6137 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6138 for (const auto &It : AllocationInfos) {
6139 if (It.second->Status == AllocationInfo::INVALID)
6140 ++NumInvalidMallocs;
6141 else
6142 ++NumH2SMallocs;
6143 }
6144 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6145 std::to_string(NumInvalidMallocs);
6146 }
6147
6148 /// See AbstractAttribute::trackStatistics().
6149 void trackStatistics() const override {
6150 STATS_DECL(
6151 MallocCalls, Function,
6152 "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6153 for (auto &It : AllocationInfos)
6154 if (It.second->Status != AllocationInfo::INVALID)
6155 ++BUILD_STAT_NAME(MallocCalls, Function);
6156 }
6157
6158 bool isAssumedHeapToStack(const CallBase &CB) const override {
6159 if (isValidState())
6160 if (AllocationInfo *AI =
6161 AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6162 return AI->Status != AllocationInfo::INVALID;
6163 return false;
6164 }
6165
6166 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6167 if (!isValidState())
6168 return false;
6169
6170 for (auto &It : AllocationInfos) {
6171 AllocationInfo &AI = *It.second;
6172 if (AI.Status == AllocationInfo::INVALID)
6173 continue;
6174
6175 if (AI.PotentialFreeCalls.count(&CB))
6176 return true;
6177 }
6178
6179 return false;
6180 }
6181
6182 ChangeStatus manifest(Attributor &A) override {
6183 assert(getState().isValidState() &&
6184 "Attempted to manifest an invalid state!");
6185
6186 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6187 Function *F = getAnchorScope();
6188 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6189
6190 for (auto &It : AllocationInfos) {
6191 AllocationInfo &AI = *It.second;
6192 if (AI.Status == AllocationInfo::INVALID)
6193 continue;
6194
6195 for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6196 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6197 A.deleteAfterManifest(*FreeCall);
6198 HasChanged = ChangeStatus::CHANGED;
Value stored to 'HasChanged' is never read
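// The store flagged above appears to be dead because every loop iteration
// that reaches it also reaches the unconditional
// `HasChanged = ChangeStatus::CHANGED;` at line 6274, and `HasChanged` is not
// read in between (its only read is the `return HasChanged;` at line 6277).
// Dropping the assignment at line 6198 would therefore not change behavior.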
6199 }
6200
6201 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6202 << "\n");
6203
6204 auto Remark = [&](OptimizationRemark OR) {
6205 LibFunc IsAllocShared;
6206 if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6207 if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6208 return OR << "Moving globalized variable to the stack.";
6209 return OR << "Moving memory allocation from the heap to the stack.";
6210 };
6211 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6212 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6213 else
6214 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6215
6216 const DataLayout &DL = A.getInfoCache().getDL();
6217 Value *Size;
6218 Optional<APInt> SizeAPI = getSize(A, *this, AI);
6219 if (SizeAPI.hasValue()) {
6220 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6221 } else {
6222 LLVMContext &Ctx = AI.CB->getContext();
6223 ObjectSizeOpts Opts;
6224 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6225 SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6226 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6227 cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6228 Size = SizeOffsetPair.first;
6229 }
6230
6231 Align Alignment(1);
6232 if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6233 Alignment = max(Alignment, RetAlign);
6234 if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6235 Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6236 assert(AlignmentAPI.hasValue() &&
6237 "Expected an alignment during manifest!");
6238 Alignment =
6239 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
6240 }
6241
6242 // TODO: Hoist the alloca towards the function entry.
6243 unsigned AS = DL.getAllocaAddrSpace();
6244 Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6245 Size, Alignment, "", AI.CB);
6246
6247 if (Alloca->getType() != AI.CB->getType())
6248 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6249 Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6250
6251 auto *I8Ty = Type::getInt8Ty(F->getContext());
6252 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6253 assert(InitVal &&
6254 "Must be able to materialize initial memory state of allocation");
6255
6256 A.changeValueAfterManifest(*AI.CB, *Alloca);
6257
6258 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6259 auto *NBB = II->getNormalDest();
6260 BranchInst::Create(NBB, AI.CB->getParent());
6261 A.deleteAfterManifest(*AI.CB);
6262 } else {
6263 A.deleteAfterManifest(*AI.CB);
6264 }
6265
6266 // Initialize the alloca with the same value as used by the allocation
6267 // function. We can skip undef as the initial value of an alloc is
6268 // undef, and the memset would simply end up being DSEd.
6269 if (!isa<UndefValue>(InitVal)) {
6270 IRBuilder<> Builder(Alloca->getNextNode());
6271 // TODO: Use alignment above if align!=1
6272 Builder.CreateMemSet(Alloca, InitVal, Size, None);
6273 }
6274 HasChanged = ChangeStatus::CHANGED;
6275 }
6276
6277 return HasChanged;
6278 }
6279
6280 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6281 Value &V) {
6282 bool UsedAssumedInformation = false;
6283 Optional<Constant *> SimpleV =
6284 A.getAssumedConstant(V, AA, UsedAssumedInformation);
6285 if (!SimpleV.hasValue())
6286 return APInt(64, 0);
6287 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6288 return CI->getValue();
6289 return llvm::None;
6290 }
6291
6292 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6293 AllocationInfo &AI) {
6294 auto Mapper = [&](const Value *V) -> const Value * {
6295 bool UsedAssumedInformation = false;
6296 if (Optional<Constant *> SimpleV =
6297 A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6298 if (*SimpleV)
6299 return *SimpleV;
6300 return V;
6301 };
6302
6303 const Function *F = getAnchorScope();
6304 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6305 return getAllocSize(AI.CB, TLI, Mapper);
6306 }
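// The Mapper above lets getAllocSize see through assumed constants: e.g. for a
// call like `malloc(%n)` where `%n` is assumed to simplify to 32, getSize
// returns an APInt of 32, which is what the MaxHeapToStackSize check and the
// alloca size in manifest() consume.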
6307
6308 /// Collection of all malloc-like calls in a function with associated
6309 /// information.
6310 MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6311
6312 /// Collection of all free-like calls in a function with associated
6313 /// information.
6314 MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6315
6316 ChangeStatus updateImpl(Attributor &A) override;
6317};
6318
6319ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6320 ChangeStatus Changed = ChangeStatus::UNCHANGED;
6321 const Function *F = getAnchorScope();
6322 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6323
6324 const auto &LivenessAA =
6325 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6326
6327 MustBeExecutedContextExplorer &Explorer =
6328 A.getInfoCache().getMustBeExecutedContextExplorer();
6329
6330 bool StackIsAccessibleByOtherThreads =
6331 A.getInfoCache().stackIsAccessibleByOtherThreads();
6332
6333 // Flag to ensure we update our deallocation information at most once per
6334 // updateImpl call and only if we use the free check reasoning.
6335 bool HasUpdatedFrees = false;
6336
6337 auto UpdateFrees = [&]() {
6338 HasUpdatedFrees = true;
6339
6340 for (auto &It : DeallocationInfos) {
6341 DeallocationInfo &DI = *It.second;
6342 // For now we cannot use deallocations that have unknown inputs, skip
6343 // them.
6344 if (DI.MightFreeUnknownObjects)
6345 continue;
6346
6347 // No need to analyze dead calls, ignore them instead.
6348 bool UsedAssumedInformation = false;
6349 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6350 /* CheckBBLivenessOnly */ true))
6351 continue;
6352
6353 // Use the optimistic version to get the freed objects, ignoring dead
6354 // branches etc.
6355 SmallVector<Value *, 8> Objects;
6356 if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6357 *this, DI.CB,
6358 UsedAssumedInformation)) {
6359 LLVM_DEBUG(
6360 dbgs()
6361 << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6362 DI.MightFreeUnknownObjects = true;
6363 continue;
6364 }
6365
6366 // Check each object explicitly.
6367 for (auto *Obj : Objects) {
6368 // Free of null and undef can be ignored as no-ops (or UB in the latter
6369 // case).
6370 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6371 continue;
6372
6373 CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6374 if (!ObjCB) {
6375 LLVM_DEBUG(dbgs()
6376 << "[H2S] Free of a non-call object: " << *Obj << "\n");
6377 DI.MightFreeUnknownObjects = true;
6378 continue;
6379 }
6380
6381 AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6382 if (!AI) {
6383 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6384 << "\n");
6385 DI.MightFreeUnknownObjects = true;
6386 continue;
6387 }
6388
6389 DI.PotentialAllocationCalls.insert(ObjCB);
6390 }
6391 }
6392 };
6393
6394 auto FreeCheck = [&](AllocationInfo &AI) {
6395 // If the stack is not accessible by other threads, the "must-free" logic
6396 // doesn't apply as the pointer could be shared and needs to be placed in
6397 // "shareable" memory.
6398 if (!StackIsAccessibleByOtherThreads) {
6399 auto &NoSyncAA =
6400 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6401 if (!NoSyncAA.isAssumedNoSync()) {
6402 LLVM_DEBUG(
6403 dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6404 "other threads and function is not nosync:\n");
6405 return false;
6406 }
6407 }
6408 if (!HasUpdatedFrees)
6409 UpdateFrees();
6410
6411 // TODO: Allow multi exit functions that have different free calls.
6412 if (AI.PotentialFreeCalls.size() != 1) {
6413 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6414 << AI.PotentialFreeCalls.size() << "\n");
6415 return false;
6416 }
6417 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6418 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6419 if (!DI) {
6420 LLVM_DEBUG(
6421 dbgs() << "[H2S] unique free call was not known as deallocation call "
6422 << *UniqueFree << "\n");
6423 return false;
6424 }
6425 if (DI->MightFreeUnknownObjects) {
6426 LLVM_DEBUG(
6427 dbgs() << "[H2S] unique free call might free unknown allocations\n");
6428 return false;
6429 }
6430 if (DI->PotentialAllocationCalls.size() > 1) {
6431 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6432 << DI->PotentialAllocationCalls.size()
6433 << " different allocations\n");
6434 return false;
6435 }
6436 if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6437 LLVM_DEBUG(
6438 dbgs()
6439 << "[H2S] unique free call not known to free this allocation but "
6440 << **DI->PotentialAllocationCalls.begin() << "\n");
6441 return false;
6442 }
6443 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6444 if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6445 LLVM_DEBUG(
6446 dbgs()
6447 << "[H2S] unique free call might not be executed with the allocation "
6448 << *UniqueFree << "\n");
6449 return false;
6450 }
6451 return true;
6452 };
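// In short, FreeCheck only succeeds when the allocation has exactly one
// potential free call, that call is a known deallocation which can free
// nothing but this allocation, and it is must-executed whenever the allocation
// executes, e.g.:
//   %p = call i8* @malloc(i64 16)   ; the allocation
//   ...                             ; uses, no other frees or early exits
//   call void @free(i8* %p)         ; unique free, must-reached from %p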
6453
6454 auto UsesCheck = [&](AllocationInfo &AI) {
6455 bool ValidUsesOnly = true;
6456
6457 auto Pred = [&](const Use &U, bool &Follow) -> bool {
6458 Instruction *UserI = cast<Instruction>(U.getUser());
6459 if (isa<LoadInst>(UserI))
6460 return true;
6461 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6462 if (SI->getValueOperand() == U.get()) {
6463 LLVM_DEBUG(dbgs()
6464 << "[H2S] escaping store to memory: " << *UserI << "\n");
6465 ValidUsesOnly = false;
6466 } else {
6467 // A store into the malloc'ed memory is fine.
6468 }
6469 return true;
6470 }
6471 if (auto *CB = dyn_cast<CallBase>(UserI)) {
6472 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6473 return true;
6474 if (DeallocationInfos.count(CB)) {
6475 AI.PotentialFreeCalls.insert(CB);
6476 return true;
6477 }
6478
6479 unsigned ArgNo = CB->getArgOperandNo(&U);
6480
6481 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6482 *this, IRPosition::callsite_argument(*CB, ArgNo),
6483 DepClassTy::OPTIONAL);
6484
6485 // If a call site argument use is nofree, we are fine.
6486 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6487 *this, IRPosition::callsite_argument(*CB, ArgNo),
6488 DepClassTy::OPTIONAL);
6489
6490 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6491 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6492 if (MaybeCaptured ||
6493 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6494 MaybeFreed)) {
6495 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6496
6497 // Emit a missed remark if this is missed OpenMP globalization.
6498 auto Remark = [&](OptimizationRemarkMissed ORM) {
6499 return ORM
6500 << "Could not move globalized variable to the stack. "
6501 "Variable is potentially captured in call. Mark "
6502 "parameter as `__attribute__((noescape))` to override.";
6503 };
6504
6505 if (ValidUsesOnly &&
6506 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6507 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6508
6509 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6510 ValidUsesOnly = false;
6511 }
6512 return true;
6513 }
6514
6515 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6516 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6517 Follow = true;
6518 return true;
6519 }
6520 // Unknown user for which we cannot track uses further (in a way that
6521 // makes sense).
6522 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6523 ValidUsesOnly = false;
6524 return true;
6525 };
6526 if (!A.checkForAllUses(Pred, *this, *AI.CB))
6527 return false;
6528 return ValidUsesOnly;
6529 };
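// UsesCheck classifies uses of the allocated pointer: loads, stores *into* the
// allocation, lifetime markers, known free calls, and nocapture/nofree call
// arguments are fine; GEPs, bitcasts, PHIs, and selects are followed
// transitively. Storing the pointer itself to memory, passing it to a
// potentially capturing or freeing call, or any other unknown user invalidates
// heap-to-stack for this allocation.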
6530
6531 // The actual update starts here. We look at all allocations and depending on
6532 // their status perform the appropriate check(s).
6533 for (auto &It : AllocationInfos) {
6534 AllocationInfo &AI = *It.second;
6535 if (AI.Status == AllocationInfo::INVALID)
6536 continue;
6537
6538 if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6539 Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6540 if (!APAlign) {
6541 // Can't generate an alloca which respects the required alignment
6542 // on the allocation.
6543 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6544 << "\n");
6545 AI.Status = AllocationInfo::INVALID;
6546 Changed = ChangeStatus::CHANGED;
6547 continue;
6548 } else {
6549 if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6550 !APAlign->isPowerOf2()) {
6551 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6552 << "\n");
6553 AI.Status = AllocationInfo::INVALID;
6554 Changed = ChangeStatus::CHANGED;
6555 continue;
6556 }
6557 }
6558 }
6559
6560 if (MaxHeapToStackSize != -1) {
6561 Optional<APInt> Size = getSize(A, *this, AI);
6562 if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6563 LLVM_DEBUG({
6564 if (!Size.hasValue())
6565 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6566 else
6567 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6568 << MaxHeapToStackSize << "\n";
6569 });
6570
6571 AI.Status = AllocationInfo::INVALID;
6572 Changed = ChangeStatus::CHANGED;
6573 continue;
6574 }
6575 }
6576
6577 switch (AI.Status) {
6578 case AllocationInfo::STACK_DUE_TO_USE:
6579 if (UsesCheck(AI))
6580 continue;
6581 AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6582 LLVM_FALLTHROUGH;
6583 case AllocationInfo::STACK_DUE_TO_FREE:
6584 if (FreeCheck(AI))
6585 continue;
6586 AI.Status = AllocationInfo::INVALID;
6587 Changed = ChangeStatus::CHANGED;
6588 continue;
6589 case AllocationInfo::INVALID:
6590 llvm_unreachable("Invalid allocations should never reach this point!");
6591 };
6592 }
6593
6594 return Changed;
6595}
6596} // namespace
6597
6598/// ----------------------- Privatizable Pointers ------------------------------
6599namespace {
6600struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6601 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6602 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6603
6604 ChangeStatus indicatePessimisticFixpoint() override {
6605 AAPrivatizablePtr::indicatePessimisticFixpoint();
6606 PrivatizableType = nullptr;
6607 return ChangeStatus::CHANGED;
6608 }
6609
6610 /// Identify the type we can choose for a private copy of the underlying
6611 /// argument. None means it is not clear yet; nullptr means there is none.
6612 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6613
6614 /// Return a privatizable type that encloses both T0 and T1.
6615 /// TODO: This is merely a stub for now as we should manage a mapping as well.
6616 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6617 if (!T0.hasValue())
6618 return T1;
6619 if (!T1.hasValue())
6620 return T0;
6621 if (T0 == T1)
6622 return T0;
6623 return nullptr;
6624 }
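// For example, combineTypes(None, i32) and combineTypes(i32, i32) both yield
// i32, while combineTypes(i32, float) yields nullptr, i.e. no privatizable
// type encloses both.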
6625
6626 Optional<Type *> getPrivatizableType() const override {
6627 return PrivatizableType;
6628 }
6629
6630 const std::string getAsStr() const override {
6631 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6632 }
6633
6634protected:
6635 Optional<Type *> PrivatizableType;
6636};
6637
6638// TODO: Do this for call site arguments (probably also other values) as well.
6639
6640struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6641 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6642 : AAPrivatizablePtrImpl(IRP, A) {}
6643
6644 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6645 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6646 // If this is a byval argument and we know all the call sites (so we can
6647 // rewrite them), there is no need to check them explicitly.
6648 bool UsedAssumedInformation = false;
6649 SmallVector<Attribute, 1> Attrs;
6650 getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6651 if (!Attrs.empty() &&
6652 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6653 true, UsedAssumedInformation))
6654 return Attrs[0].getValueAsType();
6655
6656 Optional<Type *> Ty;
6657 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6658
6659 // Make sure the associated call site argument has the same type at all call
6660 // sites and it is an allocation we know is safe to privatize; for now that
6661 // means we only allow alloca instructions.
6662 // TODO: We can additionally analyze the accesses in the callee to create
6663 // the type from that information instead. That is a little more
6664 // involved and will be done in a follow up patch.
6665 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6666 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6667 // Check if a corresponding argument was found or if it is one not
6668 // associated (which can happen for callback calls).
6669 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6670 return false;
6671
6672 // Check that all call sites agree on a type.
6673 auto &PrivCSArgAA =
6674 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6675 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6676
6677 LLVM_DEBUG({
6678 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6679 if (CSTy.hasValue() && CSTy.getValue())
6680 CSTy.getValue()->print(dbgs());
6681 else if (CSTy.hasValue())
6682 dbgs() << "<nullptr>";
6683 else
6684 dbgs() << "<none>";
6685 });
6686
6687 Ty = combineTypes(Ty, CSTy);
6688
6689 LLVM_DEBUG({
6690 dbgs() << " : New Type: ";
6691 if (Ty.hasValue() && Ty.getValue())
6692 Ty.getValue()->print(dbgs());
6693 else if (Ty.hasValue())
6694 dbgs() << "<nullptr>";
6695 else
6696 dbgs() << "<none>";
6697 dbgs() << "\n";
6698 });
6699
6700 return !Ty.hasValue() || Ty.getValue();
6701 };
6702
6703 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6704 UsedAssumedInformation))
6705 return nullptr;
6706 return Ty;
6707 }
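
The Optional<Type *> returned above follows a three-way convention that updateImpl below relies on. A minimal analogue using std::optional, for illustration only (the analyzed source uses llvm::Optional<Type *>; the names here are hypothetical):

    #include <optional>

    struct Type;                                // stand-in for llvm::Type
    using PrivTy = std::optional<Type *>;

    PrivTy NotYetKnown;                         // no call site evidence yet -> keep state (UNCHANGED)
    PrivTy NotPrivatizable = nullptr;           // known not privatizable    -> indicatePessimisticFixpoint()
    // PrivTy AssumedPrivatizable = &SomeType;  // all call sites agree on one pointee type
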
6708
6709 /// See AbstractAttribute::updateImpl(...).
6710 ChangeStatus updateImpl(Attributor &A) override {
6711 PrivatizableType = identifyPrivatizableType(A);
6712 if (!PrivatizableType.hasValue())
6713 return ChangeStatus::UNCHANGED;
6714 if (!PrivatizableType.getValue())
6715 return indicatePessimisticFixpoint();
6716
6717 // The dependence is optional so we don't give up once we give up on the
6718 // alignment.
6719 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6720 DepClassTy::OPTIONAL);
6721
6722 // Avoid arguments with padding for now.
6723 if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6724 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6725 A.getInfoCache().getDL())) {
6726 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6727 return indicatePessimisticFixpoint();
6728 }
6729
6730 // Collect the types that will replace the privatizable type in the function
6731 // signature.
6732 SmallVector<Type *, 16> ReplacementTypes;
6733 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6734
6735 // Verify callee and caller agree on how the promoted argument would be
6736 // passed.
6737 Function &Fn = *getIRPosition().getAnchorScope();
6738 const auto *TTI =
6739 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6740 if (!TTI) {
6741 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6742 << Fn.getName() << "\n");
6743 return indicatePessimisticFixpoint();
6744 }
6745
6746 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6747 CallBase *CB = ACS.getInstruction();
6748 return TTI->areTypesABICompatible(
6749 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6750 };
6751 bool UsedAssumedInformation = false;
6752 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6753 UsedAssumedInformation)) {
6754 LLVM_DEBUG(
6755 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6756 << Fn.getName() << "\n");
6757 return indicatePessimisticFixpoint();
6758 }
6759
6760 // Register a rewrite of the argument.
6761 Argument *Arg = getAssociatedArgument();
6762 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6763 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6764 return indicatePessimisticFixpoint();
6765 }
6766
6767 unsigned ArgNo = Arg->getArgNo();
6768
6769 // Helper to check if for the given call site the associated argument is
6770 // passed to a callback where the privatization would be different.
6771 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6772 SmallVector<const Use *, 4> CallbackUses;
6773 AbstractCallSite::getCallbackUses(CB, CallbackUses);
6774 for (const Use *U : CallbackUses) {
6775 AbstractCallSite CBACS(U);
6776 assert(CBACS && CBACS.isCallbackCall());
6777 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6778 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6779
6780 LLVM_DEBUG({
6781 dbgs()
6782 << "[AAPrivatizablePtr] Argument " << *Arg
6783 << "check if can be privatized in the context of its parent ("
6784 << Arg->getParent()->getName()
6785 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6786 "callback ("
6787 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6788 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6789 << CBACS.getCallArgOperand(CBArg) << " vs "
6790 << CB.getArgOperand(ArgNo) << "\n"
6791 << "[AAPrivatizablePtr] " << CBArg << " : "
6792 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6793 });
6794
6795 if (CBArgNo != int(ArgNo))
6796 continue;
6797 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6798 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6799 if (CBArgPrivAA.isValidState()) {
6800 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6801 if (!CBArgPrivTy.hasValue())
6802 continue;
6803 if (CBArgPrivTy.getValue() == PrivatizableType)
6804 continue;
6805 }
6806
6807 LLVM_DEBUG({
6808 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6809 << " cannot be privatized in the context of its parent ("
6810 << Arg->getParent()->getName()
6811 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6812 "callback ("
6813 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6814 << ").\n[AAPrivatizablePtr] for which the argument "
6815 "privatization is not compatible.\n";
6816 });
6817 return false;
6818 }
6819 }
6820 return true;
6821 };
6822
6823 // Helper to check if for the given call site the associated argument is
6824 // passed to a direct call where the privatization would be different.
6825 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6826 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6827 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6828 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6829 "Expected a direct call operand for callback call operand");
6830
6831 LLVM_DEBUG({
6832 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6833 << " check if be privatized in the context of its parent ("
6834 << Arg->getParent()->getName()
6835 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6836 "direct call of ("
6837 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6838 << ").\n";
6839 });
6840
6841 Function *DCCallee = DC->getCalledFunction();
6842 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6843 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6844 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6845 DepClassTy::REQUIRED);
6846 if (DCArgPrivAA.isValidState()) {
6847 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6848 if (!DCArgPrivTy.hasValue())
6849 return true;
6850 if (DCArgPrivTy.getValue() == PrivatizableType)
6851 return true;
6852 }
6853 }
6854
6855 LLVM_DEBUG({
6856 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6857 << " cannot be privatized in the context of its parent ("
6858 << Arg->getParent()->getName()
6859 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6860 "direct call of ("
6861 << ACS.getInstruction()->getCalledFunction()->getName()
6862 << ").\n[AAPrivatizablePtr] for which the argument "
6863 "privatization is not compatible.\n";
6864 });
6865 return false;
6866 };
6867
6868 // Helper to check if the associated argument is used at the given abstract
6869 // call site in a way that is incompatible with the privatization assumed
6870 // here.
6871 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6872 if (ACS.isDirectCall())
6873 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6874 if (ACS.isCallbackCall())
6875 return IsCompatiblePrivArgOfDirectCS(ACS);
6876 return false;
6877 };
6878
6879 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6880 UsedAssumedInformation))
6881 return indicatePessimisticFixpoint();
6882
6883 return ChangeStatus::UNCHANGED;
6884 }
6885
6886 /// Given a type to privatize \p PrivType, collect the constituents (which are
6887 /// used) in \p ReplacementTypes.
6888 static void
6889 identifyReplacementTypes(Type *PrivType,
6890 SmallVectorImpl<Type *> &ReplacementTypes) {
6891 // TODO: For now we expand the privatization type to the fullest which can
6892 // lead to dead arguments that need to be removed later.
6893 assert(PrivType && "Expected privatizable type!");
6894
6895 // Traverse the type, extract constituent types on the outermost level.
6896 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6897 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6898 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6899 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6900 ReplacementTypes.append(PrivArrayType->getNumElements(),
6901 PrivArrayType->getElementType());
6902 } else {
6903 ReplacementTypes.push_back(PrivType);
6904 }
6905 }
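
For orientation, a sketch of what the expansion above amounts to at the signature level; the struct and function names are hypothetical and only illustrate the effect of ReplacementTypes:

    struct S { int A; float B; };      // PrivType with two outermost members

    void callee_before(S *P);          // original: one privatizable pointer argument
    void callee_after(int A, float B); // rewritten: one argument per replacement type
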
6906
6907 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6908 /// The values needed are taken from the arguments of \p F starting at
6909 /// position \p ArgNo.
6910 static void createInitialization(Type *PrivType, Value &Base, Function &F,
6911 unsigned ArgNo, Instruction &IP) {
6912 assert(PrivType && "Expected privatizable type!");
6913
6914 IRBuilder<NoFolder> IRB(&IP);
6915 const DataLayout &DL = F.getParent()->getDataLayout();
6916
6917 // Traverse the type, build GEPs and stores.
6918 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6919 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6920 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6921 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6922 Value *Ptr =
6923 constructPointer(PointeeTy, PrivType, &Base,
6924 PrivStructLayout->getElementOffset(u), IRB, DL);
6925 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6926 }
6927 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6928 Type *PointeeTy = PrivArrayType->getElementType();
6929 Type *PointeePtrTy = PointeeTy->getPointerTo();
6930 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6931 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6932 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6933 u * PointeeTySize, IRB, DL);
6934 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6935 }
6936 } else {
6937 new StoreInst(F.getArg(ArgNo), &Base, &IP);
6938 }
6939 }
6940
6941 /// Extract values from \p Base according to the type \p PrivType at the
6942 /// call position \p ACS. The values are appended to \p ReplacementValues.
6943 void createReplacementValues(Align Alignment, Type *PrivType,
6944 AbstractCallSite ACS, Value *Base,
6945 SmallVectorImpl<Value *> &ReplacementValues) {
6946 assert(Base && "Expected base value!");
6947 assert(PrivType && "Expected privatizable type!");
6948 Instruction *IP = ACS.getInstruction();
6949
6950 IRBuilder<NoFolder> IRB(IP);
6951 const DataLayout &DL = IP->getModule()->getDataLayout();
6952
6953 Type *PrivPtrType = PrivType->getPointerTo();
6954 if (Base->getType() != PrivPtrType)
6955 Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6956 Base, PrivPtrType, "", ACS.getInstruction());
6957
6958 // Traverse the type, build GEPs and loads.
6959 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6960 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6961 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6962 Type *PointeeTy = PrivStructType->getElementType(u);
6963 Value *Ptr =
6964 constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6965 PrivStructLayout->getElementOffset(u), IRB, DL);
6966 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6967 L->setAlignment(Alignment);
6968 ReplacementValues.push_back(L);
6969 }
6970 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6971 Type *PointeeTy = PrivArrayType->getElementType();
6972 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6973 Type *PointeePtrTy = PointeeTy->getPointerTo();
6974 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6975 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6976 u * PointeeTySize, IRB, DL);
6977 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6978 L->setAlignment(Alignment);
6979 ReplacementValues.push_back(L);
6980 }
6981 } else {
6982 LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6983 L->setAlignment(Alignment);
6984 ReplacementValues.push_back(L);
6985 }
6986 }
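
A hedged sketch of the call-site effect of createReplacementValues, with hypothetical names; the real code builds the equivalent GEP and load instructions (aligned as deduced by AAAlign) right before the call:

    struct S { int A; float B; };
    void callee_priv(int A, float B);

    void caller(S *P) {
      int   A = P->A;      // loads emitted in place of passing the pointer
      float B = P->B;
      callee_priv(A, B);   // was: callee(P)
    }
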
6987
6988 /// See AbstractAttribute::manifest(...)
6989 ChangeStatus manifest(Attributor &A) override {
6990 if (!PrivatizableType.hasValue())
6991 return ChangeStatus::UNCHANGED;
6992 assert(PrivatizableType.getValue() && "Expected privatizable type!");
6993
6994 // Collect all tail calls in the function as we cannot allow new allocas to
6995 // escape into tail recursion.
6996 // TODO: Be smarter about new allocas escaping into tail calls.
6997 SmallVector<CallInst *, 16> TailCalls;
6998 bool UsedAssumedInformation = false;
6999 if (!A.checkForAllInstructions(
7000 [&](Instruction &I) {
7001 CallInst &CI = cast<CallInst>(I);
7002 if (CI.isTailCall())
7003 TailCalls.push_back(&CI);
7004 return true;
7005 },
7006 *this, {Instruction::Call}, UsedAssumedInformation))
7007 return ChangeStatus::UNCHANGED;
7008
7009 Argument *Arg = getAssociatedArgument();
7010 // Query AAAlign attribute for alignment of associated argument to
7011 // determine the best alignment of loads.
7012 const auto &AlignAA =
7013 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7014
7015 // Callback to repair the associated function. A new alloca is placed at the
7016 // beginning and initialized with the values passed through arguments. The
7017 // new alloca replaces the use of the old pointer argument.
7018 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7019 [=](const Attributor::ArgumentReplacementInfo &ARI,
7020 Function &ReplacementFn, Function::arg_iterator ArgIt) {
7021 BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7022 Instruction *IP = &*EntryBB.getFirstInsertionPt();
7023 const DataLayout &DL = IP->getModule()->getDataLayout();
7024 unsigned AS = DL.getAllocaAddrSpace();
7025 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7026 Arg->getName() + ".priv", IP);
7027 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7028 ArgIt->getArgNo(), *IP);
7029
7030 if (AI->getType() != Arg->getType())
7031 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7032 AI, Arg->getType(), "", IP);
7033 Arg->replaceAllUsesWith(AI);
7034
7035 for (CallInst *CI : TailCalls)
7036 CI->setTailCall(false);
7037 };
7038
7039 // Callback to repair a call site of the associated function. The elements
7040 // of the privatizable type are loaded prior to the call and passed to the
7041 // new function version.
7042 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7043 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7044 AbstractCallSite ACS,
7045 SmallVectorImpl<Value *> &NewArgOperands) {
7046 // When no alignment is specified for the load instruction,
7047 // natural alignment is assumed.
7048 createReplacementValues(
7049 assumeAligned(AlignAA.getAssumedAlign()),
7050 PrivatizableType.getValue(), ACS,
7051 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7052 NewArgOperands);
7053 };
7054
7055 // Collect the types that will replace the privatizable type in the function
7056 // signature.
7057 SmallVector<Type *, 16> ReplacementTypes;
7058 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
7059
7060 // Register a rewrite of the argument.
7061 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7062 std::move(FnRepairCB),
7063 std::move(ACSRepairCB)))
7064 return ChangeStatus::CHANGED;
7065 return ChangeStatus::UNCHANGED;
7066 }
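
Correspondingly, a hedged sketch of the callee-side repair done by FnRepairCB (hypothetical names): a fresh local object replaces the old pointer argument and is initialized from the new scalar arguments at entry, which is also why tail calls are cleared above.

    struct S { int A; float B; };

    void callee_priv(int A, float B) {
      S Priv;         // the new AllocaInst "<arg>.priv" in the entry block
      Priv.A = A;     // createInitialization stores F.getArg(ArgNo + u) ...
      Priv.B = B;     // ... into the corresponding member slots
      // the original body now uses &Priv wherever the old argument was used
      // (Arg->replaceAllUsesWith(AI)); &Priv must not escape into a tail call.
    }
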
7067
7068 /// See AbstractAttribute::trackStatistics()
7069 void trackStatistics() const override {
7070 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7071 }
7072};
7073
7074struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7075 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7076 : AAPrivatizablePtrImpl(IRP, A) {}
7077
7078 /// See AbstractAttribute::initialize(...).
7079 virtual void initialize(Attributor &A) override {
7080 // TODO: We can privatize more than arguments.
7081 indicatePessimisticFixpoint();
7082 }
7083
7084 ChangeStatus updateImpl(Attributor &A) override {
7085 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7086 "updateImpl will not be called");
7087 }
7088
7089 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7090 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7091 Value *Obj = getUnderlyingObject(&getAssociatedValue());
7092 if (!Obj) {
7093 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7094 return nullptr;
7095 }
7096
7097 if (auto *AI = dyn_cast<AllocaInst>(Obj))
7098 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7099 if (CI->isOne())
7100 return AI->getAllocatedType();
7101 if (auto *Arg = dyn_cast<Argument>(Obj)) {
7102 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7103 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7104 if (PrivArgAA.isAssumedPrivatizablePtr())
7105 return PrivArgAA.getPrivatizableType();
7106 }
7107
7108 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7109 "alloca nor privatizable argument: "
7110 << *Obj << "!\n");
7111 return nullptr;
7112 }
7113
7114 /// See AbstractAttribute::trackStatistics()
7115 void trackStatistics() const override {
7116 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7117 }
7118};
7119
7120struct AAPrivatizablePtrCallSiteArgument final
7121 : public AAPrivatizablePtrFloating {
7122 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7123 : AAPrivatizablePtrFloating(IRP, A) {}
7124
7125 /// See AbstractAttribute::initialize(...).
7126 void initialize(Attributor &A) override {
7127 if (getIRPosition().hasAttr(Attribute::ByVal))
7128 indicateOptimisticFixpoint();
7129 }
7130
7131 /// See AbstractAttribute::updateImpl(...).
7132 ChangeStatus updateImpl(Attributor &A) override {
7133 PrivatizableType = identifyPrivatizableType(A);
7134 if (!PrivatizableType.hasValue())
7135 return ChangeStatus::UNCHANGED;
7136 if (!PrivatizableType.getValue())
7137 return indicatePessimisticFixpoint();
7138
7139 const IRPosition &IRP = getIRPosition();
7140 auto &NoCaptureAA =
7141 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7142 if (!NoCaptureAA.isAssumedNoCapture()) {
7143 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7144 return indicatePessimisticFixpoint();
7145 }
7146
7147 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7148 if (!NoAliasAA.isAssumedNoAlias()) {
7149 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7150 return indicatePessimisticFixpoint();
7151 }
7152
7153 bool IsKnown;
7154 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7155 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7156 return indicatePessimisticFixpoint();
7157 }
7158
7159 return ChangeStatus::UNCHANGED;
7160 }
7161
7162 /// See AbstractAttribute::trackStatistics()
7163 void trackStatistics() const override {
7164 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7165 }
7166};
7167
7168struct AAPrivatizablePtrCallSiteReturned final
7169 : public AAPrivatizablePtrFloating {
7170 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7171 : AAPrivatizablePtrFloating(IRP, A) {}
7172
7173 /// See AbstractAttribute::initialize(...).
7174 void initialize(Attributor &A) override {
7175 // TODO: We can privatize more than arguments.
7176 indicatePessimisticFixpoint();
7177 }
7178
7179 /// See AbstractAttribute::trackStatistics()
7180 void trackStatistics() const override {
7181 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7182 }
7183};
7184
7185struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7186 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7187 : AAPrivatizablePtrFloating(IRP, A) {}
7188
7189 /// See AbstractAttribute::initialize(...).
7190 void initialize(Attributor &A) override {
7191 // TODO: We can privatize more than arguments.
7192 indicatePessimisticFixpoint();
7193 }
7194
7195 /// See AbstractAttribute::trackStatistics()
7196 void trackStatistics() const override {
7197 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7198 }
7199};
7200} // namespace
7201
7202/// -------------------- Memory Behavior Attributes ----------------------------
7203/// Includes read-none, read-only, and write-only.
7204/// ----------------------------------------------------------------------------
7205namespace {
7206struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7207 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7208 : AAMemoryBehavior(IRP, A) {}
7209
7210 /// See AbstractAttribute::initialize(...).
7211 void initialize(Attributor &A) override {
7212 intersectAssumedBits(BEST_STATE);
7213 getKnownStateFromValue(getIRPosition(), getState());
7214 AAMemoryBehavior::initialize(A);
7215 }
7216
7217 /// Return the memory behavior information encoded in the IR for \p IRP.
7218 static void getKnownStateFromValue(const IRPosition &IRP,
7219 BitIntegerState &State,
7220 bool IgnoreSubsumingPositions = false) {
7221 SmallVector<Attribute, 2> Attrs;
7222 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7223 for (const Attribute &Attr : Attrs) {
7224 switch (Attr.getKindAsEnum()) {
7225 case Attribute::ReadNone:
7226 State.addKnownBits(NO_ACCESSES);
7227 break;
7228 case Attribute::ReadOnly:
7229 State.addKnownBits(NO_WRITES);
7230 break;
7231 case Attribute::WriteOnly:
7232 State.addKnownBits(NO_READS);
7233 break;
7234 default:
7235 llvm_unreachable("Unexpected attribute!");
7236 }
7237 }
7238
7239 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7240 if (!I->mayReadFromMemory())
7241 State.addKnownBits(NO_READS);
7242 if (!I->mayWriteToMemory())
7243 State.addKnownBits(NO_WRITES);
7244 }
7245 }
7246
7247 /// See AbstractAttribute::getDeducedAttributes(...).
7248 void getDeducedAttributes(LLVMContext &Ctx,
7249 SmallVectorImpl<Attribute> &Attrs) const override {
7250 assert(Attrs.size() == 0);
7251 if (isAssumedReadNone())
7252 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7253 else if (isAssumedReadOnly())
7254 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7255 else if (isAssumedWriteOnly())
7256 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7257 assert(Attrs.size() <= 1);
7258 }
7259
7260 /// See AbstractAttribute::manifest(...).
7261 ChangeStatus manifest(Attributor &A) override {
7262 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7263 return ChangeStatus::UNCHANGED;
7264
7265 const IRPosition &IRP = getIRPosition();
7266
7267 // Check if we would improve the existing attributes first.
7268 SmallVector<Attribute, 4> DeducedAttrs;
7269 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7270 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7271 return IRP.hasAttr(Attr.getKindAsEnum(),
7272 /* IgnoreSubsumingPositions */ true);
7273 }))
7274 return ChangeStatus::UNCHANGED;
7275
7276 // Clear existing attributes.
7277 IRP.removeAttrs(AttrKinds);
7278
7279 // Use the generic manifest method.
7280 return IRAttribute::manifest(A);
7281 }
7282
7283 /// See AbstractState::getAsStr().
7284 const std::string getAsStr() const override {
7285 if (isAssumedReadNone())
7286 return "readnone";
7287 if (isAssumedReadOnly())
7288 return "readonly";
7289 if (isAssumedWriteOnly())
7290 return "writeonly";
7291 return "may-read/write";
7292 }
7293
7294 /// The set of IR attributes AAMemoryBehavior deals with.
7295 static const Attribute::AttrKind AttrKinds[3];
7296};
7297
7298const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7299 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
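
The three attribute kinds map onto a small bit state. The constant values below are an assumption for illustration (the real definitions live in AAMemoryBehavior); only the relationships used by getKnownStateFromValue above matter:

    enum : unsigned {
      NO_READS    = 1u << 0,               // known when WriteOnly is present
      NO_WRITES   = 1u << 1,               // known when ReadOnly is present
      NO_ACCESSES = NO_READS | NO_WRITES,  // known when ReadNone is present
    };
    // Known bits are only ever added from IR facts; assumed bits are only ever
    // removed during updates, so the state degrades monotonically from
    // "readnone" toward "may-read/write".
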
7300
7301/// Memory behavior attribute for a floating value.
7302struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7303 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7304 : AAMemoryBehaviorImpl(IRP, A) {}
7305
7306 /// See AbstractAttribute::updateImpl(...).
7307 ChangeStatus updateImpl(Attributor &A) override;
7308
7309 /// See AbstractAttribute::trackStatistics()
7310 void trackStatistics() const override {
7311 if (isAssumedReadNone())
7312 STATS_DECLTRACK_FLOATING_ATTR(readnone)
7313 else if (isAssumedReadOnly())
7314 STATS_DECLTRACK_FLOATING_ATTR(readonly)
7315 else if (isAssumedWriteOnly())
7316 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7317 }
7318
7319private:
7320 /// Return true if users of \p UserI might access the underlying
7321 /// variable/location described by \p U and should therefore be analyzed.
7322 bool followUsersOfUseIn(Attributor &A, const Use &U,
7323 const Instruction *UserI);
7324
7325 /// Update the state according to the effect of use \p U in \p UserI.
7326 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7327};
7328
7329/// Memory behavior attribute for function argument.
7330struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7331 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7332 : AAMemoryBehaviorFloating(IRP, A) {}
7333
7334 /// See AbstractAttribute::initialize(...).
7335 void initialize(Attributor &A) override {
7336 intersectAssumedBits(BEST_STATE);
7337 const IRPosition &IRP = getIRPosition();
7338 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7339 // can query it when we use has/getAttr. That would allow us to reuse the
7340 // initialize of the base class here.
7341 bool HasByVal =
7342 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7343 getKnownStateFromValue(IRP, getState(),
7344 /* IgnoreSubsumingPositions */ HasByVal);
7345
7346 // Initialize the use vector with all direct uses of the associated value.
7347 Argument *Arg = getAssociatedArgument();
7348 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7349 indicatePessimisticFixpoint();
7350 }
7351
7352 ChangeStatus manifest(Attributor &A) override {
7353 // TODO: Pointer arguments are not supported on vectors of pointers yet.
7354 if (!getAssociatedValue().getType()->isPointerTy())
7355 return ChangeStatus::UNCHANGED;
7356
7357 // TODO: From readattrs.ll: "inalloca parameters are always
7358 // considered written"
7359 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7360 removeKnownBits(NO_WRITES);
7361 removeAssumedBits(NO_WRITES);
7362 }
7363 return AAMemoryBehaviorFloating::manifest(A);
7364 }
7365
7366 /// See AbstractAttribute::trackStatistics()
7367 void trackStatistics() const override {
7368 if (isAssumedReadNone())
7369 STATS_DECLTRACK_ARG_ATTR(readnone)
7370 else if (isAssumedReadOnly())
7371 STATS_DECLTRACK_ARG_ATTR(readonly)
7372 else if (isAssumedWriteOnly())
7373 STATS_DECLTRACK_ARG_ATTR(writeonly)
7374 }
7375};
7376
7377struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7378 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7379 : AAMemoryBehaviorArgument(IRP, A) {}
7380
7381 /// See AbstractAttribute::initialize(...).
7382 void initialize(Attributor &A) override {
7383 // If we don't have an associated attribute this is either a variadic call
7384 // or an indirect call, either way, nothing to do here.
7385 Argument *Arg = getAssociatedArgument();
7386 if (!Arg) {
7387 indicatePessimisticFixpoint();
7388 return;
7389 }
7390 if (Arg->hasByValAttr()) {
7391 addKnownBits(NO_WRITES);
7392 removeKnownBits(NO_READS);
7393 removeAssumedBits(NO_READS);
7394 }
7395 AAMemoryBehaviorArgument::initialize(A);
7396 if (getAssociatedFunction()->isDeclaration())
7397 indicatePessimisticFixpoint();
7398 }
7399
7400 /// See AbstractAttribute::updateImpl(...).
7401 ChangeStatus updateImpl(Attributor &A) override {
7402 // TODO: Once we have call site specific value information we can provide
7403 // call site specific liveness information and then it makes
7404 // sense to specialize attributes for call sites arguments instead of
7405 // redirecting requests to the callee argument.
7406 Argument *Arg = getAssociatedArgument();
7407 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7408 auto &ArgAA =
7409 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7410 return clampStateAndIndicateChange(getState(), ArgAA.getState());
7411 }
7412
7413 /// See AbstractAttribute::trackStatistics()
7414 void trackStatistics() const override {
7415 if (isAssumedReadNone())
7416 STATS_DECLTRACK_CSARG_ATTR(readnone)
7417 else if (isAssumedReadOnly())
7418 STATS_DECLTRACK_CSARG_ATTR(readonly)
7419 else if (isAssumedWriteOnly())
7420 STATS_DECLTRACK_CSARG_ATTR(writeonly)
7421 }
7422};
7423
7424/// Memory behavior attribute for a call site return position.
7425struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7426 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7427 : AAMemoryBehaviorFloating(IRP, A) {}
7428
7429 /// See AbstractAttribute::initialize(...).
7430 void initialize(Attributor &A) override {
7431 AAMemoryBehaviorImpl::initialize(A);
7432 Function *F = getAssociatedFunction();
7433 if (!F || F->isDeclaration())
7434 indicatePessimisticFixpoint();
7435 }
7436
7437 /// See AbstractAttribute::manifest(...).
7438 ChangeStatus manifest(Attributor &A) override {
7439 // We do not annotate returned values.
7440 return ChangeStatus::UNCHANGED;
7441 }
7442
7443 /// See AbstractAttribute::trackStatistics()
7444 void trackStatistics() const override {}
7445};
7446
7447/// An AA to represent the memory behavior function attributes.
7448struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7449 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7450 : AAMemoryBehaviorImpl(IRP, A) {}
7451
7452 /// See AbstractAttribute::updateImpl(Attributor &A).
7453 virtual ChangeStatus updateImpl(Attributor &A) override;
7454
7455 /// See AbstractAttribute::manifest(...).
7456 ChangeStatus manifest(Attributor &A) override {
7457 Function &F = cast<Function>(getAnchorValue());
7458 if (isAssumedReadNone()) {
7459 F.removeFnAttr(Attribute::ArgMemOnly);
7460 F.removeFnAttr(Attribute::InaccessibleMemOnly);
7461 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7462 }
7463 return AAMemoryBehaviorImpl::manifest(A);
7464 }
7465
7466 /// See AbstractAttribute::trackStatistics()
7467 void trackStatistics() const override {
7468 if (isAssumedReadNone())
7469 STATS_DECLTRACK_FN_ATTR(readnone)
7470 else if (isAssumedReadOnly())
7471 STATS_DECLTRACK_FN_ATTR(readonly)
7472 else if (isAssumedWriteOnly())
7473 STATS_DECLTRACK_FN_ATTR(writeonly)
7474 }
7475};
7476
7477/// AAMemoryBehavior attribute for call sites.
7478struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7479 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7480 : AAMemoryBehaviorImpl(IRP, A) {}
7481
7482 /// See AbstractAttribute::initialize(...).
7483 void initialize(Attributor &A) override {
7484 AAMemoryBehaviorImpl::initialize(A);
7485 Function *F = getAssociatedFunction();
7486 if (!F || F->isDeclaration())
7487 indicatePessimisticFixpoint();
7488 }
7489
7490 /// See AbstractAttribute::updateImpl(...).
7491 ChangeStatus updateImpl(Attributor &A) override {
7492 // TODO: Once we have call site specific value information we can provide
7493 // call site specific liveness information and then it makes
7494 // sense to specialize attributes for call site arguments instead of
7495 // redirecting requests to the callee argument.
7496 Function *F = getAssociatedFunction();
7497 const IRPosition &FnPos = IRPosition::function(*F);
7498 auto &FnAA =
7499 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7500 return clampStateAndIndicateChange(getState(), FnAA.getState());
7501 }
7502
7503 /// See AbstractAttribute::trackStatistics()
7504 void trackStatistics() const override {
7505 if (isAssumedReadNone())
7506 STATS_DECLTRACK_CS_ATTR(readnone)
7507 else if (isAssumedReadOnly())
7508 STATS_DECLTRACK_CS_ATTR(readonly)
7509 else if (isAssumedWriteOnly())
7510 STATS_DECLTRACK_CS_ATTR(writeonly)
7511 }
7512};
7513
7514ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7515
7516 // The current assumed state used to determine a change.
7517 auto AssumedState = getAssumed();
7518
7519 auto CheckRWInst = [&](Instruction &I) {
7520 // If the instruction has its own memory behavior state, use it to restrict
7521 // the local state. No further analysis is required as the other memory
7522 // state is as optimistic as it gets.
7523 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7524 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7525 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7526 intersectAssumedBits(MemBehaviorAA.getAssumed());
7527 return !isAtFixpoint();
7528 }
7529
7530 // Remove access kind modifiers if necessary.
7531 if (I.mayReadFromMemory())
7532 removeAssumedBits(NO_READS);
7533 if (I.mayWriteToMemory())
7534 removeAssumedBits(NO_WRITES);
7535 return !isAtFixpoint();
7536 };
7537
7538 bool UsedAssumedInformation = false;
7539 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7540 UsedAssumedInformation))
7541 return indicatePessimisticFixpoint();
7542
7543 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7544 : ChangeStatus::UNCHANGED;
7545}
7546
7547ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7548
7549 const IRPosition &IRP = getIRPosition();
7550 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7551 AAMemoryBehavior::StateType &S = getState();
7552
7553 // First, check the function scope. We take the known information and we avoid
7554 // work if the assumed information implies the current assumed information for
7555 // this attribute. This is valid for all but byval arguments.
7556 Argument *Arg = IRP.getAssociatedArgument();
7557 AAMemoryBehavior::base_t FnMemAssumedState =
7558 AAMemoryBehavior::StateType::getWorstState();
7559 if (!Arg || !Arg->hasByValAttr()) {
7560 const auto &FnMemAA =
7561 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7562 FnMemAssumedState = FnMemAA.getAssumed();
7563 S.addKnownBits(FnMemAA.getKnown());
7564 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7565 return ChangeStatus::UNCHANGED;
7566 }
7567
7568 // The current assumed state used to determine a change.
7569 auto AssumedState = S.getAssumed();
7570
7571 // Make sure the value is not captured (except through "return"), if
7572 // it is, any information derived would be irrelevant anyway as we cannot
7573 // check the potential aliases introduced by the capture. However, no need
7574 // to fall back to anything less optimistic than the function state.
7575 const auto &ArgNoCaptureAA =
7576 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7577 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7578 S.intersectAssumedBits(FnMemAssumedState);
7579 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7580 : ChangeStatus::UNCHANGED;
7581 }
7582
7583 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7584 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7585 Instruction *UserI = cast<Instruction>(U.getUser());
7586 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7587 << " \n");
7588
7589 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7590 if (UserI->isDroppable())
7591 return true;
7592
7593 // Check if the users of UserI should also be visited.
7594 Follow = followUsersOfUseIn(A, U, UserI);
7595
7596 // If UserI might touch memory we analyze the use in detail.
7597 if (UserI->mayReadOrWriteMemory())
7598 analyzeUseIn(A, U, UserI);
7599
7600 return !isAtFixpoint();
7601 };
7602
7603 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7604 return indicatePessimisticFixpoint();
7605
7606 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7607 : ChangeStatus::UNCHANGED;
7608}
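// Illustrative note, not part of the original source: the early return in the
// function-scope check above fires when every bit still assumed for this
// position is also assumed at function scope. With hypothetical masks
//
//   S.getAssumed()       == NO_WRITES             // position: assumed write-free
//   FnMemAA.getAssumed() == NO_READS | NO_WRITES  // function: assumed readnone
//
// (S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed() holds, so no uses
// need to be visited in that iteration.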
7609
7610bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7611 const Instruction *UserI) {
7612 // The loaded value is unrelated to the pointer argument, no need to
7613 // follow the users of the load.
7614 if (isa<LoadInst>(UserI))
7615 return false;
7616
7617 // By default we follow all uses assuming UserI might leak information on U,
7618 // we have special handling for call sites operands though.
7619 const auto *CB = dyn_cast<CallBase>(UserI);
7620 if (!CB || !CB->isArgOperand(&U))
7621 return true;
7622
7623 // If the use is a call argument known not to be captured, the users of
7624 // the call do not need to be visited because they have to be unrelated to
7625 // the input. Note that this check is not trivial even though we disallow
7626 // general capturing of the underlying argument. The reason is that the
7627 // call might capture the argument "through return", which we allow and for which we
7628 // need to check call users.
7629 if (U.get()->getType()->isPointerTy()) {
7630 unsigned ArgNo = CB->getArgOperandNo(&U);
7631 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7632 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7633 return !ArgNoCaptureAA.isAssumedNoCapture();
7634 }
7635
7636 return true;
7637}
7638
7639void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7640 const Instruction *UserI) {
7641 assert(UserI->mayReadOrWriteMemory());
7642
7643 switch (UserI->getOpcode()) {
7644 default:
7645 // TODO: Handle all atomics and other side-effect operations we know of.
7646 break;
7647 case Instruction::Load:
7648 // Loads cause the NO_READS property to disappear.
7649 removeAssumedBits(NO_READS);
7650 return;
7651
7652 case Instruction::Store:
7653 // Stores cause the NO_WRITES property to disappear if the use is the
7654 // pointer operand. Note that while capturing was taken care of somewhere
7655 // else we need to deal with stores of the value that is not looked through.
7656 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7657 removeAssumedBits(NO_WRITES);
7658 else
7659 indicatePessimisticFixpoint();
7660 return;
7661
7662 case Instruction::Call:
7663 case Instruction::CallBr:
7664 case Instruction::Invoke: {
7665 // For call sites we look at the argument memory behavior attribute (this
7666 // could be recursive!) in order to restrict our own state.
7667 const auto *CB = cast<CallBase>(UserI);
7668
7669 // Give up on operand bundles.
7670 if (CB->isBundleOperand(&U)) {
7671 indicatePessimisticFixpoint();
7672 return;
7673 }
7674
7675 // Calling a function does read the function pointer, maybe write it if the
7676 // function is self-modifying.
7677 if (CB->isCallee(&U)) {
7678 removeAssumedBits(NO_READS);
7679 break;
7680 }
7681
7682 // Adjust the possible access behavior based on the information on the
7683 // argument.
7684 IRPosition Pos;
7685 if (U.get()->getType()->isPointerTy())
7686 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7687 else
7688 Pos = IRPosition::callsite_function(*CB);
7689 const auto &MemBehaviorAA =
7690 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7691 // "assumed" has at most the same bits as the MemBehaviorAA assumed
7692 // and at least "known".
7693 intersectAssumedBits(MemBehaviorAA.getAssumed());
7694 return;
7695 }
7696 };
7697
7698 // Generally, look at the "may-properties" and adjust the assumed state if we
7699 // did not trigger special handling before.
7700 if (UserI->mayReadFromMemory())
7701 removeAssumedBits(NO_READS);
7702 if (UserI->mayWriteToMemory())
7703 removeAssumedBits(NO_WRITES);
7704}
7705} // namespace
7706
7707/// -------------------- Memory Locations Attributes ---------------------------
7708/// Includes read-none, argmemonly, inaccessiblememonly,
7709/// inaccessiblememorargmemonly
7710/// ----------------------------------------------------------------------------
7711
7712std::string AAMemoryLocation::getMemoryLocationsAsStr(
7713 AAMemoryLocation::MemoryLocationsKind MLK) {
7714 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7715 return "all memory";
7716 if (MLK == AAMemoryLocation::NO_LOCATIONS)
7717 return "no memory";
7718 std::string S = "memory:";
7719 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7720 S += "stack,";
7721 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7722 S += "constant,";
7723 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7724 S += "internal global,";
7725 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7726 S += "external global,";
7727 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7728 S += "argument,";
7729 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7730 S += "inaccessible,";
7731 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7732 S += "malloced,";
7733 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7734 S += "unknown,";
7735 S.pop_back();
7736 return S;
7737}
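// Illustrative note, not part of the original source: the mask passed in is
// the set of *not* accessed location kinds, so a cleared NO_* bit means the
// corresponding location may be accessed and its name is appended. Assuming
// NO_LOCATIONS is the union of all NO_* bits, a state that still allows only
// stack and argument accesses would print as follows:
//
//   MemoryLocationsKind MLK =
//       NO_LOCATIONS & ~(NO_LOCAL_MEM | NO_ARGUMENT_MEM);
//   getMemoryLocationsAsStr(MLK); // "memory:stack,argument"
//
// A mask with no NO_* bit set yields "all memory"; the full NO_LOCATIONS mask
// yields "no memory".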
7738
7739namespace {
7740struct AAMemoryLocationImpl : public AAMemoryLocation {
7741
7742 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7743 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7744 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7745 AccessKind2Accesses[u] = nullptr;
7746 }
7747
7748 ~AAMemoryLocationImpl() {
7749 // The AccessSets are allocated via a BumpPtrAllocator, we call
7750 // the destructor manually.
7751 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7752 if (AccessKind2Accesses[u])
7753 AccessKind2Accesses[u]->~AccessSet();
7754 }
7755
7756 /// See AbstractAttribute::initialize(...).
7757 void initialize(Attributor &A) override {
7758 intersectAssumedBits(BEST_STATE);
7759 getKnownStateFromValue(A, getIRPosition(), getState());
7760 AAMemoryLocation::initialize(A);
7761 }
7762
7763 /// Return the memory behavior information encoded in the IR for \p IRP.
7764 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7765 BitIntegerState &State,
7766 bool IgnoreSubsumingPositions = false) {
7767 // For internal functions we ignore `argmemonly` and
7768 // `inaccessiblememorargmemonly` as we might break it via interprocedural
7769 // constant propagation. It is unclear if this is the best way but it is
7770 // unlikely this will cause real performance problems. If we are deriving
7771 // attributes for the anchor function we even remove the attribute in
7772 // addition to ignoring it.
7773 bool UseArgMemOnly = true;
7774 Function *AnchorFn = IRP.getAnchorScope();
7775 if (AnchorFn && A.isRunOn(*AnchorFn))
7776 UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7777
7778 SmallVector<Attribute, 2> Attrs;
7779 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7780 for (const Attribute &Attr : Attrs) {
7781 switch (Attr.getKindAsEnum()) {
7782 case Attribute::ReadNone:
7783 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7784 break;
7785 case Attribute::InaccessibleMemOnly:
7786 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7787 break;
7788 case Attribute::ArgMemOnly:
7789 if (UseArgMemOnly)
7790 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7791 else
7792 IRP.removeAttrs({Attribute::ArgMemOnly});
7793 break;
7794 case Attribute::InaccessibleMemOrArgMemOnly:
7795 if (UseArgMemOnly)
7796 State.addKnownBits(inverseLocation(
7797 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7798 else
7799 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7800 break;
7801 default:
7802 llvm_unreachable("Unexpected attribute!");
7803 }
7804 }
7805 }
7806
7807 /// See AbstractAttribute::getDeducedAttributes(...).
7808 void getDeducedAttributes(LLVMContext &Ctx,
7809 SmallVectorImpl<Attribute> &Attrs) const override {
7810 assert(Attrs.size() == 0);
7811 if (isAssumedReadNone()) {
7812 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7813 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7814 if (isAssumedInaccessibleMemOnly())
7815 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7816 else if (isAssumedArgMemOnly())
7817 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7818 else if (isAssumedInaccessibleOrArgMemOnly())
7819 Attrs.push_back(
7820 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7821 }
7822 assert(Attrs.size() <= 1);
7823 }
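// Illustrative note, not part of the original source: at most one attribute
// is deduced here. readnone wins whenever it is assumed; only for function
// positions is one of inaccessiblememonly, argmemonly, or
// inaccessiblememorargmemonly emitted instead, which is why the trailing
// assertion allows zero or one entries.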
7824
7825 /// See AbstractAttribute::manifest(...).
7826 ChangeStatus manifest(Attributor &A) override {
7827 const IRPosition &IRP = getIRPosition();
7828
7829 // Check if we would improve the existing attributes first.
7830 SmallVector<Attribute, 4> DeducedAttrs;
7831 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7832 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7833 return IRP.hasAttr(Attr.getKindAsEnum(),
7834 /* IgnoreSubsumingPositions */ true);
7835 }))
7836 return ChangeStatus::UNCHANGED;
7837
7838 // Clear existing attributes.
7839 IRP.removeAttrs(AttrKinds);
7840 if (isAssumedReadNone())
7841 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7842
7843 // Use the generic manifest method.
7844 return IRAttribute::manifest(A);
7845 }
7846
7847 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7848 bool checkForAllAccessesToMemoryKind(
7849 function_ref<bool(const Instruction *, const Value *, AccessKind,
7850 MemoryLocationsKind)>
7851 Pred,
7852 MemoryLocationsKind RequestedMLK) const override {
7853 if (!isValidState())
7854 return false;
7855
7856 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7857 if (AssumedMLK == NO_LOCATIONS)
7858 return true;
7859
7860 unsigned Idx = 0;
7861 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7862 CurMLK *= 2, ++Idx) {
7863 if (CurMLK & RequestedMLK)
7864 continue;
7865
7866 if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7867 for (const AccessInfo &AI : *Accesses)
7868 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7869 return false;
7870 }
7871
7872 return true;
7873 }
7874
7875 ChangeStatus indicatePessimisticFixpoint() override {
7876 // If we give up and indicate a pessimistic fixpoint this instruction will
7877 // become an access for all potential access kinds:
7878 // TODO: Add pointers for argmemonly and globals to improve the results of
7879 // checkForAllAccessesToMemoryKind.
7880 bool Changed = false;
7881 MemoryLocationsKind KnownMLK = getKnown();
7882 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7883 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7884 if (!(CurMLK & KnownMLK))
7885 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7886 getAccessKindFromInst(I));
7887 return AAMemoryLocation::indicatePessimisticFixpoint();
7888 }
7889
7890protected:
7891 /// Helper struct to tie together an instruction that has a read or write
7892 /// effect with the pointer it accesses (if any).
7893 struct AccessInfo {
7894
7895 /// The instruction that caused the access.
7896 const Instruction *I;
7897
7898 /// The base pointer that is accessed, or null if unknown.
7899 const Value *Ptr;
7900
7901 /// The kind of access (read/write/read+write).
7902 AccessKind Kind;
7903
7904 bool operator==(const AccessInfo &RHS) const {
7905 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7906 }
7907 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7908 if (LHS.I != RHS.I)
7909 return LHS.I < RHS.I;
7910 if (LHS.Ptr != RHS.Ptr)
7911 return LHS.Ptr < RHS.Ptr;
7912 if (LHS.Kind != RHS.Kind)
7913 return LHS.Kind < RHS.Kind;
7914 return false;
7915 }
7916 };
7917
7918 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7919 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7920 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7921 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7922
7923 /// Categorize the pointer arguments of CB that might access memory in
7924 /// AccessedLoc and update the state and access map accordingly.
7925 void
7926 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7927 AAMemoryLocation::StateType &AccessedLocs,
7928 bool &Changed);
7929
7930 /// Return the kind(s) of location that may be accessed by \p V.
7931 AAMemoryLocation::MemoryLocationsKind
7932 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7933
7934 /// Return the access kind as determined by \p I.
7935 AccessKind getAccessKindFromInst(const Instruction *I) {
7936 AccessKind AK = READ_WRITE;
7937 if (I) {
7938 AK = I->mayReadFromMemory() ? READ : NONE;
7939 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7940 }
7941 return AK;
7942 }
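// Illustrative note, not part of the original source: the helper is
// conservative for a null instruction and otherwise derives the kind from the
// instruction's may-read/may-write properties. For hypothetical, non-volatile
// LoadInst *LI and StoreInst *SI:
//
//   getAccessKindFromInst(LI);      // READ  (may read, does not write)
//   getAccessKindFromInst(SI);      // WRITE (may write, does not read)
//   getAccessKindFromInst(nullptr); // READ_WRITE (unknown, assume both)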
7943
7944 /// Update the state \p State and the AccessKind2Accesses given that \p I is
7945 /// an access of kind \p AK to a \p MLK memory location with the access
7946 /// pointer \p Ptr.
7947 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7948 MemoryLocationsKind MLK, const Instruction *I,
7949 const Value *Ptr, bool &Changed,
7950 AccessKind AK = READ_WRITE) {
7951
7952 assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7953 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7954 if (!Accesses)
7955 Accesses = new (Allocator) AccessSet();
7956 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7957 State.removeAssumedBits(MLK);
7958 }
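// Illustrative note, not part of the original source: recording an access has
// two effects, it inserts an AccessInfo entry into the per-location-kind set
// (allocated lazily from the BumpPtrAllocator on first use) and it clears the
// corresponding NO_* bit from the assumed state. For a hypothetical store SI
// through an alloca-based pointer P:
//
//   bool Changed = false;
//   updateStateAndAccessesMap(getState(), NO_LOCAL_MEM, SI, P, Changed,
//                             getAccessKindFromInst(SI));
//   // The state no longer assumes "no local memory is accessed" and Changed
//   // reflects whether this access was seen for the first time.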
7959
7960 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7961 /// arguments, and update the state and access map accordingly.
7962 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7963 AAMemoryLocation::StateType &State, bool &Changed);
7964
7965 /// Used to allocate access sets.
7966 BumpPtrAllocator &Allocator;
7967
7968 /// The set of IR attributes AAMemoryLocation deals with.
7969 static const Attribute::AttrKind AttrKinds[4];
7970};
7971
7972const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7973 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7974 Attribute::InaccessibleMemOrArgMemOnly};
7975
7976void AAMemoryLocationImpl::categorizePtrValue(
7977 Attributor &A, const Instruction &I, const Value &Ptr,
7978 AAMemoryLocation::StateType &State, bool &Changed) {
7979 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7980 << Ptr << " ["
7981 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7982
7983 SmallVector<Value *, 8> Objects;
7984 bool UsedAssumedInformation = false;
7985 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7986 UsedAssumedInformation,
7987 AA::Intraprocedural)) {
7988 LLVM_DEBUG(
7989 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7990 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7991 getAccessKindFromInst(&I));
7992 return;
7993 }
7994
7995 for (Value *Obj : Objects) {
7996 // TODO: recognize the TBAA used for constant accesses.
7997 MemoryLocationsKind MLK = NO_LOCATIONS;
7998 if (isa<UndefValue>(Obj))
7999 continue;
8000 if (isa<Argument>(Obj)) {
8001 // TODO: For now we do not treat byval arguments as local copies performed
8002 // on the call edge, though, we should. To make that happen we need to
8003 // teach various passes, e.g., DSE, about the copy effect of a byval. That
8004 // would also allow us to mark functions only accessing byval arguments as
8005 // readnone again, arguably their accesses have no effect outside of the
8006 // function, like accesses to allocas.
8007 MLK = NO_ARGUMENT_MEM;
8008 } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8009 // Reading constant memory is not treated as a read "effect" by the
8010 // function attr pass so we won't either. Constants defined by TBAA are
8011 // similar. (We know we do not write it because it is constant.)
8012 if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8013 if (GVar->isConstant())
8014 continue;
8015
8016 if (GV->hasLocalLinkage())
8017 MLK = NO_GLOBAL_INTERNAL_MEM;
8018 else
8019 MLK = NO_GLOBAL_EXTERNAL_MEM;
8020 } else if (isa<ConstantPointerNull>(Obj) &&
8021 !NullPointerIsDefined(getAssociatedFunction(),
8022 Ptr.getType()->getPointerAddressSpace())) {
8023 continue;
8024 } else if (isa<AllocaInst>(Obj)) {
8025 MLK = NO_LOCAL_MEM;
8026 } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8027 const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8028 *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8029 if (NoAliasAA.isAssumedNoAlias())
8030 MLK = NO_MALLOCED_MEM;
8031 else
8032 MLK = NO_UNKOWN_MEM;
8033 } else {
8034 MLK = NO_UNKOWN_MEM;
8035 }
8036
8037 assert(MLK != NO_LOCATIONS && "No location specified!");
8038 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8039 << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8040 << "\n");
8041 updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8042 getAccessKindFromInst(&I));
8043 }
8044
8045 LLVM_DEBUG(
8046 dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8047 << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8048}
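// Illustrative summary of the categorization above, not part of the original
// source:
//   Argument                                   -> NO_ARGUMENT_MEM
//   GlobalValue with local linkage             -> NO_GLOBAL_INTERNAL_MEM
//   other GlobalValue                          -> NO_GLOBAL_EXTERNAL_MEM
//   constant GlobalVariable, UndefValue, null
//   (where null is not a defined location)     -> skipped entirely
//   AllocaInst                                 -> NO_LOCAL_MEM
//   noalias call result                        -> NO_MALLOCED_MEM
//   anything else                              -> NO_UNKOWN_MEM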
8049
8050void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8051 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8052 bool &Changed) {
8053 for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8054
8055 // Skip non-pointer arguments.
8056 const Value *ArgOp = CB.getArgOperand(ArgNo);
8057 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8058 continue;
8059
8060 // Skip readnone arguments.
8061 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8062 const auto &ArgOpMemLocationAA =
8063 A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8064
8065 if (ArgOpMemLocationAA.isAssumedReadNone())
8066 continue;
8067
8068 // Categorize potentially accessed pointer arguments as if there was an
8069 // access instruction with them as pointer.
8070 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8071 }
8072}
8073
8074AAMemoryLocation::MemoryLocationsKind
8075AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8076 bool &Changed) {
8077 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8078 << I << "\n");
8079
8080 AAMemoryLocation::StateType AccessedLocs;
8081 AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8082
8083 if (auto *CB = dyn_cast<CallBase>(&I)) {
8084
8085 // First check if we assume any memory access is visible.
8086 const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8087 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8088 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8089 << " [" << CBMemLocationAA << "]\n");
8090
8091 if (CBMemLocationAA.isAssumedReadNone())
8092 return NO_LOCATIONS;
8093
8094 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8095 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8096 Changed, getAccessKindFromInst(&I));
8097 return AccessedLocs.getAssumed();
8098 }
8099
8100 uint32_t CBAssumedNotAccessedLocs =
8101 CBMemLocationAA.getAssumedNotAccessedLocation();
8102
8103 // Set the argmemonly and global bits as we handle them separately below.
8104 uint32_t CBAssumedNotAccessedLocsNoArgMem =
8105 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8106
8107 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8108 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8109 continue;
8110 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8111 getAccessKindFromInst(&I));
8112 }
8113
8114 // Now handle global memory if it might be accessed. This is slightly tricky
8115 // as NO_GLOBAL_MEM has multiple bits set.
8116 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8117 if (HasGlobalAccesses) {
8118 auto AccessPred = [&](const Instruction *, const Value *Ptr,
8119 AccessKind Kind, MemoryLocationsKind MLK) {
8120 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8121 getAccessKindFromInst(&I));
8122 return true;
8123 };
8124 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8125 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8126 return AccessedLocs.getWorstState();
8127 }
8128
8129 LLVM_DEBUG(
8130 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8131 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8132
8133 // Now handle argument memory if it might be accessed.
8134 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8135 if (HasArgAccesses)
8136 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8137
8138 LLVM_DEBUG(
8139 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8140 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8141
8142 return AccessedLocs.getAssumed();
8143 }
8144
8145 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8146 LLVM_DEBUG(
8147 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8148 << I << " [" << *Ptr << "]\n");
8149 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8150 return AccessedLocs.getAssumed();
8151 }
8152
8153 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8154 << I << "\n");
8155 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8156 getAccessKindFromInst(&I));
8157 return AccessedLocs.getAssumed();
8158}
8159
8160/// An AA to represent the memory behavior function attributes.
8161struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8162 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8163 : AAMemoryLocationImpl(IRP, A) {}
8164
8165 /// See AbstractAttribute::updateImpl(Attributor &A).
8166 virtual ChangeStatus updateImpl(Attributor &A) override {
8167
8168 const auto &MemBehaviorAA =
8169 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8170 if (MemBehaviorAA.isAssumedReadNone()) {
8171 if (MemBehaviorAA.isKnownReadNone())
8172 return indicateOptimisticFixpoint();
8173 assert(isAssumedReadNone() &&
8174 "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8175 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8176 return ChangeStatus::UNCHANGED;
8177 }
8178
8179 // The current assumed state used to determine a change.
8180 auto AssumedState = getAssumed();
8181 bool Changed = false;
8182
8183 auto CheckRWInst = [&](Instruction &I) {
8184 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8185 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8186 << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8187 removeAssumedBits(inverseLocation(MLK, false, false));
8188 // Stop once only the valid bit is set in the *not assumed location*, thus
8189 // once we don't actually exclude any memory locations in the state.
8190 return getAssumedNotAccessedLocation() != VALID_STATE;
8191 };
8192
8193 bool UsedAssumedInformation = false;
8194 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8195 UsedAssumedInformation))
8196 return indicatePessimisticFixpoint();
8197
8198 Changed |= AssumedState != getAssumed();
8199 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8200 }
8201
8202 /// See AbstractAttribute::trackStatistics()
8203 void trackStatistics() const override {
8204 if (isAssumedReadNone())
8205 STATS_DECLTRACK_FN_ATTR(readnone)
8206 else if (isAssumedArgMemOnly())
8207 STATS_DECLTRACK_FN_ATTR(argmemonly)
8208 else if (isAssumedInaccessibleMemOnly())
8209 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8210 else if (isAssumedInaccessibleOrArgMemOnly())
8211 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8212 }
8213};
8214
8215/// AAMemoryLocation attribute for call sites.
8216struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8217 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8218 : AAMemoryLocationImpl(IRP, A) {}
8219
8220 /// See AbstractAttribute::initialize(...).
8221 void initialize(Attributor &A) override {
8222 AAMemoryLocationImpl::initialize(A);
8223 Function *F = getAssociatedFunction();
8224 if (!F || F->isDeclaration())
8225 indicatePessimisticFixpoint();
8226 }
8227
8228 /// See AbstractAttribute::updateImpl(...).
8229 ChangeStatus updateImpl(Attributor &A) override {
8230 // TODO: Once we have call site specific value information we can provide
8231 // call site specific liveness information and then it makes
8232 // sense to specialize attributes for call site arguments instead of
8233 // redirecting requests to the callee argument.
8234 Function *F = getAssociatedFunction();
8235 const IRPosition &FnPos = IRPosition::function(*F);
8236 auto &FnAA =
8237 A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8238 bool Changed = false;
8239 auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8240 AccessKind Kind, MemoryLocationsKind MLK) {
8241 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8242 getAccessKindFromInst(I));
8243 return true;
8244 };
8245 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8246 return indicatePessimisticFixpoint();
8247 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8248 }
8249
8250 /// See AbstractAttribute::trackStatistics()
8251 void trackStatistics() const override {
8252 if (isAssumedReadNone())
8253 STATS_DECLTRACK_CS_ATTR(readnone)
8254 }
8255};
8256} // namespace
8257
8258/// ------------------ Value Constant Range Attribute -------------------------
8259
8260namespace {
8261struct AAValueConstantRangeImpl : AAValueConstantRange {
8262 using StateType = IntegerRangeState;
8263 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8264 : AAValueConstantRange(IRP, A) {}
8265
8266 /// See AbstractAttribute::initialize(..).
8267 void initialize(Attributor &A) override {
8268 if (A.hasSimplificationCallback(getIRPosition())) {
8269 indicatePessimisticFixpoint();
8270 return;
8271 }
8272
8273 // Intersect a range given by SCEV.
8274 intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8275
8276 // Intersect a range given by LVI.
8277 intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8278 }
8279
8280 /// See AbstractAttribute::getAsStr().
8281 const std::string getAsStr() const override {
8282 std::string Str;
8283 llvm::raw_string_ostream OS(Str);
8284 OS << "range(" << getBitWidth() << ")<";
8285 getKnown().print(OS);
8286 OS << " / ";
8287 getAssumed().print(OS);
8288 OS << ">";
8289 return OS.str();
8290 }
8291
8292 /// Helper function to get a SCEV expr for the associated value at program
8293 /// point \p I.
8294 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8295 if (!getAnchorScope())
8296 return nullptr;
8297
8298 ScalarEvolution *SE =
8299 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8300 *getAnchorScope());
8301
8302 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8303 *getAnchorScope());
8304
8305 if (!SE || !LI)
8306 return nullptr;
8307
8308 const SCEV *S = SE->getSCEV(&getAssociatedValue());
8309 if (!I)
8310 return S;
8311
8312 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8313 }
8314
8315 /// Helper function to get a range from SCEV for the associated value at
8316 /// program point \p I.
8317 ConstantRange getConstantRangeFromSCEV(Attributor &A,
8318 const Instruction *I = nullptr) const {
8319 if (!getAnchorScope())
8320 return getWorstState(getBitWidth());
8321
8322 ScalarEvolution *SE =
8323 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8324 *getAnchorScope());
8325
8326 const SCEV *S = getSCEV(A, I);
8327 if (!SE || !S)
8328 return getWorstState(getBitWidth());
8329
8330 return SE->getUnsignedRange(S);
8331 }
8332
8333 /// Helper function to get a range from LVI for the associated value at
8334 /// program point \p I.
8335 ConstantRange
8336 getConstantRangeFromLVI(Attributor &A,
8337 const Instruction *CtxI = nullptr) const {
8338 if (!getAnchorScope())
8339 return getWorstState(getBitWidth());
8340
8341 LazyValueInfo *LVI =
8342 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8343 *getAnchorScope());
8344
8345 if (!LVI || !CtxI)
8346 return getWorstState(getBitWidth());
8347 return LVI->getConstantRange(&getAssociatedValue(),
8348 const_cast<Instruction *>(CtxI));
8349 }
8350
8351 /// Return true if \p CtxI is valid for querying outside analyses.
8352 /// This basically makes sure we do not ask intra-procedural analysis
8353 /// about a context in the wrong function or a context that violates
8354 /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8355 /// if the original context of this AA is OK or should be considered invalid.
8356 bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8357 const Instruction *CtxI,
8358 bool AllowAACtxI) const {
8359 if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8360 return false;
8361
8362 // Our context might be in a different function; neither intra-procedural
8363 // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8364 if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8365 return false;
8366
8367 // If the context is not dominated by the value there are paths to the
8368 // context that do not define the value. This cannot be handled by
8369 // LazyValueInfo so we need to bail.
8370 if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8371 InformationCache &InfoCache = A.getInfoCache();
8372 const DominatorTree *DT =
8373 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8374 *I->getFunction());
8375 return DT && DT->dominates(I, CtxI);
8376 }
8377
8378 return true;
8379 }
8380
8381 /// See AAValueConstantRange::getKnownConstantRange(..).
8382 ConstantRange
8383 getKnownConstantRange(Attributor &A,
8384 const Instruction *CtxI = nullptr) const override {
8385 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8386 /* AllowAACtxI */ false))
8387 return getKnown();
8388
8389 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8390 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8391 return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8392 }
8393
8394 /// See AAValueConstantRange::getAssumedConstantRange(..).
8395 ConstantRange
8396 getAssumedConstantRange(Attributor &A,
8397 const Instruction *CtxI = nullptr) const override {
8398 // TODO: Make SCEV use Attributor assumption.
8399 // We may be able to bound a variable range via assumptions in
8400 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8401 // evolve to x^2 + x, then we can say that y is in [2, 12].
8402 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8403 /* AllowAACtxI */ false))
8404 return getAssumed();
8405
8406 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8407 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8408 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8409 }
8410
8411 /// Helper function to create MDNode for range metadata.
8412 static MDNode *
8413 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8414 const ConstantRange &AssumedConstantRange) {
8415 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8416 Ty, AssumedConstantRange.getLower())),
8417 ConstantAsMetadata::get(ConstantInt::get(
8418 Ty, AssumedConstantRange.getUpper()))};
8419 return MDNode::get(Ctx, LowAndHigh);
8420 }
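// Illustrative note, not part of the original source: range metadata stores
// the inclusive lower and exclusive upper bound as two constant operands. For
// a hypothetical i32 value with assumed range [0, 42) the helper produces the
// equivalent of
//
//   !{i32 0, i32 42}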
8421
8422 /// Return true if \p Assumed is included in \p KnownRanges.
8423 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8424
8425 if (Assumed.isFullSet())
8426 return false;
8427
8428 if (!KnownRanges)
8429 return true;
8430
8431 // If multiple ranges are annotated in IR, we give up annotating the assumed
8432 // range for now.
8433
8434 // TODO: If there exists a known range which contains the assumed range, we
8435 // can say the assumed range is better.
8436 if (KnownRanges->getNumOperands() > 2)
8437 return false;
8438
8439 ConstantInt *Lower =
8440 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8441 ConstantInt *Upper =
8442 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8443
8444 ConstantRange Known(Lower->getValue(), Upper->getValue());
8445 return Known.contains(Assumed) && Known != Assumed;
8446 }
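// Illustrative note, not part of the original source: with existing metadata
// !{i32 0, i32 100} (a single known range [0, 100)), an assumed range of
// [10, 20) is "better" because it is strictly contained in the known range,
// whereas a full-set range, or an assumed range equal to the known one, is
// not.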
8447
8448 /// Helper function to set range metadata.
8449 static bool
8450 setRangeMetadataIfisBetterRange(Instruction *I,
8451 const ConstantRange &AssumedConstantRange) {
8452 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8453 if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8454 if (!AssumedConstantRange.isEmptySet()) {
8455 I->setMetadata(LLVMContext::MD_range,
8456 getMDNodeForConstantRange(I->getType(), I->getContext(),
8457 AssumedConstantRange));
8458 return true;
8459 }
8460 }
8461 return false;
8462 }
8463
8464 /// See AbstractAttribute::manifest()
8465 ChangeStatus manifest(Attributor &A) override {
8466 ChangeStatus Changed = ChangeStatus::UNCHANGED;
8467 ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8468 assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8469
8470 auto &V = getAssociatedValue();
8471 if (!AssumedConstantRange.isEmptySet() &&
8472 !AssumedConstantRange.isSingleElement()) {
8473 if (Instruction *I = dyn_cast<Instruction>(&V)) {
8474 assert(I == getCtxI() && "Should not annotate an instruction which is "
8475 "not the context instruction");
8476 if (isa<CallInst>(I) || isa<LoadInst>(I))
8477 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8478 Changed = ChangeStatus::CHANGED;
8479 }
8480 }
8481
8482 return Changed;
8483 }
8484};
8485
8486struct AAValueConstantRangeArgument final
8487 : AAArgumentFromCallSiteArguments<
8488 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8489 true /* BridgeCallBaseContext */> {
8490 using Base = AAArgumentFromCallSiteArguments<
8491 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8492 true /* BridgeCallBaseContext */>;
8493 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8494 : Base(IRP, A) {}
8495
8496 /// See AbstractAttribute::initialize(..).
8497 void initialize(Attributor &A) override {
8498 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8499 indicatePessimisticFixpoint();
8500 } else {
8501 Base::initialize(A);
8502 }
8503 }
8504
8505 /// See AbstractAttribute::trackStatistics()
8506 void trackStatistics() const override {
8507 STATS_DECLTRACK_ARG_ATTR(value_range)
8508 }
8509};
8510
8511struct AAValueConstantRangeReturned
8512 : AAReturnedFromReturnedValues<AAValueConstantRange,
8513 AAValueConstantRangeImpl,
8514 AAValueConstantRangeImpl::StateType,
8515 /* PropogateCallBaseContext */ true> {
8516 using Base =
8517 AAReturnedFromReturnedValues<AAValueConstantRange,
8518 AAValueConstantRangeImpl,
8519 AAValueConstantRangeImpl::StateType,
8520 /* PropogateCallBaseContext */ true>;
8521 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8522 : Base(IRP, A) {}
8523
8524 /// See AbstractAttribute::initialize(...).
8525 void initialize(Attributor &A) override {}
8526
8527 /// See AbstractAttribute::trackStatistics()
8528 void trackStatistics() const override {
8529 STATS_DECLTRACK_FNRET_ATTR(value_range)
8530 }
8531};
8532
8533struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8534 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8535 : AAValueConstantRangeImpl(IRP, A) {}
8536
8537 /// See AbstractAttribute::initialize(...).
8538 void initialize(Attributor &A) override {
8539 AAValueConstantRangeImpl::initialize(A);
8540 if (isAtFixpoint())
8541 return;
8542
8543 Value &V = getAssociatedValue();
8544
8545 if (auto *C = dyn_cast<ConstantInt>(&V)) {
8546 unionAssumed(ConstantRange(C->getValue()));
8547 indicateOptimisticFixpoint();
8548 return;
8549 }
8550
8551 if (isa<UndefValue>(&V)) {
8552 // Collapse the undef state to 0.
8553 unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8554 indicateOptimisticFixpoint();
8555 return;
8556 }
8557
8558 if (isa<CallBase>(&V))
8559 return;
8560
8561 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8562 return;
8563
8564 // If it is a load instruction with range metadata, use it.
8565 if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8566 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8567 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8568 return;
8569 }
8570
8571 // We can work with PHI and select instructions as we traverse their operands
8572 // during update.
8573 if (isa<SelectInst>(V) || isa<PHINode>(V))
8574 return;
8575
8576 // Otherwise we give up.
8577 indicatePessimisticFixpoint();
8578
8579 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8580 << getAssociatedValue() << "\n");
8581 }
8582
8583 bool calculateBinaryOperator(
8584 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8585 const Instruction *CtxI,
8586 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8587 Value *LHS = BinOp->getOperand(0);
8588 Value *RHS = BinOp->getOperand(1);
8589
8590 // Simplify the operands first.
8591 bool UsedAssumedInformation = false;
8592 const auto &SimplifiedLHS =
8593 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8594 *this, UsedAssumedInformation);
8595 if (!SimplifiedLHS.hasValue())
8596 return true;
8597 if (!SimplifiedLHS.getValue())
8598 return false;
8599 LHS = *SimplifiedLHS;
8600
8601 const auto &SimplifiedRHS =
8602 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8603 *this, UsedAssumedInformation);
8604 if (!SimplifiedRHS.hasValue())
8605 return true;
8606 if (!SimplifiedRHS.getValue())
8607 return false;
8608 RHS = *SimplifiedRHS;
8609
8610 // TODO: Allow non integers as well.
8611 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8612 return false;
8613
8614 auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8615 *this, IRPosition::value(*LHS, getCallBaseContext()),
8616 DepClassTy::REQUIRED);
8617 QuerriedAAs.push_back(&LHSAA);
8618 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8619
8620 auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8621 *this, IRPosition::value(*RHS, getCallBaseContext()),
8622 DepClassTy::REQUIRED);
8623 QuerriedAAs.push_back(&RHSAA);
8624 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8625
8626 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8627
8628 T.unionAssumed(AssumedRange);
8629
8630 // TODO: Track a known state too.
8631
8632 return T.isValidState();
8633 }
8634
8635 bool calculateCastInst(
8636 Attributor &A, CastInst *CastI, IntegerRangeState &T,
8637 const Instruction *CtxI,
8638 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8639 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8640 // TODO: Allow non integers as well.
8641 Value *OpV = CastI->getOperand(0);
8642
8643 // Simplify the operand first.
8644 bool UsedAssumedInformation = false;
8645 const auto &SimplifiedOpV =
8646 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8647 *this, UsedAssumedInformation);
8648 if (!SimplifiedOpV.hasValue())
8649 return true;
8650 if (!SimplifiedOpV.getValue())
8651 return false;
8652 OpV = *SimplifiedOpV;
8653
8654 if (!OpV->getType()->isIntegerTy())
8655 return false;
8656
8657 auto &OpAA = A.getAAFor<AAValueConstantRange>(
8658 *this, IRPosition::value(*OpV, getCallBaseContext()),
8659 DepClassTy::REQUIRED);
8660 QuerriedAAs.push_back(&OpAA);
8661 T.unionAssumed(
8662 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8663 return T.isValidState();
8664 }
8665
8666 bool
8667 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8668 const Instruction *CtxI,
8669 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8670 Value *LHS = CmpI->getOperand(0);
8671 Value *RHS = CmpI->getOperand(1);
8672
8673 // Simplify the operands first.
8674 bool UsedAssumedInformation = false;
8675 const auto &SimplifiedLHS =
8676 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8677 *this, UsedAssumedInformation);
8678 if (!SimplifiedLHS.hasValue())
8679 return true;
8680 if (!SimplifiedLHS.getValue())
8681 return false;
8682 LHS = *SimplifiedLHS;
8683
8684 const auto &SimplifiedRHS =
8685 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8686 *this, UsedAssumedInformation);
8687 if (!SimplifiedRHS.hasValue())
8688 return true;
8689 if (!SimplifiedRHS.getValue())
8690 return false;
8691 RHS = *SimplifiedRHS;
8692
8693 // TODO: Allow non integers as well.
8694 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8695 return false;
8696
8697 auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8698 *this, IRPosition::value(*LHS, getCallBaseContext()),
8699 DepClassTy::REQUIRED);
8700 QuerriedAAs.push_back(&LHSAA);
8701 auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8702 *this, IRPosition::value(*RHS, getCallBaseContext()),
8703 DepClassTy::REQUIRED);
8704 QuerriedAAs.push_back(&RHSAA);
8705 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8706 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8707
8708 // If one of them is empty set, we can't decide.
8709 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8710 return true;
8711
8712 bool MustTrue = false, MustFalse = false;
8713
8714 auto AllowedRegion =
8715 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8716
8717 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8718 MustFalse = true;
8719
8720 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8721 MustTrue = true;
8722
8723 assert((!MustTrue || !MustFalse) &&
8724        "Either MustTrue or MustFalse should be false!");
8725
8726 if (MustTrue)
8727 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8728 else if (MustFalse)
8729 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8730 else
8731 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8732
8733 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8734                   << " " << RHSAA << "\n");
8735
8736 // TODO: Track a known state too.
8737 return T.isValidState();
8738 }
8739
8740 /// See AbstractAttribute::updateImpl(...).
8741 ChangeStatus updateImpl(Attributor &A) override {
8742 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8743 IntegerRangeState &T, bool Stripped) -> bool {
8744 Instruction *I = dyn_cast<Instruction>(&V);
8745 if (!I || isa<CallBase>(I)) {
8746
8747 // Simplify the operand first.
8748 bool UsedAssumedInformation = false;
8749 const auto &SimplifiedOpV =
8750 A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8751 *this, UsedAssumedInformation);
8752 if (!SimplifiedOpV.hasValue())
8753 return true;
8754 if (!SimplifiedOpV.getValue())
8755 return false;
8756 Value *VPtr = *SimplifiedOpV;
8757
8758 // If the value is not an instruction, we query the Attributor for its AA.
8759 const auto &AA = A.getAAFor<AAValueConstantRange>(
8760 *this, IRPosition::value(*VPtr, getCallBaseContext()),
8761 DepClassTy::REQUIRED);
8762
8763 // The clamp operator is not used here so that the program point CtxI can be utilized.
8764 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8765
8766 return T.isValidState();
8767 }
8768
8769 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8770 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8771 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8772 return false;
8773 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8774 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8775 return false;
8776 } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8777 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8778 return false;
8779 } else {
8780 // Give up with other instructions.
8781 // TODO: Add other instructions
8782
8783 T.indicatePessimisticFixpoint();
8784 return false;
8785 }
8786
8787 // Catch circular reasoning in a pessimistic way for now.
8788 // TODO: Check how the range evolves and if we stripped anything, see also
8789 // AADereferenceable or AAAlign for similar situations.
8790 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8791 if (QueriedAA != this)
8792 continue;
8793 // If we are in a steady state we do not need to worry.
8794 if (T.getAssumed() == getState().getAssumed())
8795 continue;
8796 T.indicatePessimisticFixpoint();
8797 }
8798
8799 return T.isValidState();
8800 };
8801
8802 IntegerRangeState T(getBitWidth());
8803
8804 bool UsedAssumedInformation = false;
8805 if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8806 VisitValueCB, getCtxI(),
8807 UsedAssumedInformation,
8808 /* UseValueSimplify */ false))
8809 return indicatePessimisticFixpoint();
8810
8811 // Ensure that long def-use chains can't cause circular reasoning either by
8812 // introducing a cutoff below.
8813 if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8814 return ChangeStatus::UNCHANGED;
8815 if (++NumChanges > MaxNumChanges) {
8816 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8817                   << " but only " << MaxNumChanges
8818                   << " are allowed to avoid cyclic reasoning.");
8819 return indicatePessimisticFixpoint();
8820 }
8821 return ChangeStatus::CHANGED;
8822 }
8823
8824 /// See AbstractAttribute::trackStatistics()
8825 void trackStatistics() const override {
8826 STATS_DECLTRACK_FLOATING_ATTR(value_range)
8827 }
8828
8829 /// Tracker to bail after too many widening steps of the constant range.
8830 int NumChanges = 0;
8831
8832 /// Upper bound for the number of allowed changes (=widening steps) for the
8833 /// constant range before we give up.
8834 static constexpr int MaxNumChanges = 5;
8835};
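The floating value-range logic above delegates all interval arithmetic to llvm::ConstantRange. The following standalone sketch, which is not part of AttributorAttributes.cpp and uses invented i32 ranges, shows the two ConstantRange entry points the code leans on, binaryOp() and makeAllowedICmpRegion():

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // Hypothetical assumed ranges for two i32 operands: [0, 10) and [1, 5).
  ConstantRange LHS(APInt(32, 0), APInt(32, 10));
  ConstantRange RHS(APInt(32, 1), APInt(32, 5));

  // calculateBinaryOperator() reduces to binaryOp(), which returns a
  // conservative superset of every possible result of the operation.
  ConstantRange Sum = LHS.binaryOp(Instruction::Add, RHS);
  errs() << "assumed add range: " << Sum << "\n";

  // calculateCmpInst() uses makeAllowedICmpRegion(): if the LHS range never
  // intersects the region allowed by the predicate, the compare must be false.
  ConstantRange Allowed =
      ConstantRange::makeAllowedICmpRegion(CmpInst::ICMP_ULT, RHS);
  bool MustFalse = Allowed.intersectWith(LHS).isEmptySet();
  errs() << "icmp ult must be false: " << (MustFalse ? "yes" : "no") << "\n";
  return 0;
}

Because binaryOp() only promises an over-approximation, the attribute additionally bounds the number of widening steps via MaxNumChanges above, so cyclic reasoning cannot grow the range indefinitely.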
8836
8837struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8838 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8839 : AAValueConstantRangeImpl(IRP, A) {}
8840
8841 /// See AbstractAttribute::initialize(...).
8842 ChangeStatus updateImpl(Attributor &A) override {
8843 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8844                  "not be called");
8845 }
8846
8847 /// See AbstractAttribute::trackStatistics()
8848 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8849};
8850
8851struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8852 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8853 : AAValueConstantRangeFunction(IRP, A) {}
8854
8855 /// See AbstractAttribute::trackStatistics()
8856 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8857};
8858
8859struct AAValueConstantRangeCallSiteReturned
8860 : AACallSiteReturnedFromReturned<AAValueConstantRange,
8861 AAValueConstantRangeImpl,
8862 AAValueConstantRangeImpl::StateType,
8863 /* IntroduceCallBaseContext */ true> {
8864 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8865 : AACallSiteReturnedFromReturned<AAValueConstantRange,
8866 AAValueConstantRangeImpl,
8867 AAValueConstantRangeImpl::StateType,
8868 /* IntroduceCallBaseContext */ true>(IRP,
8869 A) {
8870 }
8871
8872 /// See AbstractAttribute::initialize(...).
8873 void initialize(Attributor &A) override {
8874 // If it is a call instruction with range metadata, use the metadata.
8875 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8876 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8877 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8878
8879 AAValueConstantRangeImpl::initialize(A);
8880 }
8881
8882 /// See AbstractAttribute::trackStatistics()
8883 void trackStatistics() const override {
8884 STATS_DECLTRACK_CSRET_ATTR(value_range)
8885 }
8886};
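Both range-metadata paths above, the load case in AAValueConstantRangeFloating::initialize() and the call case here, funnel the `!range` node through getConstantRangeFromMetadata() before intersecting it into the known state. A standalone helper sketch (the helper name is invented, and it assumes an integer-typed instruction):

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Returns the range promised by `!range` metadata on an integer-typed load
// or call, or the full range when no metadata is attached.
static ConstantRange rangeFromAnnotatedValue(const Instruction &I) {
  unsigned BitWidth = I.getType()->getIntegerBitWidth();
  if (MDNode *RangeMD = I.getMetadata(LLVMContext::MD_range))
    return getConstantRangeFromMetadata(*RangeMD);
  return ConstantRange::getFull(BitWidth);
}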
8887struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8888 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8889 : AAValueConstantRangeFloating(IRP, A) {}
8890
8891 /// See AbstractAttribute::manifest()
8892 ChangeStatus manifest(Attributor &A) override {
8893 return ChangeStatus::UNCHANGED;
8894 }
8895
8896 /// See AbstractAttribute::trackStatistics()
8897 void trackStatistics() const override {
8898 STATS_DECLTRACK_CSARG_ATTR(value_range)
8899 }
8900};
8901} // namespace
8902
8903/// ------------------ Potential Values Attribute -------------------------
8904
8905namespace {
8906struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8907 using StateType = PotentialConstantIntValuesState;
8908
8909 AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8910 : AAPotentialConstantValues(IRP, A) {}
8911
8912 /// See AbstractAttribute::initialize(..).
8913 void initialize(Attributor &A) override {
8914 if (A.hasSimplificationCallback(getIRPosition()))
8915 indicatePessimisticFixpoint();
8916 else
8917 AAPotentialConstantValues::initialize(A);
8918 }
8919
8920 /// See AbstractAttribute::getAsStr().
8921 const std::string getAsStr() const override {
8922 std::string Str;
8923 llvm::raw_string_ostream OS(Str);
8924 OS << getState();
8925 return OS.str();
8926 }
8927
8928 /// See AbstractAttribute::updateImpl(...).
8929 ChangeStatus updateImpl(Attributor &A) override {
8930 return indicatePessimisticFixpoint();
8931 }
8932};
8933
8934struct AAPotentialConstantValuesArgument final
8935 : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8936 AAPotentialConstantValuesImpl,
8937 PotentialConstantIntValuesState> {
8938 using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8939 AAPotentialConstantValuesImpl,
8940 PotentialConstantIntValuesState>;
8941 AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8942 : Base(IRP, A) {}
8943
8944 /// See AbstractAttribute::initialize(..).
8945 void initialize(Attributor &A) override {
8946 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8947 indicatePessimisticFixpoint();
8948 } else {
8949 Base::initialize(A);
8950 }
8951 }
8952
8953 /// See AbstractAttribute::trackStatistics()
8954 void trackStatistics() const override {
8955 STATS_DECLTRACK_ARG_ATTR(potential_values)
8956 }
8957};
8958
8959struct AAPotentialConstantValuesReturned
8960 : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8961 AAPotentialConstantValuesImpl> {
8962 using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8963 AAPotentialConstantValuesImpl>;
8964 AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8965 : Base(IRP, A) {}
8966
8967 /// See AbstractAttribute::trackStatistics()
8968 void trackStatistics() const override {
8969 STATS_DECLTRACK_FNRET_ATTR(potential_values)
8970 }
8971};
8972
8973struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8974 AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8975 : AAPotentialConstantValuesImpl(IRP, A) {}
8976
8977 /// See AbstractAttribute::initialize(..).
8978 void initialize(Attributor &A) override {
8979 AAPotentialConstantValuesImpl::initialize(A);
8980 if (isAtFixpoint())
8981 return;
8982
8983 Value &V = getAssociatedValue();
8984
8985 if (auto *C = dyn_cast<ConstantInt>(&V)) {
8986 unionAssumed(C->getValue());
8987 indicateOptimisticFixpoint();
8988 return;
8989 }
8990
8991 if (isa<UndefValue>(&V)) {
8992 unionAssumedWithUndef();
8993 indicateOptimisticFixpoint();
8994 return;
8995 }
8996
8997 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8998 return;
8999
9000 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9001 return;
9002
9003 indicatePessimisticFixpoint();
9004
9005 LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9006                   << getAssociatedValue() << "\n");
9007 }
9008
9009 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9010 const APInt &RHS) {
9011 return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9012 }
9013
9014 static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9015 uint32_t ResultBitWidth) {
9016 Instruction::CastOps CastOp = CI->getOpcode();
9017 switch (CastOp) {
9018 default:
9019 llvm_unreachable("unsupported or not integer cast");
9020 case Instruction::Trunc:
9021 return Src.trunc(ResultBitWidth);
9022 case Instruction::SExt:
9023 return Src.sext(ResultBitWidth);
9024 case Instruction::ZExt:
9025 return Src.zext(ResultBitWidth);
9026 case Instruction::BitCast:
9027 return Src;
9028 }
9029 }
9030
9031 static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9032 const APInt &LHS, const APInt &RHS,
9033 bool &SkipOperation, bool &Unsupported) {
9034 Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9035 // Unsupported is set to true when the binary operator is not supported.
9036 // SkipOperation is set to true when UB occurs with the given operand pair
9037 // (LHS, RHS).
9038 // TODO: we should look at nsw and nuw keywords to handle operations
9039 // that create poison or undef value.
9040 switch (BinOpcode) {
9041 default:
9042 Unsupported = true;
9043 return LHS;
9044 case Instruction::Add:
9045 return LHS + RHS;
9046 case Instruction::Sub:
9047 return LHS - RHS;
9048 case Instruction::Mul:
9049 return LHS * RHS;
9050 case Instruction::UDiv:
9051 if (RHS.isZero()) {
9052 SkipOperation = true;
9053 return LHS;
9054 }
9055 return LHS.udiv(RHS);
9056 case Instruction::SDiv:
9057 if (RHS.isZero()) {
9058 SkipOperation = true;
9059 return LHS;
9060 }
9061 return LHS.sdiv(RHS);
9062 case Instruction::URem:
9063 if (RHS.isZero()) {
9064 SkipOperation = true;
9065 return LHS;
9066 }
9067 return LHS.urem(RHS);
9068 case Instruction::SRem:
9069 if (RHS.isZero()) {
9070 SkipOperation = true;
9071 return LHS;
9072 }
9073 return LHS.srem(RHS);
9074 case Instruction::Shl:
9075 return LHS.shl(RHS);
9076 case Instruction::LShr:
9077 return LHS.lshr(RHS);
9078 case Instruction::AShr:
9079 return LHS.ashr(RHS);
9080 case Instruction::And:
9081 return LHS & RHS;
9082 case Instruction::Or:
9083 return LHS | RHS;
9084 case Instruction::Xor:
9085 return LHS ^ RHS;
9086 }
9087 }
9088
9089 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9090 const APInt &LHS, const APInt &RHS) {
9091 bool SkipOperation = false;
9092 bool Unsupported = false;
9093 APInt Result =
9094 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9095 if (Unsupported)
9096 return false;
9097 // If SkipOperation is true, we can ignore this operand pair (L, R).
9098 if (!SkipOperation)
9099 unionAssumed(Result);
9100 return isValidState();
9101 }
9102
9103 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9104 auto AssumedBefore = getAssumed();
9105 Value *LHS = ICI->getOperand(0);
9106 Value *RHS = ICI->getOperand(1);
9107
9108 // Simplify the operands first.
9109 bool UsedAssumedInformation = false;
9110 const auto &SimplifiedLHS =
9111 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9112 *this, UsedAssumedInformation);
9113 if (!SimplifiedLHS.hasValue())
9114 return ChangeStatus::UNCHANGED;
9115 if (!SimplifiedLHS.getValue())
9116 return indicatePessimisticFixpoint();
9117 LHS = *SimplifiedLHS;
9118
9119 const auto &SimplifiedRHS =
9120 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9121 *this, UsedAssumedInformation);
9122 if (!SimplifiedRHS.hasValue())
9123 return ChangeStatus::UNCHANGED;
9124 if (!SimplifiedRHS.getValue())
9125 return indicatePessimisticFixpoint();
9126 RHS = *SimplifiedRHS;
9127
9128 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9129 return indicatePessimisticFixpoint();
9130
9131 auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9132 *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9133 if (!LHSAA.isValidState())
9134 return indicatePessimisticFixpoint();
9135
9136 auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9137 *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9138 if (!RHSAA.isValidState())
9139 return indicatePessimisticFixpoint();
9140
9141 const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9142 const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9143
9144 // TODO: make use of undef flag to limit potential values aggressively.
9145 bool MaybeTrue = false, MaybeFalse = false;
9146 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9147 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9148 // The result of any comparison between undefs can be soundly replaced
9149 // with undef.
9150 unionAssumedWithUndef();
9151 } else if (LHSAA.undefIsContained()) {
9152 for (const APInt &R : RHSAAPVS) {
9153 bool CmpResult = calculateICmpInst(ICI, Zero, R);
9154 MaybeTrue |= CmpResult;
9155 MaybeFalse |= !CmpResult;
9156 if (MaybeTrue & MaybeFalse)
9157 return indicatePessimisticFixpoint();
9158 }
9159 } else if (RHSAA.undefIsContained()) {
9160 for (const APInt &L : LHSAAPVS) {
9161 bool CmpResult = calculateICmpInst(ICI, L, Zero);
9162 MaybeTrue |= CmpResult;
9163 MaybeFalse |= !CmpResult;
9164 if (MaybeTrue & MaybeFalse)
9165 return indicatePessimisticFixpoint();
9166 }
9167 } else {
9168 for (const APInt &L : LHSAAPVS) {
9169 for (const APInt &R : RHSAAPVS) {
9170 bool CmpResult = calculateICmpInst(ICI, L, R);
9171 MaybeTrue |= CmpResult;
9172 MaybeFalse |= !CmpResult;
9173 if (MaybeTrue & MaybeFalse)
9174 return indicatePessimisticFixpoint();
9175 }
9176 }
9177 }
9178 if (MaybeTrue)
9179 unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9180 if (MaybeFalse)
9181 unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9182 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9183 : ChangeStatus::CHANGED;
9184 }
9185
9186 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9187 auto AssumedBefore = getAssumed();
9188 Value *LHS = SI->getTrueValue();
9189 Value *RHS = SI->getFalseValue();
9190
9191 // Simplify the operands first.
9192 bool UsedAssumedInformation = false;
9193 const auto &SimplifiedLHS =
9194 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9195 *this, UsedAssumedInformation);
9196 if (!SimplifiedLHS.hasValue())
9197 return ChangeStatus::UNCHANGED;
9198 if (!SimplifiedLHS.getValue())
9199 return indicatePessimisticFixpoint();
9200 LHS = *SimplifiedLHS;
9201
9202 const auto &SimplifiedRHS =
9203 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9204 *this, UsedAssumedInformation);
9205 if (!SimplifiedRHS.hasValue())
9206 return ChangeStatus::UNCHANGED;
9207 if (!SimplifiedRHS.getValue())
9208 return indicatePessimisticFixpoint();
9209 RHS = *SimplifiedRHS;
9210
9211 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9212 return indicatePessimisticFixpoint();
9213
9214 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9215 UsedAssumedInformation);
9216
9217 // Check if we only need one operand.
9218 bool OnlyLeft = false, OnlyRight = false;
9219 if (C.hasValue() && *C && (*C)->isOneValue())
9220 OnlyLeft = true;
9221 else if (C.hasValue() && *C && (*C)->isZeroValue())
9222 OnlyRight = true;
9223
9224 const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9225 if (!OnlyRight) {
9226 LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9227 *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9228 if (!LHSAA->isValidState())
9229 return indicatePessimisticFixpoint();
9230 }
9231 if (!OnlyLeft) {
9232 RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9233 *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9234 if (!RHSAA->isValidState())
9235 return indicatePessimisticFixpoint();
9236 }
9237
9238 if (!LHSAA || !RHSAA) {
9239 // select (true/false), lhs, rhs
9240 auto *OpAA = LHSAA ? LHSAA : RHSAA;
9241
9242 if (OpAA->undefIsContained())
9243 unionAssumedWithUndef();
9244 else
9245 unionAssumed(*OpAA);
9246
9247 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9248 // select i1 *, undef , undef => undef
9249 unionAssumedWithUndef();
9250 } else {
9251 unionAssumed(*LHSAA);
9252 unionAssumed(*RHSAA);
9253 }
9254 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9255 : ChangeStatus::CHANGED;
9256 }
9257
9258 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9259 auto AssumedBefore = getAssumed();
9260 if (!CI->isIntegerCast())
9261 return indicatePessimisticFixpoint();
9262 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9263 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9264 Value *Src = CI->getOperand(0);
9265
9266 // Simplify the operand first.
9267 bool UsedAssumedInformation = false;
9268 const auto &SimplifiedSrc =
9269 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9270 *this, UsedAssumedInformation);
9271 if (!SimplifiedSrc.hasValue())
9272 return ChangeStatus::UNCHANGED;
9273 if (!SimplifiedSrc.getValue())
9274 return indicatePessimisticFixpoint();
9275 Src = *SimplifiedSrc;
9276
9277 auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9278 *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9279 if (!SrcAA.isValidState())
9280 return indicatePessimisticFixpoint();
9281 const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9282 if (SrcAA.undefIsContained())
9283 unionAssumedWithUndef();
9284 else {
9285 for (const APInt &S : SrcAAPVS) {
9286 APInt T = calculateCastInst(CI, S, ResultBitWidth);
9287 unionAssumed(T);
9288 }
9289 }
9290 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9291 : ChangeStatus::CHANGED;
9292 }
9293
9294 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9295 auto AssumedBefore = getAssumed();
9296 Value *LHS = BinOp->getOperand(0);
9297 Value *RHS = BinOp->getOperand(1);
9298
9299 // Simplify the operands first.
9300 bool UsedAssumedInformation = false;
9301 const auto &SimplifiedLHS =
9302 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9303 *this, UsedAssumedInformation);
9304 if (!SimplifiedLHS.hasValue())
9305 return ChangeStatus::UNCHANGED;
9306 if (!SimplifiedLHS.getValue())
9307 return indicatePessimisticFixpoint();
9308 LHS = *SimplifiedLHS;
9309
9310 const auto &SimplifiedRHS =
9311 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9312 *this, UsedAssumedInformation);
9313 if (!SimplifiedRHS.hasValue())
9314 return ChangeStatus::UNCHANGED;
9315 if (!SimplifiedRHS.getValue())
9316 return indicatePessimisticFixpoint();
9317 RHS = *SimplifiedRHS;
9318
9319 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9320 return indicatePessimisticFixpoint();
9321
9322 auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9323 *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9324 if (!LHSAA.isValidState())
9325 return indicatePessimisticFixpoint();
9326
9327 auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9328 *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9329 if (!RHSAA.isValidState())
9330 return indicatePessimisticFixpoint();
9331
9332 const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9333 const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9334 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9335
9336 // TODO: make use of undef flag to limit potential values aggressively.
9337 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9338 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9339 return indicatePessimisticFixpoint();
9340 } else if (LHSAA.undefIsContained()) {
9341 for (const APInt &R : RHSAAPVS) {
9342 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9343 return indicatePessimisticFixpoint();
9344 }
9345 } else if (RHSAA.undefIsContained()) {
9346 for (const APInt &L : LHSAAPVS) {
9347 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9348 return indicatePessimisticFixpoint();
9349 }
9350 } else {
9351 for (const APInt &L : LHSAAPVS) {
9352 for (const APInt &R : RHSAAPVS) {
9353 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9354 return indicatePessimisticFixpoint();
9355 }
9356 }
9357 }
9358 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9359 : ChangeStatus::CHANGED;
9360 }
9361
9362 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9363 auto AssumedBefore = getAssumed();
9364 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9365 Value *IncomingValue = PHI->getIncomingValue(u);
9366
9367 // Simplify the operand first.
9368 bool UsedAssumedInformation = false;
9369 const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9370 IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9371 UsedAssumedInformation);
9372 if (!SimplifiedIncomingValue.hasValue())
9373 continue;
9374 if (!SimplifiedIncomingValue.getValue())
9375 return indicatePessimisticFixpoint();
9376 IncomingValue = *SimplifiedIncomingValue;
9377
9378 auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9379 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9380 if (!PotentialValuesAA.isValidState())
9381 return indicatePessimisticFixpoint();
9382 if (PotentialValuesAA.undefIsContained())
9383 unionAssumedWithUndef();
9384 else
9385 unionAssumed(PotentialValuesAA.getAssumed());
9386 }
9387 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9388 : ChangeStatus::CHANGED;
9389 }
9390
9391 /// See AbstractAttribute::updateImpl(...).
9392 ChangeStatus updateImpl(Attributor &A) override {
9393 Value &V = getAssociatedValue();
9394 Instruction *I = dyn_cast<Instruction>(&V);
9395
9396 if (auto *ICI = dyn_cast<ICmpInst>(I))
9397 return updateWithICmpInst(A, ICI);
9398
9399 if (auto *SI = dyn_cast<SelectInst>(I))
9400 return updateWithSelectInst(A, SI);
9401
9402 if (auto *CI = dyn_cast<CastInst>(I))
9403 return updateWithCastInst(A, CI);
9404
9405 if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9406 return updateWithBinaryOperator(A, BinOp);
9407
9408 if (auto *PHI = dyn_cast<PHINode>(I))
9409 return updateWithPHINode(A, PHI);
9410
9411 return indicatePessimisticFixpoint();
9412 }
9413
9414 /// See AbstractAttribute::trackStatistics()
9415 void trackStatistics() const override {
9416 STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9417 }
9418};
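Conceptually, the updateWith* helpers above are pairwise folds over small sets of APInts. A standalone sketch with invented operand sets makes the shape of updateWithBinaryOperator() explicit; the real state additionally tracks an undef flag and a size limit:

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // Hypothetical potential constants of the two i32 operands of an add.
  SmallVector<APInt, 4> LHS = {APInt(32, 1), APInt(32, 2)};
  SmallVector<APInt, 4> RHS = {APInt(32, 10), APInt(32, 20)};

  // updateWithBinaryOperator() unions the result of every (L, R) pair; here
  // the "union" is simply a vector of the four sums.
  SmallVector<APInt, 8> Results;
  for (const APInt &L : LHS)
    for (const APInt &R : RHS)
      Results.push_back(L + R);

  for (const APInt &V : Results)
    errs() << V << " "; // 11 21 12 22
  errs() << "\n";
  return 0;
}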
9419
9420struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9421 AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9422 : AAPotentialConstantValuesImpl(IRP, A) {}
9423
9424 /// See AbstractAttribute::initialize(...).
9425 ChangeStatus updateImpl(Attributor &A) override {
9426 llvm_unreachable(
9427     "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9428     "not be called");
9429 }
9430
9431 /// See AbstractAttribute::trackStatistics()
9432 void trackStatistics() const override {
9433 STATS_DECLTRACK_FN_ATTR(potential_values)
9434 }
9435};
9436
9437struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9438 AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9439 : AAPotentialConstantValuesFunction(IRP, A) {}
9440
9441 /// See AbstractAttribute::trackStatistics()
9442 void trackStatistics() const override {
9443 STATS_DECLTRACK_CS_ATTR(potential_values)
9444 }
9445};
9446
9447struct AAPotentialConstantValuesCallSiteReturned
9448 : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9449 AAPotentialConstantValuesImpl> {
9450 AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9451 Attributor &A)
9452 : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9453 AAPotentialConstantValuesImpl>(IRP, A) {}
9454
9455 /// See AbstractAttribute::trackStatistics()
9456 void trackStatistics() const override {
9457 STATS_DECLTRACK_CSRET_ATTR(potential_values)
9458 }
9459};
9460
9461struct AAPotentialConstantValuesCallSiteArgument
9462 : AAPotentialConstantValuesFloating {
9463 AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9464 Attributor &A)
9465 : AAPotentialConstantValuesFloating(IRP, A) {}
9466
9467 /// See AbstractAttribute::initialize(..).
9468 void initialize(Attributor &A) override {
9469 AAPotentialConstantValuesImpl::initialize(A);
9470 if (isAtFixpoint())
9471 return;
9472
9473 Value &V = getAssociatedValue();
9474
9475 if (auto *C = dyn_cast<ConstantInt>(&V)) {
9476 unionAssumed(C->getValue());
9477 indicateOptimisticFixpoint();
9478 return;
9479 }
9480
9481 if (isa<UndefValue>(&V)) {
9482 unionAssumedWithUndef();
9483 indicateOptimisticFixpoint();
9484 return;
9485 }
9486 }
9487
9488 /// See AbstractAttribute::updateImpl(...).
9489 ChangeStatus updateImpl(Attributor &A) override {
9490 Value &V = getAssociatedValue();
9491 auto AssumedBefore = getAssumed();
9492 auto &AA = A.getAAFor<AAPotentialConstantValues>(
9493 *this, IRPosition::value(V), DepClassTy::REQUIRED);
9494 const auto &S = AA.getAssumed();
9495 unionAssumed(S);
9496 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9497 : ChangeStatus::CHANGED;
9498 }
9499
9500 /// See AbstractAttribute::trackStatistics()
9501 void trackStatistics() const override {
9502 STATS_DECLTRACK_CSARG_ATTR(potential_values)
9503 }
9504};
9505
9506/// ------------------------ NoUndef Attribute ---------------------------------
9507struct AANoUndefImpl : AANoUndef {
9508 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9509
9510 /// See AbstractAttribute::initialize(...).
9511 void initialize(Attributor &A) override {
9512 if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9513 indicateOptimisticFixpoint();
9514 return;
9515 }
9516 Value &V = getAssociatedValue();
9517 if (isa<UndefValue>(V))
9518 indicatePessimisticFixpoint();
9519 else if (isa<FreezeInst>(V))
9520 indicateOptimisticFixpoint();
9521 else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9522 isGuaranteedNotToBeUndefOrPoison(&V))
9523 indicateOptimisticFixpoint();
9524 else
9525 AANoUndef::initialize(A);
9526 }
9527
9528 /// See followUsesInMBEC
9529 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9530 AANoUndef::StateType &State) {
9531 const Value *UseV = U->get();
9532 const DominatorTree *DT = nullptr;
9533 AssumptionCache *AC = nullptr;
9534 InformationCache &InfoCache = A.getInfoCache();
9535 if (Function *F = getAnchorScope()) {
9536 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9537 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9538 }
9539 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9540 bool TrackUse = false;
9541 // Track use for instructions which must produce undef or poison bits when
9542 // at least one operand contains such bits.
9543 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9544 TrackUse = true;
9545 return TrackUse;
9546 }
9547
9548 /// See AbstractAttribute::getAsStr().
9549 const std::string getAsStr() const override {
9550 return getAssumed() ? "noundef" : "may-undef-or-poison";
9551 }
9552
9553 ChangeStatus manifest(Attributor &A) override {
9554 // We don't manifest noundef attribute for dead positions because the
9555 // associated values with dead positions would be replaced with undef
9556 // values.
9557 bool UsedAssumedInformation = false;
9558 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9559 UsedAssumedInformation))
9560 return ChangeStatus::UNCHANGED;
9561 // A position whose simplified value resolves to no value at all is
9562 // considered to be dead. We don't manifest noundef in such positions for
9563 // the same reason above.
9564 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9565 .hasValue())
9566 return ChangeStatus::UNCHANGED;
9567 return AANoUndef::manifest(A);
9568 }
9569};
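The initialize() and followUseInMBEC() hooks above defer the actual undef/poison reasoning to isGuaranteedNotToBeUndefOrPoison() from ValueTracking. A minimal standalone sketch of that query on its own (the IR string is invented; a freeze result is always a well-defined value):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString("define i32 @f(i32 %x) {\n"
                                                  "  %fr = freeze i32 %x\n"
                                                  "  ret i32 %fr\n"
                                                  "}\n",
                                                  Err, Ctx);
  // The freeze instruction is the entry block's first instruction.
  Instruction &Freeze = M->getFunction("f")->getEntryBlock().front();
  errs() << (isGuaranteedNotToBeUndefOrPoison(&Freeze) ? "noundef" : "maybe")
         << "\n";
  return 0;
}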
9570
9571struct AANoUndefFloating : public AANoUndefImpl {
9572 AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9573 : AANoUndefImpl(IRP, A) {}
9574
9575 /// See AbstractAttribute::initialize(...).
9576 void initialize(Attributor &A) override {
9577 AANoUndefImpl::initialize(A);
9578 if (!getState().isAtFixpoint())
9579 if (Instruction *CtxI = getCtxI())
9580 followUsesInMBEC(*this, A, getState(), *CtxI);
9581 }
9582
9583 /// See AbstractAttribute::updateImpl(...).
9584 ChangeStatus updateImpl(Attributor &A) override {
9585 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9586 AANoUndef::StateType &T, bool Stripped) -> bool {
9587 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9588 DepClassTy::REQUIRED);
9589 if (!Stripped && this == &AA) {
9590 T.indicatePessimisticFixpoint();
9591 } else {
9592 const AANoUndef::StateType &S =
9593 static_cast<const AANoUndef::StateType &>(AA.getState());
9594 T ^= S;
9595 }
9596 return T.isValidState();
9597 };
9598
9599 StateType T;
9600 bool UsedAssumedInformation = false;
9601 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9602 VisitValueCB, getCtxI(),
9603 UsedAssumedInformation))
9604 return indicatePessimisticFixpoint();
9605
9606 return clampStateAndIndicateChange(getState(), T);
9607 }
9608
9609 /// See AbstractAttribute::trackStatistics()
9610 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9611};
9612
9613struct AANoUndefReturned final
9614 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9615 AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9616 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9617
9618 /// See AbstractAttribute::trackStatistics()
9619 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9620};
9621
9622struct AANoUndefArgument final
9623 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9624 AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9625 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9626
9627 /// See AbstractAttribute::trackStatistics()
9628 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9629};
9630
9631struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9632 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9633 : AANoUndefFloating(IRP, A) {}
9634
9635 /// See AbstractAttribute::trackStatistics()
9636 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9637};
9638
9639struct AANoUndefCallSiteReturned final
9640 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9641 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9642 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9643
9644 /// See AbstractAttribute::trackStatistics()
9645 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9646};
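At the IR level, a manifested AANoUndef on an argument position simply becomes the `noundef` parameter attribute. A hedged sketch of what that amounts to (the helper name is invented):

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Adds `noundef` to the first parameter of F if it is not already present,
// roughly what a manifested argument-position AANoUndef boils down to.
static void addNoUndefToFirstParam(Function &F) {
  if (F.arg_size() > 0 && !F.hasParamAttribute(0, Attribute::NoUndef))
    F.addParamAttr(0, Attribute::NoUndef);
}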
9647
9648struct AACallEdgesImpl : public AACallEdges {
9649 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9650
9651 virtual const SetVector<Function *> &getOptimisticEdges() const override {
9652 return CalledFunctions;
9653 }
9654
9655 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9656
9657 virtual bool hasNonAsmUnknownCallee() const override {
9658 return HasUnknownCalleeNonAsm;
9659 }
9660
9661 const std::string getAsStr() const override {
9662 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9663 std::to_string(CalledFunctions.size()) + "]";
9664 }
9665
9666 void trackStatistics() const override {}
9667
9668protected:
9669 void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9670 if (CalledFunctions.insert(Fn)) {
9671 Change = ChangeStatus::CHANGED;
9672 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9673                   << "\n");
9674 }
9675 }
9676
9677 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9678 if (!HasUnknownCallee)
9679 Change = ChangeStatus::CHANGED;
9680 if (NonAsm && !HasUnknownCalleeNonAsm)
9681 Change = ChangeStatus::CHANGED;
9682 HasUnknownCalleeNonAsm |= NonAsm;
9683 HasUnknownCallee = true;
9684 }
9685
9686private:
9687 /// Optimistic set of functions that might be called by this position.
9688 SetVector<Function *> CalledFunctions;
9689
9690 /// Is there any call with an unknown callee?
9691 bool HasUnknownCallee = false;
9692
9693 /// Is there any call with an unknown callee, excluding any inline asm.
9694 bool HasUnknownCalleeNonAsm = false;
9695};
9696
9697struct AACallEdgesCallSite : public AACallEdgesImpl {
9698 AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9699 : AACallEdgesImpl(IRP, A) {}
9700 /// See AbstractAttribute::updateImpl(...).
9701 ChangeStatus updateImpl(Attributor &A) override {
9702 ChangeStatus Change = ChangeStatus::UNCHANGED;
9703
9704 auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9705 bool Stripped) -> bool {
9706 if (Function *Fn = dyn_cast<Function>(&V)) {
9707 addCalledFunction(Fn, Change);
9708 } else {
9709 LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9710 setHasUnknownCallee(true, Change);
9711 }
9712
9713 // Explore all values.
9714 return true;
9715 };
9716
9717 // Process any value that we might call.
9718 auto ProcessCalledOperand = [&](Value *V) {
9719 bool DummyValue = false;
9720 bool UsedAssumedInformation = false;
9721 if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9722 DummyValue, VisitValue, nullptr,
9723 UsedAssumedInformation, false)) {
9724 // If we haven't gone through all values, assume that there are unknown
9725 // callees.
9726 setHasUnknownCallee(true, Change);
9727 }
9728 };
9729
9730 CallBase *CB = cast<CallBase>(getCtxI());
9731
9732 if (CB->isInlineAsm()) {
9733 if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9734 !hasAssumption(*CB, "ompx_no_call_asm"))
9735 setHasUnknownCallee(false, Change);
9736 return Change;
9737 }
9738
9739 // Process callee metadata if available.
9740 if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9741 for (auto &Op : MD->operands()) {
9742 Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9743 if (Callee)
9744 addCalledFunction(Callee, Change);
9745 }
9746 return Change;
9747 }
9748
9749 // The simplest case.
9750 ProcessCalledOperand(CB->getCalledOperand());
9751
9752 // Process callback functions.
9753 SmallVector<const Use *, 4u> CallbackUses;
9754 AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9755 for (const Use *U : CallbackUses)
9756 ProcessCalledOperand(U->get());
9757
9758 return Change;
9759 }
9760};
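The MD_callees branch in updateImpl() above treats a `!callees` metadata node as the complete callee set for the call. A standalone helper sketch showing just that extraction (the helper name is invented):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Collects the possible callees advertised by `!callees` metadata on a call,
// mirroring the MD_callees loop in AACallEdgesCallSite::updateImpl().
static void collectCalleesFromMD(const CallBase &CB,
                                 SmallVectorImpl<Function *> &Callees) {
  if (MDNode *MD = CB.getMetadata(LLVMContext::MD_callees))
    for (const MDOperand &Op : MD->operands())
      if (Function *Callee = mdconst::dyn_extract_or_null<Function>(Op))
        Callees.push_back(Callee);
}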
9761
9762struct AACallEdgesFunction : public AACallEdgesImpl {
9763 AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9764 : AACallEdgesImpl(IRP, A) {}
9765
9766 /// See AbstractAttribute::updateImpl(...).
9767 ChangeStatus updateImpl(Attributor &A) override {
9768 ChangeStatus Change = ChangeStatus::UNCHANGED;
9769
9770 auto ProcessCallInst = [&](Instruction &Inst) {
9771 CallBase &CB = cast<CallBase>(Inst);
9772
9773 auto &CBEdges = A.getAAFor<AACallEdges>(
9774 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9775 if (CBEdges.hasNonAsmUnknownCallee())
9776 setHasUnknownCallee(true, Change);
9777 if (CBEdges.hasUnknownCallee())
9778 setHasUnknownCallee(false, Change);
9779
9780 for (Function *F : CBEdges.getOptimisticEdges())
9781 addCalledFunction(F, Change);
9782
9783 return true;
9784 };
9785
9786 // Visit all callable instructions.
9787 bool UsedAssumedInformation = false;
9788 if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9789 UsedAssumedInformation,
9790 /* CheckBBLivenessOnly */ true)) {
9791 // If we haven't looked at all call like instructions, assume that there
9792 // are unknown callees.
9793 setHasUnknownCallee(true, Change);
9794 }
9795
9796 return Change;
9797 }
9798};
9799
9800struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9801private:
9802 struct QuerySet {
9803 void markReachable(const Function &Fn) {
9804 Reachable.insert(&Fn);
9805 Unreachable.erase(&Fn);
9806 }
9807
9808 /// If there is no information about the function, None is returned.
9809 Optional<bool> isCachedReachable(const Function &Fn) {
9810 // Assume that we can reach the function.
9811 // TODO: Be more specific with the unknown callee.
9812 if (CanReachUnknownCallee)
9813 return true;
9814
9815 if (Reachable.count(&Fn))
9816 return true;
9817
9818 if (Unreachable.count(&Fn))
9819 return false;
9820
9821 return llvm::None;
9822 }
9823
9824 /// Set of functions that we know for sure are reachable.
9825 DenseSet<const Function *> Reachable;
9826
9827 /// Set of functions that are unreachable, but might become reachable.
9828 DenseSet<const Function *> Unreachable;
9829
9830 /// If we can reach a function with a call to an unknown function we assume
9831 /// that we can reach any function.
9832 bool CanReachUnknownCallee = false;
9833 };
9834
9835 struct QueryResolver : public QuerySet {
9836 ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9837 ArrayRef<const AACallEdges *> AAEdgesList) {
9838 ChangeStatus Change = ChangeStatus::UNCHANGED;
9839
9840 for (auto *AAEdges : AAEdgesList) {
9841 if (AAEdges->hasUnknownCallee()) {
9842 if (!CanReachUnknownCallee)
9843 Change = ChangeStatus::CHANGED;
9844 CanReachUnknownCallee = true;
9845 return Change;
9846 }
9847 }
9848
9849 for (const Function *Fn : make_early_inc_range(Unreachable)) {
9850 if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9851 Change = ChangeStatus::CHANGED;
9852 markReachable(*Fn);
9853 }
9854 }
9855 return Change;
9856 }
9857
9858 bool isReachable(Attributor &A, AAFunctionReachability &AA,
9859 ArrayRef<const AACallEdges *> AAEdgesList,
9860 const Function &Fn) {
9861 Optional<bool> Cached = isCachedReachable(Fn);
9862 if (Cached.hasValue())
9863 return Cached.getValue();
9864
9865 // The query was not cached, thus it is new. We need to request an update
9866 // explicitly to make sure the information is properly run to a
9867 // fixpoint.
9868 A.registerForUpdate(AA);
9869
9870 // We need to assume that this function can't reach Fn to prevent
9871 // an infinite loop if this function is recursive.
9872 Unreachable.insert(&Fn);
9873
9874 bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9875 if (Result)
9876 markReachable(Fn);
9877 return Result;
9878 }
9879
9880 bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9881 ArrayRef<const AACallEdges *> AAEdgesList,
9882 const Function &Fn) const {
9883
9884 // Handle the most trivial case first.
9885 for (auto *AAEdges : AAEdgesList) {
9886 const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9887
9888 if (Edges.count(const_cast<Function *>(&Fn)))
9889 return true;
9890 }
9891
9892 SmallVector<const AAFunctionReachability *, 8> Deps;
9893 for (auto &AAEdges : AAEdgesList) {
9894 const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9895
9896 for (Function *Edge : Edges) {
9897 // Functions that do not call back into the module can be ignored.
9898 if (Edge->hasFnAttribute(Attribute::NoCallback))
9899 continue;
9900
9901 // We don't need a dependency if the result is reachable.
9902 const AAFunctionReachability &EdgeReachability =
9903 A.getAAFor<AAFunctionReachability>(
9904 AA, IRPosition::function(*Edge), DepClassTy::NONE);
9905 Deps.push_back(&EdgeReachability);
9906
9907 if (EdgeReachability.canReach(A, Fn))
9908 return true;
9909 }
9910 }
9911
9912 // The result is false for now, set dependencies and leave.
9913 for (auto *Dep : Deps)
9914 A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9915
9916 return false;
9917 }
9918 };
9919
9920 /// Get call edges that can be reached by this instruction.
9921 bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9922 const Instruction &Inst,
9923 SmallVector<const AACallEdges *> &Result) const {
9924 // Determine call like instructions that we can reach from the inst.
9925 auto CheckCallBase = [&](Instruction &CBInst) {
9926 if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9927 return true;
9928
9929 auto &CB = cast<CallBase>(CBInst);
9930 const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9931 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9932
9933 Result.push_back(&AAEdges);
9934 return true;
9935 };
9936
9937 bool UsedAssumedInformation = false;
9938 return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9939 UsedAssumedInformation,
9940 /* CheckBBLivenessOnly */ true);
9941 }
9942
9943public:
9944 AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9945 : AAFunctionReachability(IRP, A) {}
9946
9947 bool canReach(Attributor &A, const Function &Fn) const override {
9948 if (!isValidState())
9949 return true;
9950
9951 const AACallEdges &AAEdges =
9952 A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9953
9954 // Attributor returns attributes as const, so this function has to be
9955 // const for users of this attribute to use it without having to do
9956 // a const_cast.
9957 // This is a hack for us to be able to cache queries.
9958 auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9959 bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9960 {&AAEdges}, Fn);
9961
9962 return Result;
9963 }
9964
9965 /// Can \p CB reach \p Fn
9966 bool canReach(Attributor &A, CallBase &CB,
9967 const Function &Fn) const override {
9968 if (!isValidState())
9969 return true;
9970
9971 const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9972 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9973
9974 // Attributor returns attributes as const, so this function has to be
9975 // const for users of this attribute to use it without having to do
9976 // a const_cast.
9977 // This is a hack for us to be able to cache queries.
9978 auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9979 QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9980
9981 bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9982
9983 return Result;
9984 }
9985
9986 bool instructionCanReach(Attributor &A, const Instruction &Inst,
9987 const Function &Fn,
9988 bool UseBackwards) const override {
9989 if (!isValidState())
9990 return true;
9991
9992 if (UseBackwards)
9993 return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9994
9995 const auto &Reachability = A.getAAFor<AAReachability>(
9996 *this, IRPosition::function(*getAssociatedFunction()),
9997 DepClassTy::REQUIRED);
9998
9999 SmallVector<const AACallEdges *> CallEdges;
10000 bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10001 // Attributor returns attributes as const, so this function has to be
10002 // const for users of this attribute to use it without having to do
10003 // a const_cast.
10004 // This is a hack for us to be able to cache queries.
10005 auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10006 QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10007 if (!AllKnown)
10008 InstQSet.CanReachUnknownCallee = true;
10009
10010 return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10011 }
10012
10013 /// See AbstractAttribute::updateImpl(...).
10014 ChangeStatus updateImpl(Attributor &A) override {
10015 const AACallEdges &AAEdges =
10016 A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10017 ChangeStatus Change = ChangeStatus::UNCHANGED;
10018
10019 Change |= WholeFunction.update(A, *this, {&AAEdges});
10020
10021 for (auto &CBPair : CBQueries) {
10022 const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10023 *this, IRPosition::callsite_function(*CBPair.first),
10024 DepClassTy::REQUIRED);
10025
10026 Change |= CBPair.second.update(A, *this, {&AAEdges});
10027 }
10028
10029 // Update the Instruction queries.
10030 if (!InstQueries.empty()) {
10031 const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10032 *this, IRPosition::function(*getAssociatedFunction()),
10033 DepClassTy::REQUIRED);
10034
10035 // Check for local callbases first.
10036 for (auto &InstPair : InstQueries) {
10037 SmallVector<const AACallEdges *> CallEdges;
10038 bool AllKnown =
10039 getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10040 // update() will return a change if this affects any queries.
10041 if (!AllKnown)
10042 InstPair.second.CanReachUnknownCallee = true;
10043 Change |= InstPair.second.update(A, *this, CallEdges);
10044 }
10045 }
10046
10047 return Change;
10048 }
10049
10050 const std::string getAsStr() const override {
10051 size_t QueryCount =
10052 WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10053
10054 return "FunctionReachability [" +
10055 std::to_string(WholeFunction.Reachable.size()) + "," +
10056 std::to_string(QueryCount) + "]";
10057 }
10058
10059 void trackStatistics() const override {}
10060
10061private:
10062 bool canReachUnknownCallee() const override {
10063 return WholeFunction.CanReachUnknownCallee;
10064 }
10065
10066 /// Used to answer if the whole function can reach a specific function.
10067 QueryResolver WholeFunction;
10068
10069 /// Used to answer if a call base inside this function can reach a specific
10070 /// function.
10071 MapVector<const CallBase *, QueryResolver> CBQueries;
10072
10073 /// This is for instruction queries that scan "forward".
10074 MapVector<const Instruction *, QueryResolver> InstQueries;
10075};
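// A minimal usage sketch (hypothetical caller code, not taken from this file):
// an abstract attribute QueryingAA that wants to know whether a function Fn is
// reachable from an instruction I could query the AA above roughly as follows:
//
//   const auto &FnReach = A.getAAFor<AAFunctionReachability>(
//       QueryingAA, IRPosition::function(*I.getFunction()),
//       DepClassTy::REQUIRED);
//   bool MayReach = FnReach.instructionCanReach(A, I, Fn,
//                                               /* UseBackwards */ false);
//
// Repeated queries are answered from the cached QueryResolver sets
// (WholeFunction, CBQueries, InstQueries) and are refreshed by updateImpl on
// every fixpoint iteration.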
10076} // namespace
10077
10078/// ---------------------- Assumption Propagation ------------------------------
10079namespace {
10080struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10081 AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10082 const DenseSet<StringRef> &Known)
10083 : AAAssumptionInfo(IRP, A, Known) {}
10084
10085 bool hasAssumption(const StringRef Assumption) const override {
10086 return isValidState() && setContains(Assumption);
10087 }
10088
10089 /// See AbstractAttribute::getAsStr()
10090 const std::string getAsStr() const override {
10091 const SetContents &Known = getKnown();
10092 const SetContents &Assumed = getAssumed();
10093
10094 const std::string KnownStr =
10095 llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10096 const std::string AssumedStr =
10097 (Assumed.isUniversal())
10098 ? "Universal"
10099 : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10100
10101 return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10102 }
10103};
10104
10105/// Propagates assumption information from parent functions to all of their
10106/// successors. An assumption can be propagated if the containing function
10107/// dominates the called function.
10108///
10109/// We start with a "known" set of assumptions already valid for the associated
10110/// function and an "assumed" set that initially contains all possible
10111/// assumptions. The assumed set is inter-procedurally updated by narrowing its
10112/// contents as concrete values are known. The concrete values are seeded by the
10113/// first nodes that are either entries into the call graph, or contain no
10114/// assumptions. Each node is updated as the intersection of the assumed state
10115/// with all of its predecessors; a short worked example follows the struct below.
10116struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10117 AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10118 : AAAssumptionInfoImpl(IRP, A,
10119 getAssumptions(*IRP.getAssociatedFunction())) {}
10120
10121 /// See AbstractAttribute::manifest(...).
10122 ChangeStatus manifest(Attributor &A) override {
10123 const auto &Assumptions = getKnown();
10124
10125 // Don't manifest a universal set if it somehow made it here.
10126 if (Assumptions.isUniversal())
10127 return ChangeStatus::UNCHANGED;
10128
10129 Function *AssociatedFunction = getAssociatedFunction();
10130
10131 bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10132
10133 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10134 }
10135
10136 /// See AbstractAttribute::updateImpl(...).
10137 ChangeStatus updateImpl(Attributor &A) override {
10138 bool Changed = false;
10139
10140 auto CallSitePred = [&](AbstractCallSite ACS) {
10141 const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10142 *this, IRPosition::callsite_function(*ACS.getInstruction()),
10143 DepClassTy::REQUIRED);
10144 // Get the set of assumptions shared by all of this function's callers.
10145 Changed |= getIntersection(AssumptionAA.getAssumed());
10146 return !getAssumed().empty() || !getKnown().empty();
10147 };
10148
10149 bool UsedAssumedInformation = false;
10150 // Get the intersection of all assumptions held by this node's predecessors.
10151 // If we don't know all the call sites then this is either an entry into the
10152 // call graph or an empty node. This node is known to only contain its own
10153 // assumptions and can be propagated to its successors.
10154 if (!A.checkForAllCallSites(CallSitePred, *this, true,
10155 UsedAssumedInformation))
10156 return indicatePessimisticFixpoint();
10157
10158 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10159 }
10160
10161 void trackStatistics() const override {}
10162};
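// Worked example for the narrowing described above (assumption names "A" and
// "B" are purely illustrative): suppose @foo has exactly two visible call
// sites whose AAAssumptionInfo assumed sets are {"A", "B"} and {"A"}.
// CallSitePred intersects each of them into this node's state, so the assumed
// set narrows to {"A"}; once the fixpoint is reached and that assumed set
// becomes known, manifest() attaches it to @foo via addAssumptions(). If
// checkForAllCallSites cannot visit every caller (e.g. @foo is externally
// visible), indicatePessimisticFixpoint() is taken instead and only the
// initially known assumptions survive.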
10163
10164/// Assumption Info defined for call sites.
10165struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10166
10167 AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10168 : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10169
10170 /// See AbstractAttribute::initialize(...).
10171 void initialize(Attributor &A) override {
10172 const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10173 A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10174 }
10175
10176 /// See AbstractAttribute::manifest(...).
10177 ChangeStatus manifest(Attributor &A) override {
10178 // Don't manifest a universal set if it somehow made it here.
10179 if (getKnown().isUniversal())
10180 return ChangeStatus::UNCHANGED;
10181
10182 CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10183 bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10184
10185 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10186 }
10187
10188 /// See AbstractAttribute::updateImpl(...).
10189 ChangeStatus updateImpl(Attributor &A) override {
10190 const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10191 auto &AssumptionAA =
10192 A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10193 bool Changed = getIntersection(AssumptionAA.getAssumed());
10194 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10195 }
10196
10197 /// See AbstractAttribute::trackStatistics()
10198 void trackStatistics() const override {}
10199
10200private:
10201 /// Helper to initialize the known set with all the assumptions this call and
10202 /// the callee contain.
10203 DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10204 const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10205 auto Assumptions = getAssumptions(CB);
10206 if (Function *F = IRP.getAssociatedFunction())
10207 set_union(Assumptions, getAssumptions(*F));
10210 return Assumptions;
10211 }
10212};
10213} // namespace
10214
10215AACallGraphNode *AACallEdgeIterator::operator*() const {
10216 return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10217 &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10218}
10219
10220void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10221
10222const char AAReturnedValues::ID = 0;
10223const char AANoUnwind::ID = 0;
10224const char AANoSync::ID = 0;
10225const char AANoFree::ID = 0;
10226const char AANonNull::ID = 0;
10227const char AANoRecurse::ID = 0;
10228const char AAWillReturn::ID = 0;
10229const char AAUndefinedBehavior::ID = 0;
10230const char AANoAlias::ID = 0;
10231const char AAReachability::ID = 0;
10232const char AANoReturn::ID = 0;
10233const char AAIsDead::ID = 0;
10234const char AADereferenceable::ID = 0;
10235const char AAAlign::ID = 0;
10236const char AAInstanceInfo::ID = 0;
10237const char AANoCapture::ID = 0;
10238const char AAValueSimplify::ID = 0;
10239const char AAHeapToStack::ID = 0;
10240const char AAPrivatizablePtr::ID = 0;
10241const char AAMemoryBehavior::ID = 0;
10242const char AAMemoryLocation::ID = 0;
10243const char AAValueConstantRange::ID = 0;
10244const char AAPotentialConstantValues::ID = 0;
10245const char AANoUndef::ID = 0;
10246const char AACallEdges::ID = 0;
10247const char AAFunctionReachability::ID = 0;
10248const char AAPointerInfo::ID = 0;
10249const char AAAssumptionInfo::ID = 0;
10250
10251// Macro magic to create the static generator function for attributes that
10252// follow the naming scheme.
10253
10254#define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
10255 case IRPosition::PK: \
10256 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10257
10258#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
10259 case IRPosition::PK: \
10260 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \
10261 ++NumAAs; \
10262 break;
10263
10264#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
10265 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
10266 CLASS *AA = nullptr; \
10267 switch (IRP.getPositionKind()) { \
10268 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
10269 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
10270 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
10271 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
10272 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
10273 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
10274 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
10275 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
10276 } \
10277 return *AA; \
10278 }
10279
10280#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
10281 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
10282 CLASS *AA = nullptr; \
10283 switch (IRP.getPositionKind()) { \
10284 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
10285 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
10286 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
10287 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
10288 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
10289 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
10290 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
10291 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
10292 } \
10293 return *AA; \
10294 }
10295
10296#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
10297 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
10298 CLASS *AA = nullptr; \
10299 switch (IRP.getPositionKind()) { \
10300 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
10301 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
10302 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
10303 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
10304 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
10305 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
10306 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
10307 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
10308 } \
10309 return *AA; \
10310 }
10311
10312#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
10313 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
10314 CLASS *AA = nullptr; \
10315 switch (IRP.getPositionKind()) { \
10316 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
10317 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
10318 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
10319 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
10320 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
10321 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
10322 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
10323 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
10324 } \
10325 return *AA; \
10326 }
10327
10328#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
10329 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
10330 CLASS *AA = nullptr; \
10331 switch (IRP.getPositionKind()) { \
10332 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
10333 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
10334 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
10335 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
10336 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
10337 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
10338 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
10339 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
10340 } \
10341 return *AA; \
10342 }
10343
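// For illustration only: after macro substitution, the invocation
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) below expands
// (roughly, with the remaining invalid position kinds following the same
// SWITCH_PK_INV pattern) to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     /* ...IRP_FLOAT, IRP_ARGUMENT, IRP_RETURNED, IRP_CALL_SITE_RETURNED and
//        IRP_CALL_SITE_ARGUMENT are rejected the same way... */
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }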
10344CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10345CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10346CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10347CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10348CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10349CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10350CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10351CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10352CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10353
10354CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10355CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10356CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10357CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10358CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10359CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10360CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10361CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10362CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10363CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10364CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10365
10366CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10367CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10368CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10369
10370CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10371CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10372CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10373CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10374
10375CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10376
10377#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10378#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10379#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10380#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10381#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10382#undef SWITCH_PK_CREATE
10383#undef SWITCH_PK_INV